From d3f59bedb393521986e645bc48c47938f321b643 Mon Sep 17 00:00:00 2001
From: Miloslav Trmač
Date: Tue, 1 Oct 2019 22:15:58 +0200
Subject: Update c/image to v4.0.1 and buildah to 1.11.3
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This requires updating all import paths throughout, and a matching
buildah update to interoperate.

I can't figure out why go.mod tracks
github.com/containers/image v3.0.2+incompatible // indirect
((go mod graph) lists it as a direct dependency of libpod, but
(go list -json -m all) lists it as an indirect dependency); at least
judging by the vendor subdirectory, it doesn't seem to be actually used
in the built binaries.

Signed-off-by: Miloslav Trmač
---
 cmd/podman/login.go | 6 +- cmd/podman/logout.go | 4 +- cmd/podman/pull.go | 8 +- cmd/podman/push.go | 6 +- cmd/podman/runlabel.go | 2 +- cmd/podman/search.go | 2 +- cmd/podman/shared/container.go | 2 +- cmd/podman/shared/create.go | 2 +- cmd/podman/sign.go | 6 +- contrib/perftest/main.go | 2 +- go.mod | 29 +- go.sum | 61 + libpod/container.go | 2 +- libpod/container_commit.go | 2 +- libpod/container_inspect.go | 2 +- libpod/image/docker_registry_options.go | 4 +- libpod/image/image.go | 22 +- libpod/image/parts.go | 2 +- libpod/image/pull.go | 22 +- libpod/image/pull_test.go | 6 +- libpod/image/search.go | 4 +- libpod/image/utils.go | 8 +- libpod/options.go | 2 +- libpod/runtime.go | 4 +- libpod/runtime_img.go | 6 +- libpod/storage.go | 4 +- pkg/adapter/containers.go | 2 +- pkg/adapter/pods.go | 2 +- pkg/adapter/runtime.go | 4 +- pkg/adapter/runtime_remote.go | 4 +- pkg/registries/registries.go | 4 +- pkg/spec/createconfig.go | 2 +- pkg/trust/trust.go | 2 +- pkg/util/utils.go | 2 +- pkg/varlinkapi/images.go | 8 +- vendor/github.com/Microsoft/go-winio/file.go | 18 +- vendor/github.com/Microsoft/go-winio/go.mod | 9 + vendor/github.com/Microsoft/go-winio/go.sum | 16 + vendor/github.com/Microsoft/go-winio/hvsock.go | 305 +++++ vendor/github.com/Microsoft/go-winio/pipe.go | 231 ++-- .../github.com/Microsoft/go-winio/pkg/guid/guid.go | 235 ++++ vendor/github.com/Microsoft/go-winio/syscall.go | 2 +- .../Microsoft/go-winio/zsyscall_windows.go | 88 +- .../Microsoft/hcsshim/osversion/osversion.go | 51 + .../Microsoft/hcsshim/osversion/windowsbuilds.go | 10 + vendor/github.com/containers/buildah/.travis.yml | 1 + vendor/github.com/containers/buildah/CHANGELOG.md | 5 + vendor/github.com/containers/buildah/buildah.go | 4 +- vendor/github.com/containers/buildah/changelog.txt | 26 + vendor/github.com/containers/buildah/commit.go | 16 +- vendor/github.com/containers/buildah/common.go | 4 +- vendor/github.com/containers/buildah/config.go | 6 +- .../github.com/containers/buildah/docker/types.go | 4 +- vendor/github.com/containers/buildah/go.mod | 56 +- vendor/github.com/containers/buildah/go.sum | 286 +++-- vendor/github.com/containers/buildah/image.go | 10 +- .../containers/buildah/imagebuildah/build.go | 6 +- .../containers/buildah/imagebuildah/executor.go | 12 +- .../buildah/imagebuildah/stage_executor.go | 37 +- .../containers/buildah/imagebuildah/util.go | 40 +- vendor/github.com/containers/buildah/import.go | 8 +- vendor/github.com/containers/buildah/info.go | 12 + vendor/github.com/containers/buildah/new.go | 12 +- .../containers/buildah/pkg/blobcache/blobcache.go | 10 +- .../containers/buildah/pkg/parse/parse.go | 6 +- vendor/github.com/containers/buildah/pull.go | 24 +- vendor/github.com/containers/buildah/util.go | 8 +-
vendor/github.com/containers/buildah/util/util.go | 14 +- vendor/github.com/containers/image/LICENSE | 189 ---- vendor/github.com/containers/image/copy/copy.go | 920 --------------- .../github.com/containers/image/copy/manifest.go | 121 -- .../containers/image/copy/progress_reader.go | 28 - vendor/github.com/containers/image/copy/sign.go | 31 - .../containers/image/directory/directory_dest.go | 260 ----- .../containers/image/directory/directory_src.go | 96 -- .../image/directory/directory_transport.go | 187 ---- .../image/directory/explicitfilepath/path.go | 56 - .../containers/image/docker/archive/dest.go | 72 -- .../containers/image/docker/archive/src.go | 40 - .../containers/image/docker/archive/transport.go | 160 --- vendor/github.com/containers/image/docker/cache.go | 23 - .../containers/image/docker/daemon/client.go | 85 -- .../containers/image/docker/daemon/daemon_dest.go | 144 --- .../containers/image/docker/daemon/daemon_src.go | 62 -- .../image/docker/daemon/daemon_transport.go | 223 ---- .../containers/image/docker/docker_client.go | 643 ----------- .../containers/image/docker/docker_image.go | 107 -- .../containers/image/docker/docker_image_dest.go | 611 ---------- .../containers/image/docker/docker_image_src.go | 451 -------- .../containers/image/docker/docker_transport.go | 168 --- .../containers/image/docker/lookaside.go | 202 ---- .../image/docker/policyconfiguration/naming.go | 56 - .../containers/image/docker/reference/README.md | 2 - .../containers/image/docker/reference/helpers.go | 42 - .../containers/image/docker/reference/normalize.go | 181 --- .../containers/image/docker/reference/reference.go | 433 -------- .../containers/image/docker/reference/regexp.go | 143 --- .../containers/image/docker/tarfile/dest.go | 407 ------- .../containers/image/docker/tarfile/doc.go | 3 - .../containers/image/docker/tarfile/src.go | 478 -------- .../containers/image/docker/tarfile/types.go | 28 - .../containers/image/docker/wwwauthenticate.go | 159 --- .../containers/image/image/docker_list.go | 94 -- .../containers/image/image/docker_schema1.go | 202 ---- .../containers/image/image/docker_schema2.go | 351 ------ .../github.com/containers/image/image/manifest.go | 73 -- vendor/github.com/containers/image/image/memory.go | 65 -- vendor/github.com/containers/image/image/oci.go | 198 ---- .../github.com/containers/image/image/sourced.go | 103 -- .../github.com/containers/image/image/unparsed.go | 95 -- .../containers/image/internal/tmpdir/tmpdir.go | 29 - .../containers/image/manifest/docker_schema1.go | 316 ------ .../containers/image/manifest/docker_schema2.go | 255 ----- .../containers/image/manifest/manifest.go | 244 ---- vendor/github.com/containers/image/manifest/oci.go | 130 --- .../containers/image/oci/archive/oci_dest.go | 151 --- .../containers/image/oci/archive/oci_src.go | 102 -- .../containers/image/oci/archive/oci_transport.go | 192 ---- .../containers/image/oci/internal/oci_util.go | 126 --- .../containers/image/oci/layout/oci_dest.go | 306 ----- .../containers/image/oci/layout/oci_src.go | 171 --- .../containers/image/oci/layout/oci_transport.go | 264 ----- .../containers/image/openshift/openshift-copies.go | 1174 -------------------- .../containers/image/openshift/openshift.go | 562 ---------- .../image/openshift/openshift_transport.go | 157 --- .../containers/image/ostree/ostree_dest.go | 504 --------- .../containers/image/ostree/ostree_src.go | 416 ------- .../containers/image/ostree/ostree_transport.go | 252 ----- .../image/pkg/blobinfocache/boltdb/boltdb.go | 332 
------ .../containers/image/pkg/blobinfocache/default.go | 75 -- .../internal/prioritize/prioritize.go | 110 -- .../image/pkg/blobinfocache/memory/memory.go | 145 --- .../image/pkg/blobinfocache/none/none.go | 49 - .../image/pkg/compression/compression.go | 94 -- .../containers/image/pkg/docker/config/config.go | 344 ------ .../image/pkg/docker/config/config_linux.go | 79 -- .../image/pkg/docker/config/config_unsupported.go | 16 - .../github.com/containers/image/pkg/keyctl/key.go | 64 -- .../containers/image/pkg/keyctl/keyring.go | 79 -- .../github.com/containers/image/pkg/keyctl/perm.go | 33 - .../containers/image/pkg/keyctl/sys_linux.go | 25 - .../containers/image/pkg/strslice/README.md | 1 - .../containers/image/pkg/strslice/strslice.go | 30 - .../pkg/sysregistriesv2/system_registries_v2.go | 483 -------- .../image/pkg/tlsclientconfig/tlsclientconfig.go | 112 -- .../containers/image/signature/docker.go | 65 -- .../github.com/containers/image/signature/json.go | 88 -- .../containers/image/signature/mechanism.go | 85 -- .../containers/image/signature/mechanism_gpgme.go | 175 --- .../image/signature/mechanism_openpgp.go | 159 --- .../containers/image/signature/policy_config.go | 688 ------------ .../containers/image/signature/policy_eval.go | 289 ----- .../image/signature/policy_eval_baselayer.go | 20 - .../image/signature/policy_eval_signedby.go | 131 --- .../image/signature/policy_eval_simple.go | 29 - .../image/signature/policy_reference_match.go | 101 -- .../containers/image/signature/policy_types.go | 152 --- .../containers/image/signature/signature.go | 280 ----- .../containers/image/storage/storage_image.go | 956 ---------------- .../containers/image/storage/storage_reference.go | 225 ---- .../containers/image/storage/storage_transport.go | 366 ------ vendor/github.com/containers/image/tarball/doc.go | 48 - .../containers/image/tarball/tarball_reference.go | 94 -- .../containers/image/tarball/tarball_src.go | 268 ----- .../containers/image/tarball/tarball_transport.go | 66 -- .../transports/alltransports/alltransports.go | 46 - .../transports/alltransports/docker_daemon.go | 8 - .../transports/alltransports/docker_daemon_stub.go | 9 - .../image/transports/alltransports/ostree.go | 8 - .../image/transports/alltransports/ostree_stub.go | 9 - .../image/transports/alltransports/storage.go | 8 - .../image/transports/alltransports/storage_stub.go | 9 - .../github.com/containers/image/transports/stub.go | 36 - .../containers/image/transports/transports.go | 90 -- vendor/github.com/containers/image/types/types.go | 521 --------- vendor/github.com/containers/image/v4/LICENSE | 189 ++++ vendor/github.com/containers/image/v4/copy/copy.go | 975 ++++++++++++++++ .../containers/image/v4/copy/manifest.go | 121 ++ .../containers/image/v4/copy/progress_reader.go | 28 + vendor/github.com/containers/image/v4/copy/sign.go | 31 + .../image/v4/directory/directory_dest.go | 260 +++++ .../containers/image/v4/directory/directory_src.go | 96 ++ .../image/v4/directory/directory_transport.go | 187 ++++ .../image/v4/directory/explicitfilepath/path.go | 56 + .../containers/image/v4/docker/archive/dest.go | 72 ++ .../containers/image/v4/docker/archive/src.go | 40 + .../image/v4/docker/archive/transport.go | 160 +++ .../github.com/containers/image/v4/docker/cache.go | 23 + .../containers/image/v4/docker/daemon/client.go | 85 ++ .../image/v4/docker/daemon/daemon_dest.go | 144 +++ .../image/v4/docker/daemon/daemon_src.go | 62 ++ .../image/v4/docker/daemon/daemon_transport.go | 223 ++++ 
.../containers/image/v4/docker/docker_client.go | 645 +++++++++++ .../containers/image/v4/docker/docker_image.go | 107 ++ .../image/v4/docker/docker_image_dest.go | 611 ++++++++++ .../containers/image/v4/docker/docker_image_src.go | 451 ++++++++ .../containers/image/v4/docker/docker_transport.go | 168 +++ .../containers/image/v4/docker/lookaside.go | 202 ++++ .../image/v4/docker/policyconfiguration/naming.go | 56 + .../containers/image/v4/docker/reference/README.md | 2 + .../image/v4/docker/reference/helpers.go | 42 + .../image/v4/docker/reference/normalize.go | 181 +++ .../image/v4/docker/reference/reference.go | 433 ++++++++ .../containers/image/v4/docker/reference/regexp.go | 143 +++ .../containers/image/v4/docker/tarfile/dest.go | 407 +++++++ .../containers/image/v4/docker/tarfile/doc.go | 3 + .../containers/image/v4/docker/tarfile/src.go | 478 ++++++++ .../containers/image/v4/docker/tarfile/types.go | 28 + .../containers/image/v4/docker/wwwauthenticate.go | 159 +++ .../containers/image/v4/image/docker_list.go | 94 ++ .../containers/image/v4/image/docker_schema1.go | 202 ++++ .../containers/image/v4/image/docker_schema2.go | 357 ++++++ .../containers/image/v4/image/manifest.go | 73 ++ .../github.com/containers/image/v4/image/memory.go | 65 ++ vendor/github.com/containers/image/v4/image/oci.go | 214 ++++ .../containers/image/v4/image/sourced.go | 104 ++ .../containers/image/v4/image/unparsed.go | 95 ++ .../containers/image/v4/internal/pkg/keyctl/key.go | 73 ++ .../image/v4/internal/pkg/keyctl/keyring.go | 120 ++ .../image/v4/internal/pkg/keyctl/perm.go | 33 + .../image/v4/internal/pkg/keyctl/sys_linux.go | 25 + .../containers/image/v4/internal/tmpdir/tmpdir.go | 29 + .../containers/image/v4/manifest/docker_schema1.go | 316 ++++++ .../containers/image/v4/manifest/docker_schema2.go | 349 ++++++ .../containers/image/v4/manifest/manifest.go | 257 +++++ .../github.com/containers/image/v4/manifest/oci.go | 243 ++++ .../containers/image/v4/oci/archive/oci_dest.go | 151 +++ .../containers/image/v4/oci/archive/oci_src.go | 102 ++ .../image/v4/oci/archive/oci_transport.go | 192 ++++ .../containers/image/v4/oci/internal/oci_util.go | 126 +++ .../containers/image/v4/oci/layout/oci_dest.go | 306 +++++ .../containers/image/v4/oci/layout/oci_src.go | 171 +++ .../image/v4/oci/layout/oci_transport.go | 264 +++++ .../image/v4/openshift/openshift-copies.go | 1170 +++++++++++++++++++ .../containers/image/v4/openshift/openshift.go | 562 ++++++++++ .../image/v4/openshift/openshift_transport.go | 157 +++ .../containers/image/v4/ostree/ostree_dest.go | 504 +++++++++ .../containers/image/v4/ostree/ostree_src.go | 416 +++++++ .../containers/image/v4/ostree/ostree_transport.go | 252 +++++ .../image/v4/pkg/blobinfocache/boltdb/boltdb.go | 332 ++++++ .../image/v4/pkg/blobinfocache/default.go | 75 ++ .../internal/prioritize/prioritize.go | 110 ++ .../image/v4/pkg/blobinfocache/memory/memory.go | 145 +++ .../image/v4/pkg/blobinfocache/none/none.go | 49 + .../image/v4/pkg/compression/compression.go | 149 +++ .../image/v4/pkg/compression/internal/types.go | 57 + .../image/v4/pkg/compression/types/types.go | 13 + .../containers/image/v4/pkg/compression/zstd.go | 59 + .../image/v4/pkg/docker/config/config.go | 352 ++++++ .../image/v4/pkg/docker/config/config_linux.go | 115 ++ .../v4/pkg/docker/config/config_unsupported.go | 20 + .../containers/image/v4/pkg/strslice/README.md | 1 + .../containers/image/v4/pkg/strslice/strslice.go | 30 + .../v4/pkg/sysregistriesv2/system_registries_v2.go | 483 ++++++++ 
.../v4/pkg/tlsclientconfig/tlsclientconfig.go | 112 ++ .../containers/image/v4/signature/docker.go | 65 ++ .../containers/image/v4/signature/json.go | 88 ++ .../containers/image/v4/signature/mechanism.go | 85 ++ .../image/v4/signature/mechanism_gpgme.go | 175 +++ .../image/v4/signature/mechanism_openpgp.go | 159 +++ .../containers/image/v4/signature/policy_config.go | 688 ++++++++++++ .../containers/image/v4/signature/policy_eval.go | 289 +++++ .../image/v4/signature/policy_eval_baselayer.go | 20 + .../image/v4/signature/policy_eval_signedby.go | 131 +++ .../image/v4/signature/policy_eval_simple.go | 29 + .../image/v4/signature/policy_reference_match.go | 101 ++ .../containers/image/v4/signature/policy_types.go | 152 +++ .../containers/image/v4/signature/signature.go | 280 +++++ .../containers/image/v4/storage/storage_image.go | 956 ++++++++++++++++ .../image/v4/storage/storage_reference.go | 225 ++++ .../image/v4/storage/storage_transport.go | 366 ++++++ .../github.com/containers/image/v4/tarball/doc.go | 48 + .../image/v4/tarball/tarball_reference.go | 94 ++ .../containers/image/v4/tarball/tarball_src.go | 268 +++++ .../image/v4/tarball/tarball_transport.go | 66 ++ .../v4/transports/alltransports/alltransports.go | 46 + .../v4/transports/alltransports/docker_daemon.go | 8 + .../transports/alltransports/docker_daemon_stub.go | 9 + .../image/v4/transports/alltransports/ostree.go | 8 + .../v4/transports/alltransports/ostree_stub.go | 9 + .../image/v4/transports/alltransports/storage.go | 8 + .../v4/transports/alltransports/storage_stub.go | 9 + .../containers/image/v4/transports/stub.go | 36 + .../containers/image/v4/transports/transports.go | 90 ++ .../github.com/containers/image/v4/types/types.go | 535 +++++++++ .../containers/image/v4/version/version.go | 18 + .../github.com/containers/image/version/version.go | 18 - .../github.com/containers/libtrust/CONTRIBUTING.md | 13 + vendor/github.com/containers/libtrust/LICENSE | 191 ++++ vendor/github.com/containers/libtrust/MAINTAINERS | 3 + vendor/github.com/containers/libtrust/README.md | 22 + .../github.com/containers/libtrust/certificates.go | 175 +++ vendor/github.com/containers/libtrust/doc.go | 9 + vendor/github.com/containers/libtrust/ec_key.go | 422 +++++++ .../containers/libtrust/ec_key_no_openssl.go | 23 + .../containers/libtrust/ec_key_openssl.go | 24 + vendor/github.com/containers/libtrust/filter.go | 50 + vendor/github.com/containers/libtrust/hash.go | 56 + vendor/github.com/containers/libtrust/jsonsign.go | 657 +++++++++++ vendor/github.com/containers/libtrust/key.go | 253 +++++ vendor/github.com/containers/libtrust/key_files.go | 255 +++++ .../github.com/containers/libtrust/key_manager.go | 175 +++ vendor/github.com/containers/libtrust/rsa_key.go | 427 +++++++ vendor/github.com/containers/libtrust/util.go | 363 ++++++ vendor/github.com/docker/docker/AUTHORS | 82 +- vendor/github.com/docker/docker/api/common.go | 2 +- vendor/github.com/docker/docker/api/swagger.yaml | 174 ++- .../github.com/docker/docker/api/types/client.go | 9 + .../docker/docker/api/types/container/config.go | 2 +- .../api/types/container/container_changes.go | 2 +- .../docker/api/types/container/container_create.go | 2 +- .../docker/api/types/container/container_top.go | 2 +- .../docker/api/types/container/container_update.go | 2 +- .../docker/api/types/container/container_wait.go | 2 +- .../docker/api/types/container/host_config.go | 42 +- .../docker/docker/api/types/error_response_ext.go | 6 + .../docker/docker/api/types/filters/parse.go | 51 - 
.../docker/docker/api/types/image/image_history.go | 2 +- .../docker/docker/api/types/swarm/container.go | 13 +- .../docker/api/types/swarm/runtime/plugin.pb.go | 110 +- .../docker/api/types/swarm/runtime/plugin.proto | 1 + .../docker/docker/api/types/swarm/swarm.go | 2 + vendor/github.com/docker/docker/api/types/types.go | 2 +- .../docker/api/types/volume/volume_create.go | 2 +- .../docker/docker/api/types/volume/volume_list.go | 2 +- .../docker/docker/client/build_cancel.go | 8 +- .../github.com/docker/docker/client/build_prune.go | 2 +- .../docker/docker/client/checkpoint_list.go | 2 +- vendor/github.com/docker/docker/client/client.go | 48 +- .../docker/docker/client/config_create.go | 2 +- .../docker/docker/client/config_inspect.go | 2 +- .../github.com/docker/docker/client/config_list.go | 2 +- .../docker/docker/client/config_remove.go | 2 +- .../docker/docker/client/container_commit.go | 2 +- .../docker/docker/client/container_copy.go | 6 +- .../docker/docker/client/container_create.go | 6 +- .../docker/docker/client/container_diff.go | 2 +- .../docker/docker/client/container_exec.go | 2 +- .../docker/docker/client/container_inspect.go | 4 +- .../docker/docker/client/container_list.go | 2 +- .../docker/docker/client/container_prune.go | 2 +- .../docker/docker/client/container_remove.go | 2 +- .../docker/docker/client/container_top.go | 2 +- .../docker/docker/client/container_update.go | 3 +- .../github.com/docker/docker/client/disk_usage.go | 2 +- .../docker/docker/client/distribution_inspect.go | 2 +- vendor/github.com/docker/docker/client/errors.go | 28 +- vendor/github.com/docker/docker/client/hijack.go | 13 +- .../github.com/docker/docker/client/image_build.go | 8 + .../docker/docker/client/image_history.go | 2 +- .../docker/docker/client/image_inspect.go | 2 +- .../github.com/docker/docker/client/image_list.go | 2 +- .../github.com/docker/docker/client/image_prune.go | 2 +- .../github.com/docker/docker/client/image_pull.go | 4 +- .../github.com/docker/docker/client/image_push.go | 4 +- .../docker/docker/client/image_remove.go | 2 +- .../docker/docker/client/image_search.go | 6 +- vendor/github.com/docker/docker/client/info.go | 2 +- .../github.com/docker/docker/client/interface.go | 2 +- vendor/github.com/docker/docker/client/login.go | 6 +- .../docker/docker/client/network_create.go | 4 +- .../docker/docker/client/network_inspect.go | 2 +- .../docker/docker/client/network_list.go | 2 +- .../docker/docker/client/network_prune.go | 2 +- .../docker/docker/client/network_remove.go | 2 +- .../docker/docker/client/node_inspect.go | 2 +- .../github.com/docker/docker/client/node_list.go | 2 +- .../github.com/docker/docker/client/node_remove.go | 2 +- vendor/github.com/docker/docker/client/options.go | 52 +- vendor/github.com/docker/docker/client/ping.go | 15 +- .../docker/docker/client/plugin_create.go | 3 - .../docker/docker/client/plugin_inspect.go | 2 +- .../docker/docker/client/plugin_install.go | 4 +- .../github.com/docker/docker/client/plugin_list.go | 2 +- .../docker/docker/client/plugin_remove.go | 2 +- vendor/github.com/docker/docker/client/request.go | 8 +- .../docker/docker/client/secret_create.go | 2 +- .../docker/docker/client/secret_inspect.go | 2 +- .../github.com/docker/docker/client/secret_list.go | 2 +- .../docker/docker/client/secret_remove.go | 2 +- .../docker/docker/client/service_create.go | 2 +- .../docker/docker/client/service_inspect.go | 2 +- .../docker/docker/client/service_list.go | 2 +- .../docker/docker/client/service_remove.go | 2 +- 
.../docker/docker/client/service_update.go | 2 +- vendor/github.com/docker/docker/client/session.go | 18 - .../docker/docker/client/swarm_get_unlock_key.go | 2 +- .../github.com/docker/docker/client/swarm_init.go | 2 +- .../docker/docker/client/swarm_inspect.go | 2 +- .../docker/docker/client/task_inspect.go | 2 +- .../github.com/docker/docker/client/task_list.go | 2 +- vendor/github.com/docker/docker/client/version.go | 2 +- .../docker/docker/client/volume_create.go | 2 +- .../docker/docker/client/volume_inspect.go | 2 +- .../github.com/docker/docker/client/volume_list.go | 2 +- .../docker/docker/client/volume_prune.go | 2 +- .../docker/docker/client/volume_remove.go | 2 +- vendor/github.com/docker/docker/errdefs/defs.go | 5 - vendor/github.com/docker/docker/errdefs/helpers.go | 16 - .../docker/docker/errdefs/http_helpers.go | 172 +++ vendor/github.com/docker/docker/errdefs/is.go | 7 - .../docker/docker/pkg/idtools/idtools.go | 3 - .../docker/docker/pkg/ioutils/bytespipe.go | 3 +- vendor/github.com/docker/docker/pkg/mount/mount.go | 4 +- .../docker/docker/pkg/mount/mountinfo_freebsd.go | 5 +- .../docker/docker/pkg/mount/mountinfo_linux.go | 18 +- .../docker/docker/pkg/mount/sharedsubtree_linux.go | 24 +- .../docker/pkg/namesgenerator/names-generator.go | 16 +- .../docker/docker/pkg/system/args_windows.go | 16 + .../docker/docker/pkg/system/filesys_windows.go | 2 - .../docker/docker/pkg/system/init_unix.go | 5 + .../docker/docker/pkg/system/init_windows.go | 37 +- .../docker/docker/profiles/seccomp/default.json | 5 +- .../docker/profiles/seccomp/seccomp_default.go | 5 +- vendor/github.com/docker/go-metrics/go.mod | 5 + vendor/github.com/docker/go-metrics/go.sum | 67 ++ vendor/github.com/docker/libtrust/CONTRIBUTING.md | 13 - vendor/github.com/docker/libtrust/LICENSE | 191 ---- vendor/github.com/docker/libtrust/MAINTAINERS | 3 - vendor/github.com/docker/libtrust/README.md | 22 - vendor/github.com/docker/libtrust/certificates.go | 175 --- vendor/github.com/docker/libtrust/doc.go | 9 - vendor/github.com/docker/libtrust/ec_key.go | 428 ------- vendor/github.com/docker/libtrust/filter.go | 50 - vendor/github.com/docker/libtrust/hash.go | 56 - vendor/github.com/docker/libtrust/jsonsign.go | 657 ----------- vendor/github.com/docker/libtrust/key.go | 253 ----- vendor/github.com/docker/libtrust/key_files.go | 255 ----- vendor/github.com/docker/libtrust/key_manager.go | 175 --- vendor/github.com/docker/libtrust/rsa_key.go | 427 ------- vendor/github.com/docker/libtrust/util.go | 363 ------ .../github.com/fsouza/go-dockerclient/.travis.yml | 14 +- vendor/github.com/fsouza/go-dockerclient/AUTHORS | 3 + .../github.com/fsouza/go-dockerclient/Gopkg.toml | 23 - vendor/github.com/fsouza/go-dockerclient/Makefile | 7 - vendor/github.com/fsouza/go-dockerclient/README.md | 20 +- .../github.com/fsouza/go-dockerclient/appveyor.yml | 20 +- vendor/github.com/fsouza/go-dockerclient/auth.go | 2 + vendor/github.com/fsouza/go-dockerclient/client.go | 85 +- .../github.com/fsouza/go-dockerclient/container.go | 1 + vendor/github.com/fsouza/go-dockerclient/go.mod | 18 +- vendor/github.com/fsouza/go-dockerclient/go.sum | 59 +- vendor/github.com/fsouza/go-dockerclient/image.go | 42 +- .../github.com/fsouza/go-dockerclient/network.go | 12 + .../github.com/golang/protobuf/proto/properties.go | 5 +- vendor/github.com/golang/protobuf/ptypes/any.go | 141 +++ .../golang/protobuf/ptypes/any/any.pb.go | 200 ++++ .../golang/protobuf/ptypes/any/any.proto | 154 +++ vendor/github.com/golang/protobuf/ptypes/doc.go | 35 + 
.../github.com/golang/protobuf/ptypes/duration.go | 102 ++ .../golang/protobuf/ptypes/duration/duration.pb.go | 161 +++ .../golang/protobuf/ptypes/duration/duration.proto | 117 ++ .../github.com/golang/protobuf/ptypes/timestamp.go | 132 +++ .../protobuf/ptypes/timestamp/timestamp.pb.go | 179 +++ .../protobuf/ptypes/timestamp/timestamp.proto | 135 +++ vendor/github.com/gorilla/mux/.travis.yml | 24 - vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md | 11 - vendor/github.com/gorilla/mux/README.md | 69 ++ vendor/github.com/gorilla/mux/doc.go | 2 +- vendor/github.com/gorilla/mux/middleware.go | 61 +- vendor/github.com/klauspost/compress/LICENSE | 1 + .../klauspost/compress/fse/decompress.go | 8 +- .../klauspost/compress/huff0/decompress.go | 31 +- .../github.com/klauspost/compress/huff0/huff0.go | 11 + .../klauspost/compress/snappy/decode_amd64.s | 16 +- .../klauspost/compress/snappy/decode_other.go | 22 +- .../github.com/klauspost/compress/zstd/README.md | 6 +- .../github.com/klauspost/compress/zstd/blockdec.go | 13 +- .../github.com/klauspost/compress/zstd/blockenc.go | 60 +- .../github.com/klauspost/compress/zstd/bytebuf.go | 5 +- .../github.com/klauspost/compress/zstd/decoder.go | 51 +- .../klauspost/compress/zstd/decoder_options.go | 10 +- .../klauspost/compress/zstd/enc_dfast.go | 71 +- .../github.com/klauspost/compress/zstd/enc_fast.go | 17 +- .../github.com/klauspost/compress/zstd/encoder.go | 16 +- .../klauspost/compress/zstd/encoder_options.go | 2 +- .../github.com/klauspost/compress/zstd/framedec.go | 30 +- .../klauspost/compress/zstd/fse_decoder.go | 113 +- vendor/github.com/klauspost/compress/zstd/hash.go | 2 +- .../github.com/klauspost/compress/zstd/seqdec.go | 103 +- vendor/github.com/mattn/go-shellwords/.travis.yml | 12 +- vendor/github.com/mattn/go-shellwords/README.md | 2 +- vendor/github.com/mattn/go-shellwords/go.test.sh | 12 + .../github.com/mattn/go-shellwords/shellwords.go | 16 +- vendor/github.com/mattn/go-shellwords/util_go15.go | 11 +- .../github.com/mattn/go-shellwords/util_posix.go | 8 +- .../github.com/mattn/go-shellwords/util_windows.go | 8 +- .../image-spec/specs-go/v1/mediatype.go | 9 + .../opencontainers/image-spec/specs-go/version.go | 2 +- .../github.com/pquerna/ffjson/fflib/v1/reader.go | 2 +- .../github.com/prometheus/procfs/Makefile.common | 7 +- vendor/github.com/prometheus/procfs/fixtures.ttar | 551 ++++++++- vendor/github.com/prometheus/procfs/go.mod | 5 +- vendor/github.com/prometheus/procfs/go.sum | 2 + .../github.com/prometheus/procfs/internal/fs/fs.go | 3 + vendor/github.com/prometheus/procfs/mdstat.go | 111 +- vendor/github.com/prometheus/procfs/mountinfo.go | 178 +++ vendor/github.com/prometheus/procfs/proc.go | 14 + .../github.com/prometheus/procfs/proc_environ.go | 43 + vendor/github.com/prometheus/procfs/proc_stat.go | 2 +- vendor/github.com/xeipuuv/gojsonpointer/pointer.go | 8 +- vendor/golang.org/x/sys/unix/affinity_linux.go | 42 +- vendor/golang.org/x/sys/unix/dirent.go | 2 +- vendor/golang.org/x/sys/unix/endian_little.go | 2 +- vendor/golang.org/x/sys/unix/ioctl.go | 41 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 5 + vendor/golang.org/x/sys/unix/syscall_aix.go | 39 +- vendor/golang.org/x/sys/unix/syscall_darwin.go | 38 - vendor/golang.org/x/sys/unix/syscall_darwin_386.go | 2 + .../golang.org/x/sys/unix/syscall_darwin_amd64.go | 2 + vendor/golang.org/x/sys/unix/syscall_darwin_arm.go | 4 + .../golang.org/x/sys/unix/syscall_darwin_arm64.go | 4 + vendor/golang.org/x/sys/unix/syscall_dragonfly.go | 37 - 
vendor/golang.org/x/sys/unix/syscall_freebsd.go | 37 - vendor/golang.org/x/sys/unix/syscall_linux.go | 45 +- vendor/golang.org/x/sys/unix/syscall_netbsd.go | 37 - vendor/golang.org/x/sys/unix/syscall_openbsd.go | 37 - vendor/golang.org/x/sys/unix/syscall_solaris.go | 30 - vendor/golang.org/x/sys/unix/zerrors_linux_386.go | 40 + .../golang.org/x/sys/unix/zerrors_linux_amd64.go | 40 + vendor/golang.org/x/sys/unix/zerrors_linux_arm.go | 40 + .../golang.org/x/sys/unix/zerrors_linux_arm64.go | 40 + vendor/golang.org/x/sys/unix/zerrors_linux_mips.go | 40 + .../golang.org/x/sys/unix/zerrors_linux_mips64.go | 40 + .../x/sys/unix/zerrors_linux_mips64le.go | 40 + .../golang.org/x/sys/unix/zerrors_linux_mipsle.go | 40 + .../golang.org/x/sys/unix/zerrors_linux_ppc64.go | 40 + .../golang.org/x/sys/unix/zerrors_linux_ppc64le.go | 40 + .../golang.org/x/sys/unix/zerrors_linux_riscv64.go | 40 + .../golang.org/x/sys/unix/zerrors_linux_s390x.go | 40 + .../golang.org/x/sys/unix/zerrors_linux_sparc64.go | 40 + .../x/sys/unix/zsyscall_darwin_386.1_11.go | 20 +- .../golang.org/x/sys/unix/zsyscall_darwin_386.go | 30 +- vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s | 4 +- .../golang.org/x/sys/unix/zsyscall_darwin_amd64.go | 30 +- .../golang.org/x/sys/unix/zsyscall_darwin_amd64.s | 4 +- .../golang.org/x/sys/unix/zsyscall_darwin_arm.go | 15 - vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s | 2 - .../golang.org/x/sys/unix/zsyscall_darwin_arm64.go | 15 - .../golang.org/x/sys/unix/zsyscall_darwin_arm64.s | 2 - vendor/golang.org/x/sys/unix/zsysnum_linux_386.go | 6 + .../golang.org/x/sys/unix/zsysnum_linux_amd64.go | 6 + vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go | 6 + .../golang.org/x/sys/unix/zsysnum_linux_arm64.go | 6 + vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go | 6 + .../golang.org/x/sys/unix/zsysnum_linux_mips64.go | 6 + .../x/sys/unix/zsysnum_linux_mips64le.go | 6 + .../golang.org/x/sys/unix/zsysnum_linux_mipsle.go | 6 + .../golang.org/x/sys/unix/zsysnum_linux_ppc64.go | 6 + .../golang.org/x/sys/unix/zsysnum_linux_ppc64le.go | 6 + .../golang.org/x/sys/unix/zsysnum_linux_riscv64.go | 6 + .../golang.org/x/sys/unix/zsysnum_linux_s390x.go | 6 + .../golang.org/x/sys/unix/zsysnum_linux_sparc64.go | 6 + vendor/golang.org/x/sys/unix/ztypes_linux_386.go | 37 + vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go | 38 + vendor/golang.org/x/sys/unix/ztypes_linux_arm.go | 37 + vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go | 38 + vendor/golang.org/x/sys/unix/ztypes_linux_mips.go | 37 + .../golang.org/x/sys/unix/ztypes_linux_mips64.go | 38 + .../golang.org/x/sys/unix/ztypes_linux_mips64le.go | 38 + .../golang.org/x/sys/unix/ztypes_linux_mipsle.go | 37 + vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go | 38 + .../golang.org/x/sys/unix/ztypes_linux_ppc64le.go | 38 + .../golang.org/x/sys/unix/ztypes_linux_riscv64.go | 39 + vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go | 38 + .../golang.org/x/sys/unix/ztypes_linux_sparc64.go | 38 + .../golang.org/x/sys/windows/security_windows.go | 44 +- vendor/golang.org/x/sys/windows/service.go | 4 + vendor/golang.org/x/sys/windows/syscall_windows.go | 18 +- vendor/golang.org/x/sys/windows/types_windows.go | 29 +- .../golang.org/x/sys/windows/zsyscall_windows.go | 120 ++ vendor/golang.org/x/time/rate/rate.go | 17 + vendor/google.golang.org/genproto/LICENSE | 202 ++++ .../genproto/googleapis/rpc/status/status.pb.go | 163 +++ vendor/google.golang.org/grpc/AUTHORS | 1 + vendor/google.golang.org/grpc/LICENSE | 202 ++++ vendor/google.golang.org/grpc/codes/code_string.go 
| 62 ++ vendor/google.golang.org/grpc/codes/codes.go | 198 ++++ .../grpc/connectivity/connectivity.go | 73 ++ vendor/google.golang.org/grpc/grpclog/grpclog.go | 126 +++ vendor/google.golang.org/grpc/grpclog/logger.go | 85 ++ vendor/google.golang.org/grpc/grpclog/loggerv2.go | 195 ++++ vendor/google.golang.org/grpc/internal/internal.go | 71 ++ vendor/google.golang.org/grpc/status/status.go | 228 ++++ vendor/modules.txt | 130 ++- 585 files changed, 32578 insertions(+), 25243 deletions(-) create mode 100644 vendor/github.com/Microsoft/go-winio/go.mod create mode 100644 vendor/github.com/Microsoft/go-winio/go.sum create mode 100644 vendor/github.com/Microsoft/go-winio/hvsock.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/osversion.go create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go delete mode 100644 vendor/github.com/containers/image/LICENSE delete mode 100644 vendor/github.com/containers/image/copy/copy.go delete mode 100644 vendor/github.com/containers/image/copy/manifest.go delete mode 100644 vendor/github.com/containers/image/copy/progress_reader.go delete mode 100644 vendor/github.com/containers/image/copy/sign.go delete mode 100644 vendor/github.com/containers/image/directory/directory_dest.go delete mode 100644 vendor/github.com/containers/image/directory/directory_src.go delete mode 100644 vendor/github.com/containers/image/directory/directory_transport.go delete mode 100644 vendor/github.com/containers/image/directory/explicitfilepath/path.go delete mode 100644 vendor/github.com/containers/image/docker/archive/dest.go delete mode 100644 vendor/github.com/containers/image/docker/archive/src.go delete mode 100644 vendor/github.com/containers/image/docker/archive/transport.go delete mode 100644 vendor/github.com/containers/image/docker/cache.go delete mode 100644 vendor/github.com/containers/image/docker/daemon/client.go delete mode 100644 vendor/github.com/containers/image/docker/daemon/daemon_dest.go delete mode 100644 vendor/github.com/containers/image/docker/daemon/daemon_src.go delete mode 100644 vendor/github.com/containers/image/docker/daemon/daemon_transport.go delete mode 100644 vendor/github.com/containers/image/docker/docker_client.go delete mode 100644 vendor/github.com/containers/image/docker/docker_image.go delete mode 100644 vendor/github.com/containers/image/docker/docker_image_dest.go delete mode 100644 vendor/github.com/containers/image/docker/docker_image_src.go delete mode 100644 vendor/github.com/containers/image/docker/docker_transport.go delete mode 100644 vendor/github.com/containers/image/docker/lookaside.go delete mode 100644 vendor/github.com/containers/image/docker/policyconfiguration/naming.go delete mode 100644 vendor/github.com/containers/image/docker/reference/README.md delete mode 100644 vendor/github.com/containers/image/docker/reference/helpers.go delete mode 100644 vendor/github.com/containers/image/docker/reference/normalize.go delete mode 100644 vendor/github.com/containers/image/docker/reference/reference.go delete mode 100644 vendor/github.com/containers/image/docker/reference/regexp.go delete mode 100644 vendor/github.com/containers/image/docker/tarfile/dest.go delete mode 100644 vendor/github.com/containers/image/docker/tarfile/doc.go delete mode 100644 vendor/github.com/containers/image/docker/tarfile/src.go delete mode 100644 vendor/github.com/containers/image/docker/tarfile/types.go delete mode 100644 
vendor/github.com/containers/image/docker/wwwauthenticate.go delete mode 100644 vendor/github.com/containers/image/image/docker_list.go delete mode 100644 vendor/github.com/containers/image/image/docker_schema1.go delete mode 100644 vendor/github.com/containers/image/image/docker_schema2.go delete mode 100644 vendor/github.com/containers/image/image/manifest.go delete mode 100644 vendor/github.com/containers/image/image/memory.go delete mode 100644 vendor/github.com/containers/image/image/oci.go delete mode 100644 vendor/github.com/containers/image/image/sourced.go delete mode 100644 vendor/github.com/containers/image/image/unparsed.go delete mode 100644 vendor/github.com/containers/image/internal/tmpdir/tmpdir.go delete mode 100644 vendor/github.com/containers/image/manifest/docker_schema1.go delete mode 100644 vendor/github.com/containers/image/manifest/docker_schema2.go delete mode 100644 vendor/github.com/containers/image/manifest/manifest.go delete mode 100644 vendor/github.com/containers/image/manifest/oci.go delete mode 100644 vendor/github.com/containers/image/oci/archive/oci_dest.go delete mode 100644 vendor/github.com/containers/image/oci/archive/oci_src.go delete mode 100644 vendor/github.com/containers/image/oci/archive/oci_transport.go delete mode 100644 vendor/github.com/containers/image/oci/internal/oci_util.go delete mode 100644 vendor/github.com/containers/image/oci/layout/oci_dest.go delete mode 100644 vendor/github.com/containers/image/oci/layout/oci_src.go delete mode 100644 vendor/github.com/containers/image/oci/layout/oci_transport.go delete mode 100644 vendor/github.com/containers/image/openshift/openshift-copies.go delete mode 100644 vendor/github.com/containers/image/openshift/openshift.go delete mode 100644 vendor/github.com/containers/image/openshift/openshift_transport.go delete mode 100644 vendor/github.com/containers/image/ostree/ostree_dest.go delete mode 100644 vendor/github.com/containers/image/ostree/ostree_src.go delete mode 100644 vendor/github.com/containers/image/ostree/ostree_transport.go delete mode 100644 vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go delete mode 100644 vendor/github.com/containers/image/pkg/blobinfocache/default.go delete mode 100644 vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go delete mode 100644 vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go delete mode 100644 vendor/github.com/containers/image/pkg/blobinfocache/none/none.go delete mode 100644 vendor/github.com/containers/image/pkg/compression/compression.go delete mode 100644 vendor/github.com/containers/image/pkg/docker/config/config.go delete mode 100644 vendor/github.com/containers/image/pkg/docker/config/config_linux.go delete mode 100644 vendor/github.com/containers/image/pkg/docker/config/config_unsupported.go delete mode 100644 vendor/github.com/containers/image/pkg/keyctl/key.go delete mode 100644 vendor/github.com/containers/image/pkg/keyctl/keyring.go delete mode 100644 vendor/github.com/containers/image/pkg/keyctl/perm.go delete mode 100644 vendor/github.com/containers/image/pkg/keyctl/sys_linux.go delete mode 100644 vendor/github.com/containers/image/pkg/strslice/README.md delete mode 100644 vendor/github.com/containers/image/pkg/strslice/strslice.go delete mode 100644 vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go delete mode 100644 vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go delete mode 100644 
vendor/github.com/containers/image/signature/docker.go delete mode 100644 vendor/github.com/containers/image/signature/json.go delete mode 100644 vendor/github.com/containers/image/signature/mechanism.go delete mode 100644 vendor/github.com/containers/image/signature/mechanism_gpgme.go delete mode 100644 vendor/github.com/containers/image/signature/mechanism_openpgp.go delete mode 100644 vendor/github.com/containers/image/signature/policy_config.go delete mode 100644 vendor/github.com/containers/image/signature/policy_eval.go delete mode 100644 vendor/github.com/containers/image/signature/policy_eval_baselayer.go delete mode 100644 vendor/github.com/containers/image/signature/policy_eval_signedby.go delete mode 100644 vendor/github.com/containers/image/signature/policy_eval_simple.go delete mode 100644 vendor/github.com/containers/image/signature/policy_reference_match.go delete mode 100644 vendor/github.com/containers/image/signature/policy_types.go delete mode 100644 vendor/github.com/containers/image/signature/signature.go delete mode 100644 vendor/github.com/containers/image/storage/storage_image.go delete mode 100644 vendor/github.com/containers/image/storage/storage_reference.go delete mode 100644 vendor/github.com/containers/image/storage/storage_transport.go delete mode 100644 vendor/github.com/containers/image/tarball/doc.go delete mode 100644 vendor/github.com/containers/image/tarball/tarball_reference.go delete mode 100644 vendor/github.com/containers/image/tarball/tarball_src.go delete mode 100644 vendor/github.com/containers/image/tarball/tarball_transport.go delete mode 100644 vendor/github.com/containers/image/transports/alltransports/alltransports.go delete mode 100644 vendor/github.com/containers/image/transports/alltransports/docker_daemon.go delete mode 100644 vendor/github.com/containers/image/transports/alltransports/docker_daemon_stub.go delete mode 100644 vendor/github.com/containers/image/transports/alltransports/ostree.go delete mode 100644 vendor/github.com/containers/image/transports/alltransports/ostree_stub.go delete mode 100644 vendor/github.com/containers/image/transports/alltransports/storage.go delete mode 100644 vendor/github.com/containers/image/transports/alltransports/storage_stub.go delete mode 100644 vendor/github.com/containers/image/transports/stub.go delete mode 100644 vendor/github.com/containers/image/transports/transports.go delete mode 100644 vendor/github.com/containers/image/types/types.go create mode 100644 vendor/github.com/containers/image/v4/LICENSE create mode 100644 vendor/github.com/containers/image/v4/copy/copy.go create mode 100644 vendor/github.com/containers/image/v4/copy/manifest.go create mode 100644 vendor/github.com/containers/image/v4/copy/progress_reader.go create mode 100644 vendor/github.com/containers/image/v4/copy/sign.go create mode 100644 vendor/github.com/containers/image/v4/directory/directory_dest.go create mode 100644 vendor/github.com/containers/image/v4/directory/directory_src.go create mode 100644 vendor/github.com/containers/image/v4/directory/directory_transport.go create mode 100644 vendor/github.com/containers/image/v4/directory/explicitfilepath/path.go create mode 100644 vendor/github.com/containers/image/v4/docker/archive/dest.go create mode 100644 vendor/github.com/containers/image/v4/docker/archive/src.go create mode 100644 vendor/github.com/containers/image/v4/docker/archive/transport.go create mode 100644 vendor/github.com/containers/image/v4/docker/cache.go create mode 100644 
vendor/github.com/containers/image/v4/docker/daemon/client.go create mode 100644 vendor/github.com/containers/image/v4/docker/daemon/daemon_dest.go create mode 100644 vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go create mode 100644 vendor/github.com/containers/image/v4/docker/daemon/daemon_transport.go create mode 100644 vendor/github.com/containers/image/v4/docker/docker_client.go create mode 100644 vendor/github.com/containers/image/v4/docker/docker_image.go create mode 100644 vendor/github.com/containers/image/v4/docker/docker_image_dest.go create mode 100644 vendor/github.com/containers/image/v4/docker/docker_image_src.go create mode 100644 vendor/github.com/containers/image/v4/docker/docker_transport.go create mode 100644 vendor/github.com/containers/image/v4/docker/lookaside.go create mode 100644 vendor/github.com/containers/image/v4/docker/policyconfiguration/naming.go create mode 100644 vendor/github.com/containers/image/v4/docker/reference/README.md create mode 100644 vendor/github.com/containers/image/v4/docker/reference/helpers.go create mode 100644 vendor/github.com/containers/image/v4/docker/reference/normalize.go create mode 100644 vendor/github.com/containers/image/v4/docker/reference/reference.go create mode 100644 vendor/github.com/containers/image/v4/docker/reference/regexp.go create mode 100644 vendor/github.com/containers/image/v4/docker/tarfile/dest.go create mode 100644 vendor/github.com/containers/image/v4/docker/tarfile/doc.go create mode 100644 vendor/github.com/containers/image/v4/docker/tarfile/src.go create mode 100644 vendor/github.com/containers/image/v4/docker/tarfile/types.go create mode 100644 vendor/github.com/containers/image/v4/docker/wwwauthenticate.go create mode 100644 vendor/github.com/containers/image/v4/image/docker_list.go create mode 100644 vendor/github.com/containers/image/v4/image/docker_schema1.go create mode 100644 vendor/github.com/containers/image/v4/image/docker_schema2.go create mode 100644 vendor/github.com/containers/image/v4/image/manifest.go create mode 100644 vendor/github.com/containers/image/v4/image/memory.go create mode 100644 vendor/github.com/containers/image/v4/image/oci.go create mode 100644 vendor/github.com/containers/image/v4/image/sourced.go create mode 100644 vendor/github.com/containers/image/v4/image/unparsed.go create mode 100644 vendor/github.com/containers/image/v4/internal/pkg/keyctl/key.go create mode 100644 vendor/github.com/containers/image/v4/internal/pkg/keyctl/keyring.go create mode 100644 vendor/github.com/containers/image/v4/internal/pkg/keyctl/perm.go create mode 100644 vendor/github.com/containers/image/v4/internal/pkg/keyctl/sys_linux.go create mode 100644 vendor/github.com/containers/image/v4/internal/tmpdir/tmpdir.go create mode 100644 vendor/github.com/containers/image/v4/manifest/docker_schema1.go create mode 100644 vendor/github.com/containers/image/v4/manifest/docker_schema2.go create mode 100644 vendor/github.com/containers/image/v4/manifest/manifest.go create mode 100644 vendor/github.com/containers/image/v4/manifest/oci.go create mode 100644 vendor/github.com/containers/image/v4/oci/archive/oci_dest.go create mode 100644 vendor/github.com/containers/image/v4/oci/archive/oci_src.go create mode 100644 vendor/github.com/containers/image/v4/oci/archive/oci_transport.go create mode 100644 vendor/github.com/containers/image/v4/oci/internal/oci_util.go create mode 100644 vendor/github.com/containers/image/v4/oci/layout/oci_dest.go create mode 100644 
vendor/github.com/containers/image/v4/oci/layout/oci_src.go create mode 100644 vendor/github.com/containers/image/v4/oci/layout/oci_transport.go create mode 100644 vendor/github.com/containers/image/v4/openshift/openshift-copies.go create mode 100644 vendor/github.com/containers/image/v4/openshift/openshift.go create mode 100644 vendor/github.com/containers/image/v4/openshift/openshift_transport.go create mode 100644 vendor/github.com/containers/image/v4/ostree/ostree_dest.go create mode 100644 vendor/github.com/containers/image/v4/ostree/ostree_src.go create mode 100644 vendor/github.com/containers/image/v4/ostree/ostree_transport.go create mode 100644 vendor/github.com/containers/image/v4/pkg/blobinfocache/boltdb/boltdb.go create mode 100644 vendor/github.com/containers/image/v4/pkg/blobinfocache/default.go create mode 100644 vendor/github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize/prioritize.go create mode 100644 vendor/github.com/containers/image/v4/pkg/blobinfocache/memory/memory.go create mode 100644 vendor/github.com/containers/image/v4/pkg/blobinfocache/none/none.go create mode 100644 vendor/github.com/containers/image/v4/pkg/compression/compression.go create mode 100644 vendor/github.com/containers/image/v4/pkg/compression/internal/types.go create mode 100644 vendor/github.com/containers/image/v4/pkg/compression/types/types.go create mode 100644 vendor/github.com/containers/image/v4/pkg/compression/zstd.go create mode 100644 vendor/github.com/containers/image/v4/pkg/docker/config/config.go create mode 100644 vendor/github.com/containers/image/v4/pkg/docker/config/config_linux.go create mode 100644 vendor/github.com/containers/image/v4/pkg/docker/config/config_unsupported.go create mode 100644 vendor/github.com/containers/image/v4/pkg/strslice/README.md create mode 100644 vendor/github.com/containers/image/v4/pkg/strslice/strslice.go create mode 100644 vendor/github.com/containers/image/v4/pkg/sysregistriesv2/system_registries_v2.go create mode 100644 vendor/github.com/containers/image/v4/pkg/tlsclientconfig/tlsclientconfig.go create mode 100644 vendor/github.com/containers/image/v4/signature/docker.go create mode 100644 vendor/github.com/containers/image/v4/signature/json.go create mode 100644 vendor/github.com/containers/image/v4/signature/mechanism.go create mode 100644 vendor/github.com/containers/image/v4/signature/mechanism_gpgme.go create mode 100644 vendor/github.com/containers/image/v4/signature/mechanism_openpgp.go create mode 100644 vendor/github.com/containers/image/v4/signature/policy_config.go create mode 100644 vendor/github.com/containers/image/v4/signature/policy_eval.go create mode 100644 vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go create mode 100644 vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go create mode 100644 vendor/github.com/containers/image/v4/signature/policy_eval_simple.go create mode 100644 vendor/github.com/containers/image/v4/signature/policy_reference_match.go create mode 100644 vendor/github.com/containers/image/v4/signature/policy_types.go create mode 100644 vendor/github.com/containers/image/v4/signature/signature.go create mode 100644 vendor/github.com/containers/image/v4/storage/storage_image.go create mode 100644 vendor/github.com/containers/image/v4/storage/storage_reference.go create mode 100644 vendor/github.com/containers/image/v4/storage/storage_transport.go create mode 100644 vendor/github.com/containers/image/v4/tarball/doc.go create mode 100644 
vendor/github.com/containers/image/v4/tarball/tarball_reference.go create mode 100644 vendor/github.com/containers/image/v4/tarball/tarball_src.go create mode 100644 vendor/github.com/containers/image/v4/tarball/tarball_transport.go create mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/alltransports.go create mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon.go create mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon_stub.go create mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/ostree.go create mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/ostree_stub.go create mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/storage.go create mode 100644 vendor/github.com/containers/image/v4/transports/alltransports/storage_stub.go create mode 100644 vendor/github.com/containers/image/v4/transports/stub.go create mode 100644 vendor/github.com/containers/image/v4/transports/transports.go create mode 100644 vendor/github.com/containers/image/v4/types/types.go create mode 100644 vendor/github.com/containers/image/v4/version/version.go delete mode 100644 vendor/github.com/containers/image/version/version.go create mode 100644 vendor/github.com/containers/libtrust/CONTRIBUTING.md create mode 100644 vendor/github.com/containers/libtrust/LICENSE create mode 100644 vendor/github.com/containers/libtrust/MAINTAINERS create mode 100644 vendor/github.com/containers/libtrust/README.md create mode 100644 vendor/github.com/containers/libtrust/certificates.go create mode 100644 vendor/github.com/containers/libtrust/doc.go create mode 100644 vendor/github.com/containers/libtrust/ec_key.go create mode 100644 vendor/github.com/containers/libtrust/ec_key_no_openssl.go create mode 100644 vendor/github.com/containers/libtrust/ec_key_openssl.go create mode 100644 vendor/github.com/containers/libtrust/filter.go create mode 100644 vendor/github.com/containers/libtrust/hash.go create mode 100644 vendor/github.com/containers/libtrust/jsonsign.go create mode 100644 vendor/github.com/containers/libtrust/key.go create mode 100644 vendor/github.com/containers/libtrust/key_files.go create mode 100644 vendor/github.com/containers/libtrust/key_manager.go create mode 100644 vendor/github.com/containers/libtrust/rsa_key.go create mode 100644 vendor/github.com/containers/libtrust/util.go create mode 100644 vendor/github.com/docker/docker/api/types/error_response_ext.go delete mode 100644 vendor/github.com/docker/docker/client/session.go create mode 100644 vendor/github.com/docker/docker/errdefs/http_helpers.go create mode 100644 vendor/github.com/docker/docker/pkg/system/args_windows.go create mode 100644 vendor/github.com/docker/go-metrics/go.mod create mode 100644 vendor/github.com/docker/go-metrics/go.sum delete mode 100644 vendor/github.com/docker/libtrust/CONTRIBUTING.md delete mode 100644 vendor/github.com/docker/libtrust/LICENSE delete mode 100644 vendor/github.com/docker/libtrust/MAINTAINERS delete mode 100644 vendor/github.com/docker/libtrust/README.md delete mode 100644 vendor/github.com/docker/libtrust/certificates.go delete mode 100644 vendor/github.com/docker/libtrust/doc.go delete mode 100644 vendor/github.com/docker/libtrust/ec_key.go delete mode 100644 vendor/github.com/docker/libtrust/filter.go delete mode 100644 vendor/github.com/docker/libtrust/hash.go delete mode 100644 vendor/github.com/docker/libtrust/jsonsign.go delete mode 100644 
vendor/github.com/docker/libtrust/key.go delete mode 100644 vendor/github.com/docker/libtrust/key_files.go delete mode 100644 vendor/github.com/docker/libtrust/key_manager.go delete mode 100644 vendor/github.com/docker/libtrust/rsa_key.go delete mode 100644 vendor/github.com/docker/libtrust/util.go delete mode 100644 vendor/github.com/fsouza/go-dockerclient/Gopkg.toml create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto delete mode 100644 vendor/github.com/gorilla/mux/.travis.yml delete mode 100644 vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md create mode 100644 vendor/github.com/mattn/go-shellwords/go.test.sh create mode 100644 vendor/github.com/prometheus/procfs/mountinfo.go create mode 100644 vendor/github.com/prometheus/procfs/proc_environ.go create mode 100644 vendor/google.golang.org/genproto/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 vendor/google.golang.org/grpc/AUTHORS create mode 100644 vendor/google.golang.org/grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 vendor/google.golang.org/grpc/codes/codes.go create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/status/status.go

diff --git a/cmd/podman/login.go b/cmd/podman/login.go
index 36262fd4d..96b4ac2a2 100644
--- a/cmd/podman/login.go
+++ b/cmd/podman/login.go
@@ -6,9 +6,9 @@ import (
 	"os"
 	"strings"
 
-	"github.com/containers/image/docker"
-	"github.com/containers/image/pkg/docker/config"
-	"github.com/containers/image/types"
+	"github.com/containers/image/v4/docker"
+	"github.com/containers/image/v4/pkg/docker/config"
+	"github.com/containers/image/v4/types"
 	"github.com/containers/libpod/cmd/podman/cliconfig"
 	"github.com/containers/libpod/cmd/podman/shared"
 	"github.com/containers/libpod/libpod/image"
diff --git a/cmd/podman/logout.go b/cmd/podman/logout.go
index 66dc82363..6d6db4b41 100644
--- a/cmd/podman/logout.go
+++ b/cmd/podman/logout.go
@@ -3,8 +3,8 @@ package main
 import (
 	"fmt"
 
-	"github.com/containers/image/docker"
-	"github.com/containers/image/pkg/docker/config"
+	"github.com/containers/image/v4/docker"
+	"github.com/containers/image/v4/pkg/docker/config"
 	"github.com/containers/libpod/cmd/podman/cliconfig"
 	"github.com/containers/libpod/cmd/podman/shared"
 	"github.com/containers/libpod/libpod/image"
diff --git a/cmd/podman/pull.go b/cmd/podman/pull.go
index 53f133929..f8a658297 100644
--- a/cmd/podman/pull.go
+++ b/cmd/podman/pull.go
@@ -6,10 +6,10 @@ import (
 	"os"
 	"strings"
 
-	"github.com/containers/image/docker"
-	dockerarchive "github.com/containers/image/docker/archive"
-	"github.com/containers/image/transports/alltransports"
-	"github.com/containers/image/types"
+	"github.com/containers/image/v4/docker"
+	dockerarchive "github.com/containers/image/v4/docker/archive"
+	"github.com/containers/image/v4/transports/alltransports"
+	"github.com/containers/image/v4/types"
 	"github.com/containers/libpod/cmd/podman/cliconfig"
 	"github.com/containers/libpod/cmd/podman/shared"
 	"github.com/containers/libpod/libpod/image"
diff --git a/cmd/podman/push.go b/cmd/podman/push.go
index 52fbc652e..36c4988a1 100644
--- a/cmd/podman/push.go
+++ b/cmd/podman/push.go
@@ -6,9 +6,9 @@ import (
 	"os"
 	"strings"
 
-	"github.com/containers/image/directory"
-	"github.com/containers/image/manifest"
-	"github.com/containers/image/types"
+	"github.com/containers/image/v4/directory"
+	"github.com/containers/image/v4/manifest"
+	"github.com/containers/image/v4/types"
 	"github.com/containers/libpod/cmd/podman/cliconfig"
 	"github.com/containers/libpod/cmd/podman/shared"
 	"github.com/containers/libpod/libpod/image"
diff --git a/cmd/podman/runlabel.go b/cmd/podman/runlabel.go
index db6d390d5..0369612b9 100644
--- a/cmd/podman/runlabel.go
+++ b/cmd/podman/runlabel.go
@@ -6,7 +6,7 @@ import (
 	"os"
 	"strings"
 
-	"github.com/containers/image/types"
+	"github.com/containers/image/v4/types"
 	"github.com/containers/libpod/cmd/podman/cliconfig"
 	"github.com/containers/libpod/cmd/podman/libpodruntime"
 	"github.com/containers/libpod/cmd/podman/shared"
diff --git a/cmd/podman/search.go b/cmd/podman/search.go
index f4c51bff1..9dad69297 100644
--- a/cmd/podman/search.go
+++ b/cmd/podman/search.go
@@ -5,7 +5,7 @@ import (
 	"strings"
 
 	"github.com/containers/buildah/pkg/formats"
-	"github.com/containers/image/types"
+	"github.com/containers/image/v4/types"
 	"github.com/containers/libpod/cmd/podman/cliconfig"
 	"github.com/containers/libpod/cmd/podman/shared"
 	"github.com/containers/libpod/libpod/image"
diff --git a/cmd/podman/shared/container.go b/cmd/podman/shared/container.go
index 5122d37d1..022377b1f 100644
--- a/cmd/podman/shared/container.go
+++ b/cmd/podman/shared/container.go
@@ -13,7 +13,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/containers/image/types"
+	"github.com/containers/image/v4/types"
 	"github.com/containers/libpod/libpod"
 	"github.com/containers/libpod/libpod/define"
 	"github.com/containers/libpod/libpod/image"
diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go
index fc8197721..c9b05d2c4 100644
--- a/cmd/podman/shared/create.go
+++ b/cmd/podman/shared/create.go
@@ -12,7 +12,7 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/containers/image/manifest"
+	"github.com/containers/image/v4/manifest"
 	"github.com/containers/libpod/cmd/podman/shared/parse"
 	"github.com/containers/libpod/libpod"
 	"github.com/containers/libpod/libpod/image"
diff --git a/cmd/podman/sign.go b/cmd/podman/sign.go
index 79bc3f02b..b6e82ba0b 100644
--- a/cmd/podman/sign.go
+++ b/cmd/podman/sign.go
@@ -8,9 +8,9 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/containers/image/signature"
-	"github.com/containers/image/transports"
-	"github.com/containers/image/transports/alltransports"
+	"github.com/containers/image/v4/signature"
+	"github.com/containers/image/v4/transports"
+	"github.com/containers/image/v4/transports/alltransports"
 	"github.com/containers/libpod/cmd/podman/cliconfig"
 	"github.com/containers/libpod/cmd/podman/libpodruntime"
 	"github.com/containers/libpod/libpod/image"
diff --git a/contrib/perftest/main.go b/contrib/perftest/main.go
index 9b928a6b3..463c35ec2 100644
--- a/contrib/perftest/main.go
+++ b/contrib/perftest/main.go
@@ -9,7 +9,7 @@ import (
 	"text/tabwriter"
 	"time"
 
-	"github.com/containers/image/types"
+	"github.com/containers/image/v4/types"
 	"github.com/containers/libpod/libpod"
 	image2 "github.com/containers/libpod/libpod/image"
 	cc "github.com/containers/libpod/pkg/spec"
diff --git a/go.mod b/go.mod
index acf81255f..40a05553c 100644
--- a/go.mod
+++ b/go.mod
@@ -3,17 +3,19 @@ module github.com/containers/libpod
 go 1.12
 
 require (
-	github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 // indirect
 	github.com/BurntSushi/toml v0.3.1
+	github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
 	github.com/blang/semver v3.5.1+incompatible // indirect
 	github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
 	github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
 	github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
+	github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50 // indirect
 	github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect
 	github.com/containernetworking/cni v0.7.1
 	github.com/containernetworking/plugins v0.8.2
-	github.com/containers/buildah v1.11.2
-	github.com/containers/image v3.0.2+incompatible
+	github.com/containers/buildah v1.11.3
+	github.com/containers/image v3.0.2+incompatible // indirect
+	github.com/containers/image/v4 v4.0.1
 	github.com/containers/psgo v1.3.1
 	github.com/containers/storage v1.13.4
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
@@ -22,10 +24,9 @@ require (
 	github.com/cyphar/filepath-securejoin v0.2.2
 	github.com/davecgh/go-spew v1.1.1
 	github.com/docker/distribution v2.7.1+incompatible
-	github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16
+	github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b
 	github.com/docker/docker-credential-helpers v0.6.3
 	github.com/docker/go-connections v0.4.0
-	github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect
 	github.com/docker/go-units v0.4.0
 	github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
 	github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f // indirect
@@ -33,7 +34,6 @@
 	github.com/etcd-io/bbolt v1.3.3
 	github.com/fatih/camelcase v1.0.0
 	github.com/fsnotify/fsnotify v1.4.7
-	github.com/fsouza/go-dockerclient v1.4.1 // indirect
 	github.com/ghodss/yaml v1.0.0
 	github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f
 	github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf
@@ -41,14 +41,13 @@
 	github.com/hpcloud/tail v1.0.0
 	github.com/imdario/mergo v0.3.7 // indirect
 	github.com/json-iterator/go v1.1.7
-	github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
 	github.com/mattn/go-isatty v0.0.8 // indirect
-	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/moby/moby v0.0.0-20171005181806-f8806b18b4b9 // indirect
 	github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
 	github.com/onsi/ginkgo v1.10.1
 	github.com/onsi/gomega v1.7.0
 	github.com/opencontainers/go-digest v1.0.0-rc1
-	github.com/opencontainers/image-spec v1.0.1
+	github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
 	github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158
 	github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7
 	github.com/opencontainers/runtime-tools v0.9.0
@@ -57,9 +56,7 @@
 	github.com/pkg/errors v0.8.1
 	github.com/pkg/profile
v1.3.0 github.com/pmezard/go-difflib v1.0.0 - github.com/prometheus/common v0.6.0 // indirect github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f // indirect - github.com/seccomp/libseccomp-golang v0.9.1 // indirect github.com/sirupsen/logrus v1.4.2 github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.5 @@ -68,20 +65,16 @@ require ( github.com/uber-go/atomic v1.4.0 // indirect github.com/uber/jaeger-client-go v2.19.0+incompatible github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5 // indirect - github.com/ulikunitz/xz v0.5.6 // indirect + github.com/urfave/cli v1.21.0 // indirect github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b github.com/vishvananda/netlink v1.0.0 - github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f // indirect - go.etcd.io/bbolt v1.3.3 // indirect go.uber.org/atomic v1.4.0 // indirect - golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4 + golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect golang.org/x/sync v0.0.0-20190423024810-112230192c58 - golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb - golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect + golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 google.golang.org/appengine v1.6.1 // indirect google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601 // indirect - google.golang.org/grpc v1.21.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.2.4 k8s.io/api v0.0.0-20190813020757-36bff7324fb7 diff --git a/go.sum b/go.sum index b2ad9d155..24bcc6ed8 100644 --- a/go.sum +++ b/go.sum @@ -13,6 +13,8 @@ github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6 github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc= github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -29,6 +31,8 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= @@ -42,6 +46,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= 
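The go.mod hunk above is the semantic core of this update: under Go modules' semantic import versioning, a module at major version 2 or higher must carry that version in its import path, so consumers now import github.com/containers/image/v4/... while the repository location is unchanged. That rule is what drives the purely mechanical import rewrites throughout this patch. A minimal consumer sketch, assuming the v4 module is in the build (the image name is a placeholder):

    package main

    import (
    	"fmt"

    	// Before this update the path was "github.com/containers/image/transports/alltransports".
    	"github.com/containers/image/v4/transports/alltransports"
    )

    func main() {
    	// The API is unchanged by the path move; only the import differs.
    	ref, err := alltransports.ParseImageName("docker://docker.io/library/alpine:latest")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(ref.Transport().Name()) // "docker"
    }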
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/continuity v0.0.0-20180216233310-d8fb8589b0e8/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20180814194400-c7c5070e6f6e/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -54,8 +59,14 @@ github.com/containernetworking/plugins v0.8.2 h1:5lnwfsAYO+V7yXhysJKy3E1A2Gy9oVu github.com/containernetworking/plugins v0.8.2/go.mod h1:TxALKWZpWL79BC3GOYKJzzXr7U8R23PdhwaLp6F3adc= github.com/containers/buildah v1.11.2 h1:U6Abrp1J7H19vHvhqIran4Xvw+Z3WIqMM86fIt9L7Qk= github.com/containers/buildah v1.11.2/go.mod h1:CtnP3vsLiU3xgKvkhdb4b0IzYwXNzHRv3ezl4z+RPC0= +github.com/containers/buildah v1.11.3 h1:L5vFj+ao58IGq3G30jN94vRQrIgMU/uTOEKduDr3Nyg= +github.com/containers/buildah v1.11.3/go.mod h1:jqZmSU/PhFwTHHlOotnw4bbs1JbkRQLh8dut5DF4Qek= github.com/containers/image v3.0.2+incompatible h1:B1lqAE8MUPCrsBLE86J0gnXleeRq8zJnQryhiiGQNyE= github.com/containers/image v3.0.2+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= +github.com/containers/image/v4 v4.0.1 h1:idNGHChj0Pyv3vLrxul2oSVMZLeFqpoq3CjLeVgapSQ= +github.com/containers/image/v4 v4.0.1/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA= +github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= +github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/psgo v1.3.1 h1:1kE+jJ9Ou5f9zQT/M2IdeSclsKWsXrSFlOcnqc+F2TA= github.com/containers/psgo v1.3.1/go.mod h1:LLiRMmxZ6FWP4bB/fOUu6kDT+4okk/ZCeeykqh0O5Ns= github.com/containers/storage v1.13.2 h1:UXZ0Ckmk6+6+4vj2M2ywruVtH97pnRoAhTG8ctd+yQI= @@ -94,18 +105,25 @@ github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65/go.mod h1:J2gT github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v0.0.0-20180522102801-da99009bbb11/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23 h1:mJtkfC9RUrUWHMk0cFDNhVoc9U3k2FRAzEZ+5pqSIHo= github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16 h1:dmUn0SuGx7unKFwxyeQ/oLUHhEfZosEDrpmYM+6MTuc= github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b h1:+Ga+YpCDpcY1fln6GI0fiiirpqHGcob5/Vk3oKNuGdU= +github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.1 
h1:Dq4iIfcM7cNtddhLVWe9h4QDjsi4OER3Z8voPu/I52g= github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.0.0-20180212134524-7beb39f0b969/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 h1:X0fj836zx99zFu83v/M79DuBn84IL/Syx1SY6Y5ZEMA= github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -139,7 +157,10 @@ github.com/fsouza/go-dockerclient v1.3.0 h1:tOXkq/5++XihrAvH5YNwCTdPeQg3XVcC6WI2 github.com/fsouza/go-dockerclient v1.3.0/go.mod h1:IN9UPc4/w7cXiARH2Yg99XxUHbAM+6rAi9hzBVbkWRU= github.com/fsouza/go-dockerclient v1.4.1 h1:W7wuJ3IB48WYZv/UBk9dCTIb9oX805+L9KIm65HcUYs= github.com/fsouza/go-dockerclient v1.4.1/go.mod h1:PUNHxbowDqRXfRgZqMz1OeGtbWC6VKyZvJ99hDjB0qs= +github.com/fsouza/go-dockerclient v1.4.4 h1:Sd5nD4wdAgiPxvrbYUzT2ZZNmPk3z+GGnZ+frvw8z04= +github.com/fsouza/go-dockerclient v1.4.4/go.mod h1:PrwszSL5fbmsESocROrOGq/NULMXRw+bajY0ltzD6MA= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v0.0.0-20161207003320-04f313413ffd/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -153,6 +174,7 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f h1:zlOR3rOlPAVvtfuxGKoghCmop5B0TRyu/ZieziZuGiM= github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/gogo/protobuf v0.0.0-20170815085658-fcdc5011193f/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= @@ -169,10 +191,13 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= @@ -187,10 +212,14 @@ github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsC github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v0.0.0-20170217192616-94e7d24fd285/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -227,6 +256,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5g2Y= github.com/klauspost/compress v1.7.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.8.1 h1:oygt2ychZFHOB6M9gUgajzgKrwRgHbGC77NwA4COVgI= +github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM= @@ -249,6 +280,8 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9wIsXc= github.com/mattn/go-shellwords v1.0.5/go.mod 
h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8= @@ -291,6 +324,8 @@ github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2i github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU= +github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190425234816-dae70e8efea4/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8 h1:dDCFes8Hj1r/i5qnypONo5jdOme/8HWZC/aNDyhECt0= @@ -326,9 +361,13 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9 h1:kyf9snWXHvQc+yxE9imhdI8YAm4oKeZISlaAR+x73zs= +github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -338,6 +377,8 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= 
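Two notes on reading the go.sum churn above: each module version is pinned by a pair of lines, an h1: checksum over the module's file tree plus one over its go.mod alone, and entries for superseded versions remain because go.sum records every version observed in the module graph, not only the selected ones. When go.mod keeps an // indirect requirement around (as with github.com/containers/image v3.0.2+incompatible here), the built binary can be asked directly whether that module contributed any code. A hedged diagnostic sketch, assuming a module-mode build with Go 1.12 or later:

    package main

    import (
    	"fmt"
    	"runtime/debug"
    )

    func main() {
    	// ReadBuildInfo returns the module list the toolchain embedded at build
    	// time; a requirement whose packages are never linked will not appear.
    	info, ok := debug.ReadBuildInfo()
    	if !ok {
    		fmt.Println("binary was built without module support")
    		return
    	}
    	for _, dep := range info.Deps {
    		fmt.Printf("%s %s\n", dep.Path, dep.Version)
    	}
    }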
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 h1:2c1EFnZHIPCW8qKWgHMH/fX2PkSabFc5mrVzfUNdg5U= @@ -354,6 +395,7 @@ github.com/sirupsen/logrus v0.0.0-20190403091019-9b3cdde74fbe/go.mod h1:ni0Sbl8b github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= @@ -410,8 +452,11 @@ github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f h1:nBX3nTcmxEtHS github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA= +github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20190816131739-be0936907f66/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xeipuuv/gojsonschema v1.1.0 h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg= github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -428,6 +473,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4 h1:ydJNl0ENAG67pFbB+9tfhiL2pYqLhfoaZFw/cjLhY4A= golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -472,10 +519,15 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190425145619-16072639606e h1:4ktJgTV34+N3qOZUc5fAaG3Pb11qzMm3PkAoTAgUZ2I= golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 h1:tdsQdquKbTNMsSZLqnLELJGzCANp9oXhu6zFBW6ODx4= +golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -485,6 +537,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 h1:xQwXv67TxFo9nC1GJFyab5eq/5B590r6RlnL/G8Sz7w= +golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -493,6 +547,7 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -506,11 +561,15 @@ google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dT google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc 
v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= @@ -530,12 +589,14 @@ gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= k8s.io/api v0.0.0-20190813020757-36bff7324fb7 h1:4uJOjRn9kWq4AqJRE8+qzmAy+lJd9rh8TY455dNef4U= k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN7Vh59Bw0vh9jhoX+V58= k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010 h1:pyoq062NftC1y/OcnbSvgolyZDJ8y4fmUPWMkdA6gfU= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= +k8s.io/client-go v0.0.0-20170217214107-bcde30fb7eae/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083 h1:+Qf/nITucAbm09aIdxvoA+7X0BwaXmQGVoR8k7Ynk9o= k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= k8s.io/client-go v0.0.0-20190620085101-78d2af792bab h1:E8Fecph0qbNsAbijJJQryKu4Oi9QTp5cVpjTE+nqg6g= diff --git a/libpod/container.go b/libpod/container.go index 3d8e58375..f36ddbd3f 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -10,7 +10,7 @@ import ( "github.com/containernetworking/cni/pkg/types" cnitypes "github.com/containernetworking/cni/pkg/types/current" - "github.com/containers/image/manifest" + "github.com/containers/image/v4/manifest" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/lock" "github.com/containers/libpod/pkg/namespaces" diff --git a/libpod/container_commit.go b/libpod/container_commit.go index 8dfeee9b8..570d406b7 100644 --- a/libpod/container_commit.go +++ b/libpod/container_commit.go @@ -8,7 +8,7 @@ import ( "github.com/containers/buildah" "github.com/containers/buildah/util" - is "github.com/containers/image/storage" + is 
"github.com/containers/image/v4/storage" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/libpod/image" diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go index 3c32a2f45..5a92b3e54 100644 --- a/libpod/container_inspect.go +++ b/libpod/container_inspect.go @@ -5,7 +5,7 @@ import ( "strings" "time" - "github.com/containers/image/manifest" + "github.com/containers/image/v4/manifest" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/driver" "github.com/containers/libpod/pkg/util" diff --git a/libpod/image/docker_registry_options.go b/libpod/image/docker_registry_options.go index 60bb3c33f..d205fe4ac 100644 --- a/libpod/image/docker_registry_options.go +++ b/libpod/image/docker_registry_options.go @@ -3,8 +3,8 @@ package image import ( "fmt" - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/types" podmanVersion "github.com/containers/libpod/version" ) diff --git a/libpod/image/image.go b/libpod/image/image.go index 855da8611..9adefb5c5 100644 --- a/libpod/image/image.go +++ b/libpod/image/image.go @@ -12,17 +12,17 @@ import ( "syscall" "time" - cp "github.com/containers/image/copy" - "github.com/containers/image/directory" - dockerarchive "github.com/containers/image/docker/archive" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - ociarchive "github.com/containers/image/oci/archive" - is "github.com/containers/image/storage" - "github.com/containers/image/tarball" - "github.com/containers/image/transports" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" + cp "github.com/containers/image/v4/copy" + "github.com/containers/image/v4/directory" + dockerarchive "github.com/containers/image/v4/docker/archive" + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/manifest" + ociarchive "github.com/containers/image/v4/oci/archive" + is "github.com/containers/image/v4/storage" + "github.com/containers/image/v4/tarball" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/transports/alltransports" + "github.com/containers/image/v4/types" "github.com/containers/libpod/libpod/driver" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/inspect" diff --git a/libpod/image/parts.go b/libpod/image/parts.go index dfdf0b08a..69bc44cdd 100644 --- a/libpod/image/parts.go +++ b/libpod/image/parts.go @@ -3,7 +3,7 @@ package image import ( "strings" - "github.com/containers/image/docker/reference" + "github.com/containers/image/v4/docker/reference" "github.com/pkg/errors" ) diff --git a/libpod/image/pull.go b/libpod/image/pull.go index dbf3a4ef5..36950b6f3 100644 --- a/libpod/image/pull.go +++ b/libpod/image/pull.go @@ -7,17 +7,17 @@ import ( "path/filepath" "strings" - cp "github.com/containers/image/copy" - "github.com/containers/image/directory" - "github.com/containers/image/docker" - dockerarchive "github.com/containers/image/docker/archive" - "github.com/containers/image/docker/tarfile" - ociarchive "github.com/containers/image/oci/archive" - oci "github.com/containers/image/oci/layout" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" + cp 
"github.com/containers/image/v4/copy" + "github.com/containers/image/v4/directory" + "github.com/containers/image/v4/docker" + dockerarchive "github.com/containers/image/v4/docker/archive" + "github.com/containers/image/v4/docker/tarfile" + ociarchive "github.com/containers/image/v4/oci/archive" + oci "github.com/containers/image/v4/oci/layout" + is "github.com/containers/image/v4/storage" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/transports/alltransports" + "github.com/containers/image/v4/types" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/registries" "github.com/hashicorp/go-multierror" diff --git a/libpod/image/pull_test.go b/libpod/image/pull_test.go index 3890c5e6c..131b8b1f6 100644 --- a/libpod/image/pull_test.go +++ b/libpod/image/pull_test.go @@ -9,9 +9,9 @@ import ( "strings" "testing" - "github.com/containers/image/transports" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/transports/alltransports" + "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" "github.com/stretchr/testify/assert" diff --git a/libpod/image/search.go b/libpod/image/search.go index 82ef4f75a..0313c2d6e 100644 --- a/libpod/image/search.go +++ b/libpod/image/search.go @@ -6,8 +6,8 @@ import ( "strings" "sync" - "github.com/containers/image/docker" - "github.com/containers/image/types" + "github.com/containers/image/v4/docker" + "github.com/containers/image/v4/types" sysreg "github.com/containers/libpod/pkg/registries" "github.com/pkg/errors" "github.com/sirupsen/logrus" diff --git a/libpod/image/utils.go b/libpod/image/utils.go index 544796a4b..e4ff1cfc4 100644 --- a/libpod/image/utils.go +++ b/libpod/image/utils.go @@ -7,10 +7,10 @@ import ( "regexp" "strings" - cp "github.com/containers/image/copy" - "github.com/containers/image/docker/reference" - "github.com/containers/image/signature" - "github.com/containers/image/types" + cp "github.com/containers/image/v4/copy" + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/signature" + "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/pkg/errors" ) diff --git a/libpod/options.go b/libpod/options.go index 22ab22a95..ee44439ac 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -7,7 +7,7 @@ import ( "regexp" "syscall" - "github.com/containers/image/manifest" + "github.com/containers/image/v4/manifest" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/namespaces" diff --git a/libpod/runtime.go b/libpod/runtime.go index 9f3549e00..cdb5670ba 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -17,8 +17,8 @@ import ( "time" "github.com/BurntSushi/toml" - is "github.com/containers/image/storage" - "github.com/containers/image/types" + is "github.com/containers/image/v4/storage" + "github.com/containers/image/v4/types" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/libpod/image" diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go index 20dee4080..8cc501629 100644 --- a/libpod/runtime_img.go +++ b/libpod/runtime_img.go @@ -17,9 +17,9 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/containers/image/directory" - dockerarchive 
"github.com/containers/image/docker/archive" - ociarchive "github.com/containers/image/oci/archive" + "github.com/containers/image/v4/directory" + dockerarchive "github.com/containers/image/v4/docker/archive" + ociarchive "github.com/containers/image/v4/oci/archive" "github.com/opencontainers/image-spec/specs-go/v1" ) diff --git a/libpod/storage.go b/libpod/storage.go index 0814672be..9a06c96fd 100644 --- a/libpod/storage.go +++ b/libpod/storage.go @@ -4,8 +4,8 @@ import ( "context" "time" - istorage "github.com/containers/image/storage" - "github.com/containers/image/types" + istorage "github.com/containers/image/v4/storage" + "github.com/containers/image/v4/types" "github.com/containers/libpod/libpod/define" "github.com/containers/storage" "github.com/opencontainers/image-spec/specs-go/v1" diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go index afca4c948..51efdccc7 100644 --- a/pkg/adapter/containers.go +++ b/pkg/adapter/containers.go @@ -16,7 +16,7 @@ import ( "time" "github.com/containers/buildah" - "github.com/containers/image/manifest" + "github.com/containers/image/v4/manifest" "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/cmd/podman/shared/parse" diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go index c8d57e2a2..ebaaf37ae 100644 --- a/pkg/adapter/pods.go +++ b/pkg/adapter/pods.go @@ -11,7 +11,7 @@ import ( "strings" "github.com/containers/buildah/pkg/parse" - "github.com/containers/image/types" + "github.com/containers/image/v4/types" "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/libpod" diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go index fd6587505..0706d4b6a 100644 --- a/pkg/adapter/runtime.go +++ b/pkg/adapter/runtime.go @@ -14,8 +14,8 @@ import ( "github.com/containers/buildah/imagebuildah" "github.com/containers/buildah/pkg/formats" "github.com/containers/buildah/pkg/parse" - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/types" "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/cmd/podman/libpodruntime" "github.com/containers/libpod/cmd/podman/shared" diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go index f079b914a..3b808a2ee 100644 --- a/pkg/adapter/runtime_remote.go +++ b/pkg/adapter/runtime_remote.go @@ -17,8 +17,8 @@ import ( "github.com/containers/buildah/imagebuildah" "github.com/containers/buildah/pkg/formats" - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/types" "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/cmd/podman/remoteclientconfig" iopodman "github.com/containers/libpod/cmd/podman/varlink" diff --git a/pkg/registries/registries.go b/pkg/registries/registries.go index de63dcbf1..b4facef42 100644 --- a/pkg/registries/registries.go +++ b/pkg/registries/registries.go @@ -5,8 +5,8 @@ import ( "path/filepath" "strings" - "github.com/containers/image/pkg/sysregistriesv2" - "github.com/containers/image/types" + "github.com/containers/image/v4/pkg/sysregistriesv2" + "github.com/containers/image/v4/types" "github.com/containers/libpod/pkg/rootless" "github.com/docker/distribution/reference" "github.com/pkg/errors" diff --git 
a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go index 7c3195be4..a65263b7d 100644 --- a/pkg/spec/createconfig.go +++ b/pkg/spec/createconfig.go @@ -7,7 +7,7 @@ import ( "strings" "syscall" - "github.com/containers/image/manifest" + "github.com/containers/image/v4/manifest" "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/namespaces" diff --git a/pkg/trust/trust.go b/pkg/trust/trust.go index 3bfe4bda1..afa89a6e8 100644 --- a/pkg/trust/trust.go +++ b/pkg/trust/trust.go @@ -11,7 +11,7 @@ import ( "path/filepath" "strings" - "github.com/containers/image/types" + "github.com/containers/image/v4/types" "github.com/pkg/errors" "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" diff --git a/pkg/util/utils.go b/pkg/util/utils.go index edcad1d1b..0190b106d 100644 --- a/pkg/util/utils.go +++ b/pkg/util/utils.go @@ -10,7 +10,7 @@ import ( "time" "github.com/BurntSushi/toml" - "github.com/containers/image/types" + "github.com/containers/image/v4/types" "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/pkg/errorhandling" "github.com/containers/libpod/pkg/namespaces" diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go index 0bdbec177..f83b93dff 100644 --- a/pkg/varlinkapi/images.go +++ b/pkg/varlinkapi/images.go @@ -16,10 +16,10 @@ import ( "github.com/containers/buildah" "github.com/containers/buildah/imagebuildah" - dockerarchive "github.com/containers/image/docker/archive" - "github.com/containers/image/manifest" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" + dockerarchive "github.com/containers/image/v4/docker/archive" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/transports/alltransports" + "github.com/containers/image/v4/types" "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/cmd/podman/varlink" "github.com/containers/libpod/libpod" diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 4334ff1cb..0385e4108 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -16,6 +16,7 @@ import ( //sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort //sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus //sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult type atomicBool int32 @@ -79,6 +80,7 @@ type win32File struct { wg sync.WaitGroup wgLock sync.RWMutex closing atomicBool + socket bool readDeadline deadlineHandler writeDeadline deadlineHandler } @@ -109,7 +111,13 @@ func makeWin32File(h syscall.Handle) (*win32File, error) { } func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - return makeWin32File(h) + // If we return the result of makeWin32File directly, it can result in an + // interface-wrapped nil, rather than a nil interface value. 
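+ // Illustrative sketch of the gotcha: an interface value holding a typed
+ // nil pointer is itself non-nil, so a caller's nil check on the returned
+ // io.ReadWriteCloser would wrongly report a usable value.
+ //
+ //	var f *win32File                // f == nil
+ //	var rwc io.ReadWriteCloser = f  // rwc != nil: the interface carries the type
+ //	fmt.Println(rwc == nil)         // prints "false"
+ //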
+ f, err := makeWin32File(h) + if err != nil { + return nil, err + } + return f, nil } // closeHandle closes the resources associated with a Win32 handle @@ -190,6 +198,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er if f.closing.isSet() { err = ErrFileClosed } + } else if err != nil && f.socket { + // err is from Win32. Query the overlapped structure to get the winsock error. + var bytes, flags uint32 + err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) } case <-timeout: cancelIoEx(f.handle, &c.o) @@ -265,6 +277,10 @@ func (f *win32File) Flush() error { return syscall.FlushFileBuffers(f.handle) } +func (f *win32File) Fd() uintptr { + return uintptr(f.handle) +} + func (d *deadlineHandler) set(deadline time.Time) error { d.setLock.Lock() defer d.setLock.Unlock() diff --git a/vendor/github.com/Microsoft/go-winio/go.mod b/vendor/github.com/Microsoft/go-winio/go.mod new file mode 100644 index 000000000..b3846826b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/go.mod @@ -0,0 +1,9 @@ +module github.com/Microsoft/go-winio + +go 1.12 + +require ( + github.com/pkg/errors v0.8.1 + github.com/sirupsen/logrus v1.4.1 + golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b +) diff --git a/vendor/github.com/Microsoft/go-winio/go.sum b/vendor/github.com/Microsoft/go-winio/go.sum new file mode 100644 index 000000000..babb4a70d --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/go.sum @@ -0,0 +1,16 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go new file mode 100644 index 000000000..dbfe790ee --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -0,0 +1,305 @@ +package winio + +import ( + "fmt" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const ( + afHvSock = 34 // AF_HYPERV + + socketError = ^uintptr(0) +) + +// An 
HvsockAddr is an address for a AF_HYPERV socket. +type HvsockAddr struct { + VMID guid.GUID + ServiceID guid.GUID +} + +type rawHvsockAddr struct { + Family uint16 + _ uint16 + VMID guid.GUID + ServiceID guid.GUID +} + +// Network returns the address's network name, "hvsock". +func (addr *HvsockAddr) Network() string { + return "hvsock" +} + +func (addr *HvsockAddr) String() string { + return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) +} + +// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. +func VsockServiceID(port uint32) guid.GUID { + g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g.Data1 = port + return g +} + +func (addr *HvsockAddr) raw() rawHvsockAddr { + return rawHvsockAddr{ + Family: afHvSock, + VMID: addr.VMID, + ServiceID: addr.ServiceID, + } +} + +func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { + addr.VMID = raw.VMID + addr.ServiceID = raw.ServiceID +} + +// HvsockListener is a socket listener for the AF_HYPERV address family. +type HvsockListener struct { + sock *win32File + addr HvsockAddr +} + +// HvsockConn is a connected socket of the AF_HYPERV address family. +type HvsockConn struct { + sock *win32File + local, remote HvsockAddr +} + +func newHvSocket() (*win32File, error) { + fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + f, err := makeWin32File(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + f.socket = true + return f, nil +} + +// ListenHvsock listens for connections on the specified hvsock address. +func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { + l := &HvsockListener{addr: *addr} + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("listen", err) + } + sa := addr.raw() + err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("socket", err)) + } + err = syscall.Listen(sock.handle, 16) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("listen", err)) + } + return &HvsockListener{sock: sock, addr: *addr}, nil +} + +func (l *HvsockListener) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} +} + +// Addr returns the listener's network address. +func (l *HvsockListener) Addr() net.Addr { + return &l.addr +} + +// Accept waits for the next connection and returns it. +func (l *HvsockListener) Accept() (_ net.Conn, err error) { + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("accept", err) + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := l.sock.prepareIo() + if err != nil { + return nil, l.opErr("accept", err) + } + defer l.sock.wg.Done() + + // AcceptEx, per documentation, requires an extra 16 bytes per address. + const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) + var addrbuf [addrlen * 2]byte + + var bytes uint32 + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) + _, err = l.sock.asyncIo(c, nil, bytes, err) + if err != nil { + return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) + } + conn := &HvsockConn{ + sock: sock, + } + conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) + conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + sock = nil + return conn, nil +} + +// Close closes the listener, causing any pending Accept calls to fail. 
+func (l *HvsockListener) Close() error { + return l.sock.Close() +} + +/* Need to finish ConnectEx handling +func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { + sock, err := newHvSocket() + if err != nil { + return nil, err + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := sock.prepareIo() + if err != nil { + return nil, err + } + defer sock.wg.Done() + var bytes uint32 + err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) + _, err = sock.asyncIo(ctx, c, nil, bytes, err) + if err != nil { + return nil, err + } + conn := &HvsockConn{ + sock: sock, + remote: *addr, + } + sock = nil + return conn, nil +} +*/ + +func (conn *HvsockConn) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} +} + +func (conn *HvsockConn) Read(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("read", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var flags, bytes uint32 + err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsarecv", err) + } + return 0, conn.opErr("read", err) + } else if n == 0 { + err = io.EOF + } + return n, err +} + +func (conn *HvsockConn) Write(b []byte) (int, error) { + t := 0 + for len(b) != 0 { + n, err := conn.write(b) + if err != nil { + return t + n, err + } + t += n + b = b[n:] + } + return t, nil +} + +func (conn *HvsockConn) write(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("write", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var bytes uint32 + err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsasend", err) + } + return 0, conn.opErr("write", err) + } + return n, err +} + +// Close closes the socket connection, failing any pending read or write calls. +func (conn *HvsockConn) Close() error { + return conn.sock.Close() +} + +func (conn *HvsockConn) shutdown(how int) error { + err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) + if err != nil { + return os.NewSyscallError("shutdown", err) + } + return nil +} + +// CloseRead shuts down the read end of the socket. +func (conn *HvsockConn) CloseRead() error { + err := conn.shutdown(syscall.SHUT_RD) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// CloseWrite shuts down the write end of the socket, notifying the other endpoint that +// no more data will be written. +func (conn *HvsockConn) CloseWrite() error { + err := conn.shutdown(syscall.SHUT_WR) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// LocalAddr returns the local address of the connection. +func (conn *HvsockConn) LocalAddr() net.Addr { + return &conn.local +} + +// RemoteAddr returns the remote address of the connection. +func (conn *HvsockConn) RemoteAddr() net.Addr { + return &conn.remote +} + +// SetDeadline implements the net.Conn SetDeadline method. 
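+// A hedged usage sketch for the pieces above (the service ID is a
+// placeholder derived from an arbitrary AF_VSOCK port, and a zero VMID is
+// assumed here to act as a local wildcard):
+//
+//	addr := &winio.HvsockAddr{ServiceID: winio.VsockServiceID(4000)}
+//	l, err := winio.ListenHvsock(addr) // bind and listen
+//	if err != nil { /* ... */ }
+//	conn, err := l.Accept()            // returns a net.Conn
+//	if err != nil { /* ... */ }
+//	conn.SetDeadline(time.Now().Add(5 * time.Second))
+//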
+func (conn *HvsockConn) SetDeadline(t time.Time) error { + conn.SetReadDeadline(t) + conn.SetWriteDeadline(t) + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (conn *HvsockConn) SetReadDeadline(t time.Time) error { + return conn.sock.SetReadDeadline(t) +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { + return conn.sock.SetWriteDeadline(t) +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index d99eedb64..d6a46f6a2 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -3,10 +3,13 @@ package winio import ( + "context" "errors" + "fmt" "io" "net" "os" + "runtime" "syscall" "time" "unsafe" @@ -18,6 +21,48 @@ import ( //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl + +type ioStatusBlock struct { + Status, Information uintptr +} + +type objectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *unicodeString + Attributes uintptr + SecurityDescriptor *securityDescriptor + SecurityQoS uintptr +} + +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer uintptr +} + +type securityDescriptor struct { + Revision byte + Sbz1 byte + Control uint16 + Owner uintptr + Group uintptr + Sacl uintptr + Dacl uintptr +} + +type ntstatus int32 + +func (status ntstatus) Err() error { + if status >= 0 { + return nil + } + return rtlNtStatusToDosError(status) +} const ( cERROR_PIPE_BUSY = syscall.Errno(231) @@ -25,21 +70,20 @@ const ( cERROR_PIPE_CONNECTED = syscall.Errno(535) cERROR_SEM_TIMEOUT = syscall.Errno(121) - cPIPE_ACCESS_DUPLEX = 0x3 - cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000 - cSECURITY_SQOS_PRESENT = 0x100000 - cSECURITY_ANONYMOUS = 0 + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 - cPIPE_REJECT_REMOTE_CLIENTS = 0x8 + cPIPE_TYPE_MESSAGE = 4 - cPIPE_UNLIMITED_INSTANCES = 255 + cPIPE_READMODE_MESSAGE = 2 - cNMPWAIT_USE_DEFAULT_WAIT = 0 - cNMPWAIT_NOWAIT = 1 + cFILE_OPEN = 1 + cFILE_CREATE = 2 - cPIPE_TYPE_MESSAGE = 4 + cFILE_PIPE_MESSAGE_TYPE = 1 + cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 - cPIPE_READMODE_MESSAGE = 2 + cSE_DACL_PRESENT = 4 ) var ( @@ -137,9 +181,30 @@ func (s pipeAddress) String() string { return string(s) } +// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. 
+func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) { + for { + select { + case <-ctx.Done(): + return syscall.Handle(0), ctx.Err() + default: + h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err == nil { + return h, nil + } + if err != cERROR_PIPE_BUSY { + return h, &os.PathError{Err: err, Op: "open", Path: *path} + } + // Wait 10 msec and try again. This is a rather simplistic + // view, as we always try each 10 milliseconds. + time.Sleep(time.Millisecond * 10) + } + } +} + // DialPipe connects to a named pipe by path, timing out if the connection // takes longer than the specified duration. If timeout is nil, then we use -// a default timeout of 5 seconds. (We do not use WaitNamedPipe.) +// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { var absTimeout time.Time if timeout != nil { @@ -147,23 +212,22 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { } else { absTimeout = time.Now().Add(time.Second * 2) } + ctx, _ := context.WithDeadline(context.Background(), absTimeout) + conn, err := DialPipeContext(ctx, path) + if err == context.DeadlineExceeded { + return nil, ErrTimeout + } + return conn, err +} + +// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` +// cancellation or timeout. +func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { var err error var h syscall.Handle - for { - h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) - if err != cERROR_PIPE_BUSY { - break - } - if time.Now().After(absTimeout) { - return nil, ErrTimeout - } - - // Wait 10 msec and try again. This is a rather simplistic - // view, as we always try each 10 milliseconds. - time.Sleep(time.Millisecond * 10) - } + h, err = tryDialPipe(ctx, &path) if err != nil { - return nil, &os.PathError{Op: "open", Path: path, Err: err} + return nil, err } var flags uint32 @@ -194,43 +258,87 @@ type acceptResponse struct { } type win32PipeListener struct { - firstHandle syscall.Handle - path string - securityDescriptor []byte - config PipeConfig - acceptCh chan (chan acceptResponse) - closeCh chan int - doneCh chan int + firstHandle syscall.Handle + path string + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int } -func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) { - var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + path16, err := syscall.UTF16FromString(path) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + var oa objectAttributes + oa.Length = unsafe.Sizeof(oa) + + var ntPath unicodeString + if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + defer localFree(ntPath.Buffer) + oa.ObjectName = &ntPath + + // The security descriptor is only needed for the first pipe. 
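+	// Later instances are opened with cFILE_OPEN rather than cFILE_CREATE
+	// (see `disposition` below), so no security descriptor is passed for them.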
if first { - flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE + if sd != nil { + len := uint32(len(sd)) + sdb := localAlloc(0, len) + defer localFree(sdb) + copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) + oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) + } else { + // Construct the default named pipe security descriptor. + var dacl uintptr + if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { + return 0, fmt.Errorf("getting default named pipe ACL: %s", err) + } + defer localFree(dacl) + + sdb := &securityDescriptor{ + Revision: 1, + Control: cSE_DACL_PRESENT, + Dacl: dacl, + } + oa.SecurityDescriptor = sdb + } } - var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS + typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) if c.MessageMode { - mode |= cPIPE_TYPE_MESSAGE + typ |= cFILE_PIPE_MESSAGE_TYPE } - sa := &syscall.SecurityAttributes{} - sa.Length = uint32(unsafe.Sizeof(*sa)) - if securityDescriptor != nil { - len := uint32(len(securityDescriptor)) - sa.SecurityDescriptor = localAlloc(0, len) - defer localFree(sa.SecurityDescriptor) - copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor) + disposition := uint32(cFILE_OPEN) + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + if first { + disposition = cFILE_CREATE + // By not asking for read or write access, the named pipe file system + // will put this pipe into an initially disconnected state, blocking + // client connections until the next call with first == false. + access = syscall.SYNCHRONIZE } - h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa) + + timeout := int64(-50 * 10000) // 50ms + + var ( + h syscall.Handle + iosb ioStatusBlock + ) + err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } + + runtime.KeepAlive(ntPath) return h, nil } func (l *win32PipeListener) makeServerPipe() (*win32File, error) { - h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false) + h, err := makeServerPipeHandle(l.path, nil, &l.config, false) if err != nil { return nil, err } @@ -341,32 +449,13 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { if err != nil { return nil, err } - // Create a client handle and connect it. This results in the pipe - // instance always existing, so that clients see ERROR_PIPE_BUSY - // rather than ERROR_FILE_NOT_FOUND. This ties the first instance - // up so that no other instances can be used. This would have been - // cleaner if the Win32 API matched CreateFile with ConnectNamedPipe - // instead of CreateNamedPipe. (Apparently created named pipes are - // considered to be in listening state regardless of whether any - // active calls to ConnectNamedPipe are outstanding.) - h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) - if err != nil { - syscall.Close(h) - return nil, err - } - // Close the client handle. The server side of the instance will - // still be busy, leading to ERROR_PIPE_BUSY instead of - // ERROR_NOT_FOUND, as long as we don't close the server handle, - // or disconnect the client with DisconnectNamedPipe. 
- syscall.Close(h2) l := &win32PipeListener{ - firstHandle: h, - path: path, - securityDescriptor: sd, - config: *c, - acceptCh: make(chan (chan acceptResponse)), - closeCh: make(chan int), - doneCh: make(chan int), + firstHandle: h, + path: path, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), } go l.listenerRoutine() return l, nil diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go new file mode 100644 index 000000000..586406577 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -0,0 +1,235 @@ +// Package guid provides a GUID type. The backing structure for a GUID is +// identical to that used by the golang.org/x/sys/windows GUID type. +// There are two main binary encodings used for a GUID, the big-endian encoding, +// and the Windows (mixed-endian) encoding. See here for details: +// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding +package guid + +import ( + "crypto/rand" + "crypto/sha1" + "encoding" + "encoding/binary" + "fmt" + "strconv" + + "golang.org/x/sys/windows" +) + +// Variant specifies which GUID variant (or "type") of the GUID. It determines +// how the entirety of the rest of the GUID is interpreted. +type Variant uint8 + +// The variants specified by RFC 4122. +const ( + // VariantUnknown specifies a GUID variant which does not conform to one of + // the variant encodings specified in RFC 4122. + VariantUnknown Variant = iota + VariantNCS + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// Version specifies how the bits in the GUID were generated. For instance, a +// version 4 GUID is randomly generated, and a version 5 is generated from the +// hash of an input string. +type Version uint8 + +var _ = (encoding.TextMarshaler)(GUID{}) +var _ = (encoding.TextUnmarshaler)(&GUID{}) + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID + +// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. +func NewV4() (GUID, error) { + var b [16]byte + if _, err := rand.Read(b[:]); err != nil { + return GUID{}, err + } + + g := FromArray(b) + g.setVersion(4) // Version 4 means randomly generated. + g.setVariant(VariantRFC4122) + + return g, nil +} + +// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) +// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, +// and the sample code treats it as a series of bytes, so we do the same here. +// +// Some implementations, such as those found on Windows, treat the name as a +// big-endian UTF16 stream of bytes. If that is desired, the string can be +// encoded as such before being passed to this function. +func NewV5(namespace GUID, name []byte) (GUID, error) { + b := sha1.New() + namespaceBytes := namespace.ToArray() + b.Write(namespaceBytes[:]) + b.Write(name) + + a := [16]byte{} + copy(a[:], b.Sum(nil)) + + g := FromArray(a) + g.setVersion(5) // Version 5 means generated from a string. 
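+	// The RFC 4122 variant puts 10 in the two high bits of Data4[0] (see setVariant).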
+ g.setVariant(VariantRFC4122) + + return g, nil +} + +func fromArray(b [16]byte, order binary.ByteOrder) GUID { + var g GUID + g.Data1 = order.Uint32(b[0:4]) + g.Data2 = order.Uint16(b[4:6]) + g.Data3 = order.Uint16(b[6:8]) + copy(g.Data4[:], b[8:16]) + return g +} + +func (g GUID) toArray(order binary.ByteOrder) [16]byte { + b := [16]byte{} + order.PutUint32(b[0:4], g.Data1) + order.PutUint16(b[4:6], g.Data2) + order.PutUint16(b[6:8], g.Data3) + copy(b[8:16], g.Data4[:]) + return b +} + +// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. +func FromArray(b [16]byte) GUID { + return fromArray(b, binary.BigEndian) +} + +// ToArray returns an array of 16 bytes representing the GUID in big-endian +// encoding. +func (g GUID) ToArray() [16]byte { + return g.toArray(binary.BigEndian) +} + +// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. +func FromWindowsArray(b [16]byte) GUID { + return fromArray(b, binary.LittleEndian) +} + +// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows +// encoding. +func (g GUID) ToWindowsArray() [16]byte { + return g.toArray(binary.LittleEndian) +} + +func (g GUID) String() string { + return fmt.Sprintf( + "%08x-%04x-%04x-%04x-%012x", + g.Data1, + g.Data2, + g.Data3, + g.Data4[:2], + g.Data4[2:]) +} + +// FromString parses a string containing a GUID and returns the GUID. The only +// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +// format. +func FromString(s string) (GUID, error) { + if len(s) != 36 { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + + var g GUID + + data1, err := strconv.ParseUint(s[0:8], 16, 32) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data1 = uint32(data1) + + data2, err := strconv.ParseUint(s[9:13], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data2 = uint16(data2) + + data3, err := strconv.ParseUint(s[14:18], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data3 = uint16(data3) + + for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { + v, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data4[i] = uint8(v) + } + + return g, nil +} + +func (g *GUID) setVariant(v Variant) { + d := g.Data4[0] + switch v { + case VariantNCS: + d = (d & 0x7f) + case VariantRFC4122: + d = (d & 0x3f) | 0x80 + case VariantMicrosoft: + d = (d & 0x1f) | 0xc0 + case VariantFuture: + d = (d & 0x0f) | 0xe0 + case VariantUnknown: + fallthrough + default: + panic(fmt.Sprintf("invalid variant: %d", v)) + } + g.Data4[0] = d +} + +// Variant returns the GUID variant, as defined in RFC 4122. +func (g GUID) Variant() Variant { + b := g.Data4[0] + if b&0x80 == 0 { + return VariantNCS + } else if b&0xc0 == 0x80 { + return VariantRFC4122 + } else if b&0xe0 == 0xc0 { + return VariantMicrosoft + } else if b&0xe0 == 0xe0 { + return VariantFuture + } + return VariantUnknown +} + +func (g *GUID) setVersion(v Version) { + g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) +} + +// Version returns the GUID version, as defined in RFC 4122. +func (g GUID) Version() Version { + return Version((g.Data3 & 0xF000) >> 12) +} + +// MarshalText returns the textual representation of the GUID. 
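+// Together with UnmarshalText below, this satisfies the encoding.TextMarshaler
+// and encoding.TextUnmarshaler interface assertions near the top of this file.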
+func (g GUID) MarshalText() ([]byte, error) {
+	return []byte(g.String()), nil
+}
+
+// UnmarshalText takes the textual representation of a GUID, and unmarshals it
+// into this GUID.
+func (g *GUID) UnmarshalText(text []byte) error {
+	g2, err := FromString(string(text))
+	if err != nil {
+		return err
+	}
+	*g = g2
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go
index 20d64cf41..5cb52bc74 100644
--- a/vendor/github.com/Microsoft/go-winio/syscall.go
+++ b/vendor/github.com/Microsoft/go-winio/syscall.go
@@ -1,3 +1,3 @@
 package winio
 
-//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
+//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
index 3f527639a..e26b01faf 100644
--- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
@@ -1,4 +1,4 @@
-// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
+// Code generated by 'go generate'; DO NOT EDIT.
 
 package winio
 
@@ -38,19 +38,25 @@ func errnoErr(e syscall.Errno) error {
 
 var (
 	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+	modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
+	modntdll = windows.NewLazySystemDLL("ntdll.dll")
 	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
 
 	procCancelIoEx = modkernel32.NewProc("CancelIoEx")
 	procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
 	procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
 	procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
+	procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
 	procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
 	procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
 	procCreateFileW = modkernel32.NewProc("CreateFileW")
-	procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
 	procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
 	procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
 	procLocalAlloc = modkernel32.NewProc("LocalAlloc")
+	procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
+	procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
+	procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
+	procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
 	procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
 	procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
 	procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
@@ -69,6 +75,7 @@ var (
 	procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
 	procBackupRead = modkernel32.NewProc("BackupRead")
 	procBackupWrite = modkernel32.NewProc("BackupWrite")
+	procbind = modws2_32.NewProc("bind")
 )
 
 func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
@@ -120,6 +127,24 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
 	return
 }
 
+func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait
bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) if r1 == 0 { @@ -176,27 +201,6 @@ func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityA return } -func waitNamedPipe(name string, timeout uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _waitNamedPipe(_p0, timeout) -} - -func _waitNamedPipe(name *uint16, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) if r1 == 0 { @@ -227,6 +231,32 @@ func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { return } +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + status = ntstatus(r0) + return +} + +func rtlNtStatusToDosError(status ntstatus) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + status = ntstatus(r0) + return +} + +func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + status = ntstatus(r0) + return +} + func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(accountName) @@ -518,3 +548,15 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p } return } + +func bind(s syscall.Handle, name unsafe.Pointer, namelen 
int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion.go new file mode 100644 index 000000000..916950c02 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion.go @@ -0,0 +1,51 @@ +package osversion + +import ( + "fmt" + + "golang.org/x/sys/windows" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// Get gets the operating system version on Windows. +// The calling application must be manifested to get the correct version information. +func Get() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go new file mode 100644 index 000000000..2d9567f6f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -0,0 +1,10 @@ +package osversion + +const ( + + // RS2 was a client-only release in case you're asking why it's not in the list. + RS1 = 14393 + RS3 = 16299 + RS4 = 17134 + RS5 = 17763 +) diff --git a/vendor/github.com/containers/buildah/.travis.yml b/vendor/github.com/containers/buildah/.travis.yml index 8379c649d..fbc0a7862 100644 --- a/vendor/github.com/containers/buildah/.travis.yml +++ b/vendor/github.com/containers/buildah/.travis.yml @@ -5,6 +5,7 @@ go: - 1.11.x - 1.12.x - tip +go_import_path: github.com/containers/buildah env: global: diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 61c29b200..0db48932c 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,11 @@ # Changelog +## v1.11.2 (2019-09-13) + Add some cleanup code + Move devices code to unit specific directory. 
+ Bump back to v1.12.0-dev + ## v1.11.1 (2019-09-11) Add --devices flag to bud and from Downgrade .papr to highest atomic verion diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index b482fe1be..6f974ba86 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -12,7 +12,7 @@ import ( "github.com/containers/buildah/docker" "github.com/containers/buildah/util" - "github.com/containers/image/types" + "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/containers/storage/pkg/ioutils" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -27,7 +27,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.11.2" + Version = "1.11.3" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. // This should only be changed when we make incompatible changes to diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index 2ae070e8c..359ff5227 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,3 +1,29 @@ +- Changelog for v1.11.3 (2019-10-04) + * Update c/image to v4.0.1 + * Bump github.com/spf13/pflag from 1.0.3 to 1.0.5 + * Fix --build-args handling + * Bump github.com/spf13/cobra from 0.0.3 to 0.0.5 + * Bump github.com/cyphar/filepath-securejoin from 0.2.1 to 0.2.2 + * Bump github.com/onsi/ginkgo from 1.8.0 to 1.10.1 + * Bump github.com/fsouza/go-dockerclient from 1.3.0 to 1.4.4 + * Add support for retrieving context from stdin "-" + * Ensure bud remote context cleans up on error + * info: add cgroups2 + * Bump github.com/seccomp/libseccomp-golang from 0.9.0 to 0.9.1 + * Bump github.com/mattn/go-shellwords from 1.0.5 to 1.0.6 + * Bump github.com/stretchr/testify from 1.3.0 to 1.4.0 + * Bump github.com/opencontainers/selinux from 1.2.2 to 1.3.0 + * Bump github.com/etcd-io/bbolt from 1.3.2 to 1.3.3 + * Bump github.com/onsi/gomega from 1.5.0 to 1.7.0 + * update c/storage to v1.13.4 + * Print build 'STEP' line to stdout, not stderr + * Fix travis-ci on forks + * Vendor c/storage v1.13.3 + * Use Containerfile by default + * Added tutorial on how to include Buildah as library + * util/util: Fix "configuraitno" -> "configuration" log typo + * Bump back to v1.12.0-dev + - Changelog for v1.11.2 (2019-09-13) * Add some cleanup code * Move devices code to unit specific directory. 
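The buildah hunks that follow are largely a mechanical import-path rewrite: under Go's semantic import versioning, a module at major version 2 or higher must carry that version in its import path, so every containers/image import gains a /v4 element. A minimal sketch of the pattern, on a hypothetical file (the real changes are in the hunks below):

 import (
-	"github.com/containers/image/types"
+	"github.com/containers/image/v4/types"
 )

The same rule is why go.mod moves from github.com/containers/image v3.0.2+incompatible (a v2+ tag on a module without its own go.mod, hence the +incompatible suffix) to github.com/containers/image/v4 v4.0.1.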
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index b3b56f39a..f46609525 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -12,14 +12,14 @@ import ( "github.com/containers/buildah/pkg/blobcache" "github.com/containers/buildah/util" - cp "github.com/containers/image/copy" - "github.com/containers/image/docker" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/signature" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/types" + cp "github.com/containers/image/v4/copy" + "github.com/containers/image/v4/docker" + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/signature" + is "github.com/containers/image/v4/storage" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/stringid" diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go index c08541ac7..a8b29231d 100644 --- a/vendor/github.com/containers/buildah/common.go +++ b/vendor/github.com/containers/buildah/common.go @@ -6,8 +6,8 @@ import ( "path/filepath" "github.com/containers/buildah/pkg/unshare" - cp "github.com/containers/image/copy" - "github.com/containers/image/types" + cp "github.com/containers/image/v4/copy" + "github.com/containers/image/v4/types" "github.com/containers/storage" ) diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go index 0292ea43c..49b1930c5 100644 --- a/vendor/github.com/containers/buildah/config.go +++ b/vendor/github.com/containers/buildah/config.go @@ -8,9 +8,9 @@ import ( "time" "github.com/containers/buildah/docker" - "github.com/containers/image/manifest" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/types" "github.com/containers/storage/pkg/stringid" ociv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go index 2011619f4..4b62e0e31 100644 --- a/vendor/github.com/containers/buildah/docker/types.go +++ b/vendor/github.com/containers/buildah/docker/types.go @@ -7,8 +7,8 @@ package docker import ( "time" - "github.com/containers/image/pkg/strslice" - "github.com/opencontainers/go-digest" + "github.com/containers/image/v4/pkg/strslice" + digest "github.com/opencontainers/go-digest" ) // github.com/moby/moby/image/rootfs.go diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod index bf30e9925..0bd592d48 100644 --- a/vendor/github.com/containers/buildah/go.mod +++ b/vendor/github.com/containers/buildah/go.mod @@ -3,59 +3,49 @@ module github.com/containers/buildah go 1.12 require ( - github.com/VividCortex/ewma v1.1.1 // indirect github.com/blang/semver v3.5.0+incompatible // indirect - github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b // indirect - github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50 // indirect - 
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect github.com/containernetworking/cni v0.7.1 - github.com/containers/image v3.0.2+incompatible - github.com/containers/storage v1.13.2 - github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect - github.com/cyphar/filepath-securejoin v0.2.1 - github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65 + github.com/containers/image/v4 v4.0.1 + github.com/containers/storage v1.13.4 + github.com/cyphar/filepath-securejoin v0.2.2 + github.com/docker/distribution v2.7.1+incompatible github.com/docker/docker-credential-helpers v0.6.1 // indirect + github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.4.0 github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 - github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect - github.com/etcd-io/bbolt v1.3.2 - github.com/fsouza/go-dockerclient v1.3.0 + github.com/etcd-io/bbolt v1.3.3 + github.com/fsouza/go-dockerclient v1.4.4 github.com/ghodss/yaml v1.0.0 github.com/hashicorp/go-multierror v1.0.0 github.com/imdario/mergo v0.3.6 // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111 // indirect - github.com/mattn/go-isatty v0.0.4 // indirect - github.com/mattn/go-shellwords v1.0.5 - github.com/moby/moby v0.0.0-20171005181806-f8806b18b4b9 // indirect - github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 // indirect - github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c // indirect - github.com/onsi/ginkgo v1.8.0 - github.com/onsi/gomega v1.5.0 + github.com/mattn/go-shellwords v1.0.6 + github.com/morikuni/aec v1.0.0 // indirect + github.com/onsi/ginkgo v1.10.1 + github.com/onsi/gomega v1.7.0 github.com/opencontainers/go-digest v1.0.0-rc1 - github.com/opencontainers/image-spec v1.0.1 + github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158 github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 github.com/opencontainers/runtime-tools v0.9.0 - github.com/opencontainers/selinux v1.2.2 + github.com/opencontainers/selinux v1.3.0 github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible github.com/openshift/imagebuilder v1.1.0 github.com/pkg/errors v0.8.1 github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4 - github.com/seccomp/libseccomp-golang v0.9.0 + github.com/seccomp/libseccomp-golang v0.9.1 github.com/sirupsen/logrus v1.4.2 - github.com/spf13/cobra v0.0.3 - github.com/spf13/pflag v1.0.3 - github.com/stretchr/testify v1.3.0 + github.com/spf13/cobra v0.0.5 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.4.0 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 - github.com/ulikunitz/xz v0.5.5 // indirect - github.com/urfave/cli v1.21.0 // indirect - github.com/vbauerster/mpb v3.4.0+incompatible // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/vishvananda/netlink v1.0.0 // indirect + github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f // indirect github.com/xeipuuv/gojsonschema v1.1.0 // indirect - golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 - golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb + golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 + golang.org/x/sys 
v0.0.0-20190902133755-9109b7679e13 + golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 // indirect + google.golang.org/grpc v1.24.0 // indirect k8s.io/api v0.0.0-20190813020757-36bff7324fb7 // indirect k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083 // indirect ) diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum index 6b69f2f36..6ebb9f91f 100644 --- a/vendor/github.com/containers/buildah/go.sum +++ b/vendor/github.com/containers/buildah/go.sum @@ -1,82 +1,75 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= +github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/BurntSushi/toml v0.2.0 h1:OthAm9ZSUx4uAmn3WbPwc06nowWrByRwBsYRhbmFjBs= -github.com/BurntSushi/toml v0.2.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.4.0 h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo= github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc= github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/hcsshim v0.8.3 h1:KWCdVGOju81E0RL4ndn9/E6I4qMBi6kuPw1W4yBYlCw= -github.com/Microsoft/hcsshim v0.8.3/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod 
h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= -github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/continuity v0.0.0-20180814194400-c7c5070e6f6e/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/containerd/continuity v0.0.0-20180216233310-d8fb8589b0e8/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containernetworking/cni v0.7.0-rc2 h1:2GGDhbwdWPY53iT7LXy+LBP76Ch2D/hnw1U2zVFfGbk= -github.com/containernetworking/cni v0.7.0-rc2/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containers/image v2.0.0+incompatible h1:FTr6Br7jlIKNCKMjSOMbAxKp2keQ0//jzJaYNTVhauk= -github.com/containers/image v2.0.0+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= -github.com/containers/image v2.0.1+incompatible h1:w39mlElA/aSFZ6moFa5N+A4MWu9c8hgdMiMMYnH94Hs= -github.com/containers/image v2.0.1+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= -github.com/containers/image v3.0.0+incompatible h1:pdUHY//H+3jYNnoTt+rqY8NsStX4ZBLKzPTlMC+XvnU= -github.com/containers/image v3.0.0+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= -github.com/containers/image v3.0.1+incompatible h1:VlNEQUI1JHa1SJfJ4jz/GBt7gpk+aRYGR6TUKsxXMkU= -github.com/containers/image v3.0.1+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= github.com/containers/image v3.0.2+incompatible h1:B1lqAE8MUPCrsBLE86J0gnXleeRq8zJnQryhiiGQNyE= github.com/containers/image v3.0.2+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= -github.com/containers/storage v1.12.10-0.20190725063046-8038df61d6f6 h1:c7Fq9bbRl0Ua6swRHAH8rkrK2fSt6K+ZBrXHD50kDR4= -github.com/containers/storage v1.12.10-0.20190725063046-8038df61d6f6/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c= -github.com/containers/storage v1.12.13 h1:GtaLCY8p1Drlk1Oew581jGvB137UaO+kpz0HII67T0A= -github.com/containers/storage v1.12.13/go.mod h1:+RirK6VQAqskQlaTBrOG6ulDvn4si2QjFE1NZCn06MM= -github.com/containers/storage v1.12.14 h1:S1QGlC15gj5JOvB73W5tpVBApS4I7b/6rvxfflBAg+Q= -github.com/containers/storage v1.12.14/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c= -github.com/containers/storage v1.12.15 h1:nN/RxtEe4ejasGVJqzy+y5++pIYp54XPXzRO46xXnns= -github.com/containers/storage 
v1.12.15/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c= -github.com/containers/storage v1.12.16 h1:zePYS1GiG8CuRqLCeA0ufx4X27K06HcJLV50DdojL+Y= -github.com/containers/storage v1.12.16/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c= -github.com/containers/storage v1.13.1 h1:rjVirLS9fCGkUFlLDZEoGDDUugtIf46DufWvJu08wxQ= -github.com/containers/storage v1.13.1/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA= -github.com/containers/storage v1.13.2 h1:UXZ0Ckmk6+6+4vj2M2ywruVtH97pnRoAhTG8ctd+yQI= -github.com/containers/storage v1.13.2/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/cyphar/filepath-securejoin v0.2.1 h1:5DPkzz/0MwUpvR4fxASKzgApeq2OMFY5FfYtrX28Coo= -github.com/cyphar/filepath-securejoin v0.2.1/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/containers/image v4.0.0+incompatible h1:CfKbemfowbIg3nhq8rvtI+sdU9QbvODkiD+JLpOJMiQ= +github.com/containers/image v4.0.0+incompatible/go.mod h1:Td6tqqQu0miIBO8mauyzsVqBbv5WhKSE4pH2ZwslVp0= +github.com/containers/image/v4 v4.0.1 h1:idNGHChj0Pyv3vLrxul2oSVMZLeFqpoq3CjLeVgapSQ= +github.com/containers/image/v4 v4.0.1/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA= +github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= +github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= +github.com/containers/storage v1.13.4 h1:j0bBaJDKbUHtAW1MXPFnwXJtqcH+foWeuXK1YaBV5GA= +github.com/containers/storage v1.13.4/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65 h1:4zlOyrJUbYnrvlzChJ+jP2J3i77Jbhm336NEuCv7kZo= github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23 h1:mJtkfC9RUrUWHMk0cFDNhVoc9U3k2FRAzEZ+5pqSIHo= -github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v0.0.0-20180522102801-da99009bbb11/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b h1:+Ga+YpCDpcY1fln6GI0fiiirpqHGcob5/Vk3oKNuGdU= +github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.1 h1:Dq4iIfcM7cNtddhLVWe9h4QDjsi4OER3Z8voPu/I52g= github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.0.0-20180212134524-7beb39f0b969/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.8.0-dev.2.0.20180608203834-19279f049241/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehPjPiGUaWdwgOl92xRyFHJyaqXDHcCyW9M6nmCK4= github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= @@ -84,46 +77,66 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNE github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/etcd-io/bbolt v1.3.2 h1:RLRQ0TKLX7DlBRXAJHvbmXL17Q3KNnTBtZ9B6Qo+/Y0= -github.com/etcd-io/bbolt v1.3.2/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsouza/go-dockerclient v1.3.0 h1:tOXkq/5++XihrAvH5YNwCTdPeQg3XVcC6WI2FVy4ZS0= -github.com/fsouza/go-dockerclient v1.3.0/go.mod h1:IN9UPc4/w7cXiARH2Yg99XxUHbAM+6rAi9hzBVbkWRU= +github.com/fsouza/go-dockerclient v1.4.4 h1:Sd5nD4wdAgiPxvrbYUzT2ZZNmPk3z+GGnZ+frvw8z04= +github.com/fsouza/go-dockerclient v1.4.4/go.mod h1:PrwszSL5fbmsESocROrOGq/NULMXRw+bajY0ltzD6MA= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v0.0.0-20161207003320-04f313413ffd/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v0.0.0-20170815085658-fcdc5011193f/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= 
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v0.0.0-20170217192616-94e7d24fd285/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd h1:anPrsicrIi2ColgWTVPk+TrN42hJIWlfPHSBP9S0ZkM= +github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd/go.mod h1:3LVOLeyx9XVvwPgrt2be44XgSqndprz1G18rSk8KD84= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= @@ -131,59 +144,83 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111 h1:NAAiV9ass6VReWFjuxqrMIq12WKlSULI6Gs3PxQghLA= github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E= -github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5g2Y= github.com/klauspost/compress v1.7.2/go.mod 
h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/compress v1.8.1 h1:oygt2ychZFHOB6M9gUgajzgKrwRgHbGC77NwA4COVgI= +github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM= github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-shellwords v1.0.3 h1:K/VxK7SZ+cvuPgFSLKi5QPI9Vr/ipOf4C1gN+ntueUk= -github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9wIsXc= github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/moby/moby v0.0.0-20171005181806-f8806b18b4b9/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c h1:xa+eQWKuJ9MbB9FBL/eoNvDFvveAkz2LQoz8PzX7Q/4= github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c/go.mod h1:GhAqVMEWnTcW2dxoD/SO3n2enrgWl3y6Dnx4m59GvcA= +github.com/mtrmac/image/v4 v4.0.0-20191001213151-121ffca6db69 h1:TVWS7od6UeGhdYqgXn/+EIDlulkGGV+r6FnjoxRJAl0= +github.com/mtrmac/image/v4 v4.0.0-20191001213151-121ffca6db69/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA= +github.com/mtrmac/image/v4 v4.0.0-20191002203927-a64d9d2717f4 h1:AE5cilZfrGtAgMg5Ed4c2Y2KczlOsMVZAK055sSq+gc= +github.com/mtrmac/image/v4 v4.0.0-20191002203927-a64d9d2717f4/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA= +github.com/mtrmac/image/v4 v4.0.0-20191003181245-f4c983e93262 h1:HMUEnWU3OPT09JRFQLn8VTp3GfdfiEhDMAEhkdX8QnA= +github.com/mtrmac/image/v4 v4.0.0-20191003181245-f4c983e93262/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= 
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU= +github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8 h1:dDCFes8Hj1r/i5qnypONo5jdOme/8HWZC/aNDyhECt0= github.com/opencontainers/runc v1.0.0-rc8/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -195,110 +232,167 @@ github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg= github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= -github.com/openshift/api v3.9.0+incompatible h1:fJ/KsefYuZAjmrr3+5U9yZIZbTOpVkDDLDLFresAeYs= +github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g= +github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible h1:s55wx8JIG/CKnewev892HifTBrtKzMdvgB3rm4rxC2s= github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= github.com/openshift/imagebuilder v1.1.0 h1:oT704SkwMEzmIMU/+Uv1Wmvt+p10q3v2WuYMeFI18c4= github.com/openshift/imagebuilder v1.1.0/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= -github.com/ostreedev/ostree-go v0.0.0-20181112201119-9ab99253d365 h1:5DKEDlc/DLftia3h4tk5K0KBiqBXogCc6EarWTlD3fM= -github.com/ostreedev/ostree-go v0.0.0-20181112201119-9ab99253d365/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13 h1:AUK/hm/tPsiNNASdb3J8fySVRZoI7fnK5mlOvdFD43o= -github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/pquerna/ffjson 
v0.0.0-20190813045741-dac163c6c0a9 h1:kyf9snWXHvQc+yxE9imhdI8YAm4oKeZISlaAR+x73zs= +github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4 h1:rOG9oHVIndNR14f3HRyBy9UPQYmIPniWqTU1TDdHhq4= github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4/go.mod h1:f/98/SnvAzhAEFQJ3u836FePXvcbE8BS0YGMQNn4mhA= -github.com/seccomp/libseccomp-golang v0.9.0 h1:S1pmhdFh5spQtVojA+4GUdWBqvI8ydYHxrx8iR6xN8o= -github.com/seccomp/libseccomp-golang v0.9.0/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 
h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tchap/go-patricia v2.2.6+incompatible h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck= -github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/urfave/cli v1.21.0/go.mod h1:lxDj6qX9Q6lWQxIrbrT0nwecwUtRnhVZAJjJZrVUZZQ= -github.com/vbatts/tar-split v0.10.2 h1:CXd7HEKGkTLjBMinpObcJZU5Hm8EKlor2a1JtX6msXQ= -github.com/vbatts/tar-split v0.10.2/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= +github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8= +github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE= github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= -github.com/vbauerster/mpb v3.3.4+incompatible h1:DDIhnwmgTQIDZo+SWlEr5d6mJBxkOLBwCXPzunhEfJ4= -github.com/vbauerster/mpb v3.3.4+incompatible/go.mod h1:zAHG26FUhVKETRu+MWqYXcI70POlC6N8up9p1dID7SU= github.com/vbauerster/mpb v3.4.0+incompatible h1:mfiiYw87ARaeRW6x5gWwYRUawxaW1tLAD8IceomUCNw= github.com/vbauerster/mpb v3.4.0+incompatible/go.mod h1:zAHG26FUhVKETRu+MWqYXcI70POlC6N8up9p1dID7SU= +github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM= github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vrothberg/storage v0.0.0-20190724065215-a1e42fd78930 h1:/LeIxi2kj5UYTJR9W35t5Pq2gqz03ZNoTURchTH3vc0= 
-github.com/vrothberg/storage v0.0.0-20190724065215-a1e42fd78930/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c= +github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f h1:nBX3nTcmxEtHSERBJaIo1Qa26VwRaopnZmfDQUXsF4I= +github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA= +github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20190816131739-be0936907f66/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xeipuuv/gojsonschema v1.1.0 h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg= github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc h1:F5tKCVGp+MUAHhKp5MZtGqAlGX3+oCsiL1Q629FL90M= -golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190107210223-45ffb0cd1ba0 h1:1DW40AJQ7AP4nY6ORUGUdkpXyEC9W2GAXcOPaMZK0K8= -golang.org/x/net v0.0.0-20190107210223-45ffb0cd1ba0/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542 h1:6ZQFf1D2YYDDI7eSwW8adlkkavTB9sw5I24FVtEvNUQ= +golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 h1:tdsQdquKbTNMsSZLqnLELJGzCANp9oXhu6zFBW6ODx4= +golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 
h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 h1:xQwXv67TxFo9nC1GJFyab5eq/5B590r6RlnL/G8Sz7w= +golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -307,11 +401,14 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= -gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools 
v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.0.0-20190813020757-36bff7324fb7 h1:4uJOjRn9kWq4AqJRE8+qzmAy+lJd9rh8TY455dNef4U= k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN7Vh59Bw0vh9jhoX+V58= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010 h1:pyoq062NftC1y/OcnbSvgolyZDJ8y4fmUPWMkdA6gfU= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= +k8s.io/client-go v0.0.0-20170217214107-bcde30fb7eae/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083 h1:+Qf/nITucAbm09aIdxvoA+7X0BwaXmQGVoR8k7Ynk9o= k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -320,4 +417,5 @@ k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index cca7dd836..829d8c21d 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -13,11 +13,11 @@ import ( "time" "github.com/containers/buildah/docker" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/manifest" - is "github.com/containers/image/storage" - "github.com/containers/image/types" + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/image" + "github.com/containers/image/v4/manifest" + is "github.com/containers/image/v4/storage" + "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index f53018cd4..fe6cc266a 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -13,12 +13,12 @@ import ( "strings" "github.com/containers/buildah" - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runtime-spec/specs-go" + specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/openshift/imagebuilder" "github.com/pkg/errors" "github.com/sirupsen/logrus" diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index c65c3bab4..136261bf0 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ 
b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -12,11 +12,11 @@ import ( "github.com/containers/buildah" "github.com/containers/buildah/util" - "github.com/containers/image/docker/reference" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" + "github.com/containers/image/v4/docker/reference" + is "github.com/containers/image/v4/storage" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/transports/alltransports" + "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -164,7 +164,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod stepCounter++ prefix := fmt.Sprintf("STEP %d: ", stepCounter) suffix := "\n" - fmt.Fprintf(exec.err, prefix+format+suffix, args...) + fmt.Fprintf(exec.out, prefix+format+suffix, args...) } } for arg := range options.Args { diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index e942b3b34..114d250a4 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -13,12 +13,12 @@ import ( "github.com/containers/buildah" buildahdocker "github.com/containers/buildah/docker" "github.com/containers/buildah/util" - cp "github.com/containers/image/copy" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/types" + cp "github.com/containers/image/v4/copy" + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/manifest" + is "github.com/containers/image/v4/storage" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" securejoin "github.com/cyphar/filepath-securejoin" @@ -253,7 +253,7 @@ func (s *StageExecutor) volumeCacheRestore() error { // don't care about the details of where in the filesystem the content actually // goes, because we're not actually going to add it here, so this is less // involved than Copy(). -func (s *StageExecutor) digestSpecifiedContent(node *parser.Node) (string, error) { +func (s *StageExecutor) digestSpecifiedContent(node *parser.Node, argValues []string) (string, error) { // No instruction: done. if node == nil { return "", nil @@ -297,7 +297,15 @@ func (s *StageExecutor) digestSpecifiedContent(node *parser.Node) (string, error } } } + for _, src := range srcs { + // If src has an argument within it, resolve it to its + // value. Otherwise just return the value found. + name, err := imagebuilder.ProcessWord(src, argValues) + if err != nil { + return "", errors.Wrapf(err, "unable to resolve source %q", src) + } + src = name if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { // Source is a URL. 
TODO: cache this content // somewhere, so that we can avoid pulling it down @@ -334,7 +342,14 @@ func (s *StageExecutor) digestSpecifiedContent(node *parser.Node) (string, error } s.builder.ContentDigester.Restart() download := strings.ToUpper(node.Value) == "ADD" - err := s.builder.Add(destination.Value, download, options, sources...) + + // If destination.Value has an argument within it, resolve it to its + // value. Otherwise just return the value found. + destValue, destErr := imagebuilder.ProcessWord(destination.Value, argValues) + if destErr != nil { + return "", errors.Wrapf(destErr, "unable to resolve destination %q", destination.Value) + } + err := s.builder.Add(destValue, download, options, sources...) if err != nil { return "", errors.Wrapf(err, "error dry-running %q", node.Original) } @@ -832,7 +847,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) } // In case we added content, retrieve its digest. - addedContentDigest, err := s.digestSpecifiedContent(node) + addedContentDigest, err := s.digestSpecifiedContent(node, ib.Arguments()) if err != nil { return "", nil, err } @@ -881,7 +896,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b // cached images so far, look for one that matches what we // expect to produce for this instruction. if checkForLayers && !(s.executor.squash && lastInstruction && lastStage) { - addedContentDigest, err := s.digestSpecifiedContent(node) + addedContentDigest, err := s.digestSpecifiedContent(node, ib.Arguments()) if err != nil { return "", nil, err } @@ -939,7 +954,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) } // In case we added content, retrieve its digest. - addedContentDigest, err := s.digestSpecifiedContent(node) + addedContentDigest, err := s.digestSpecifiedContent(node, ib.Arguments()) if err != nil { return "", nil, err } diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go index 4b020bf41..7a94d9974 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/util.go +++ b/vendor/github.com/containers/buildah/imagebuildah/util.go @@ -1,6 +1,8 @@ package imagebuildah import ( + "bufio" + "bytes" "fmt" "io/ioutil" "net/http" @@ -55,7 +57,25 @@ func downloadToDirectory(url, dir string) error { return nil } -// TempDirForURL checks if the passed-in string looks like a URL. If it is, +func stdinToDirectory(dir string) error { + logrus.Debugf("extracting stdin to %q", dir) + r := bufio.NewReader(os.Stdin) + b, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrapf(err, "Failed to read from stdin") + } + reader := bytes.NewReader(b) + if err := chrootarchive.Untar(reader, dir, nil); err != nil { + dockerfile := filepath.Join(dir, "Dockerfile") + // Assume this is a Dockerfile + if err := ioutil.WriteFile(dockerfile, b, 0600); err != nil { + return errors.Wrapf(err, "Failed to write bytes to %q", dockerfile) + } + } + return nil +} + +// TempDirForURL checks if the passed-in string looks like a URL or -. 
If it is, // TempDirForURL creates a temporary directory, arranges for its contents to be // the contents of that URL, and returns the temporary directory's path, along // with the name of a subdirectory which should be used as the build context @@ -66,7 +86,8 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err if !strings.HasPrefix(url, "http://") && !strings.HasPrefix(url, "https://") && !strings.HasPrefix(url, "git://") && - !strings.HasPrefix(url, "github.com/") { + !strings.HasPrefix(url, "github.com/") && + url != "-" { return "", "", nil } name, err = ioutil.TempDir(dir, prefix) @@ -76,7 +97,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err if strings.HasPrefix(url, "git://") || strings.HasSuffix(url, ".git") { err = cloneToDirectory(url, name) if err != nil { - if err2 := os.Remove(name); err2 != nil { + if err2 := os.RemoveAll(name); err2 != nil { logrus.Debugf("error removing temporary directory %q: %v", name, err2) } return "", "", err @@ -92,11 +113,22 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") { err = downloadToDirectory(url, name) if err != nil { - if err2 := os.Remove(name); err2 != nil { + if err2 := os.RemoveAll(name); err2 != nil { + logrus.Debugf("error removing temporary directory %q: %v", name, err2) + } + return "", subdir, err + } + return name, subdir, nil + } + if url == "-" { + err = stdinToDirectory(name) + if err != nil { + if err2 := os.RemoveAll(name); err2 != nil { logrus.Debugf("error removing temporary directory %q: %v", name, err2) } return "", subdir, err } + logrus.Debugf("Build context is at %q", name) return name, subdir, nil } logrus.Debugf("don't know how to retrieve %q", url) diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go index b01d4d07b..4d3059527 100644 --- a/vendor/github.com/containers/buildah/import.go +++ b/vendor/github.com/containers/buildah/import.go @@ -5,11 +5,11 @@ import ( "github.com/containers/buildah/docker" "github.com/containers/buildah/util" - "github.com/containers/image/manifest" - is "github.com/containers/image/storage" - "github.com/containers/image/types" + "github.com/containers/image/v4/manifest" + is "github.com/containers/image/v4/storage" + "github.com/containers/image/v4/types" "github.com/containers/storage" - "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go index ed21ac1e9..c741bb449 100644 --- a/vendor/github.com/containers/buildah/info.go +++ b/vendor/github.com/containers/buildah/info.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "github.com/containers/buildah/pkg/cgroups" "github.com/containers/buildah/pkg/unshare" "github.com/containers/storage" "github.com/containers/storage/pkg/system" @@ -45,6 +46,17 @@ func hostInfo() map[string]interface{} { info["arch"] = runtime.GOARCH info["cpus"] = runtime.NumCPU() info["rootless"] = unshare.IsRootless() + + unified, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + logrus.Error(err, "err reading cgroups mode") + } + cgroupVersion := "v1" + if unified { + cgroupVersion = "v2" + } + info["CgroupVersion"] = cgroupVersion + mi, err := system.ReadMemInfo() if err != nil { logrus.Error(err, "err reading memory info") diff --git 
a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go index 31ae01be7..216a96611 100644 --- a/vendor/github.com/containers/buildah/new.go +++ b/vendor/github.com/containers/buildah/new.go @@ -7,12 +7,12 @@ import ( "strings" "github.com/containers/buildah/util" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/sysregistriesv2" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/pkg/sysregistriesv2" + is "github.com/containers/image/v4/storage" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/transports/alltransports" + "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/openshift/imagebuilder" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go index 53e6ec44b..539c894a3 100644 --- a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go +++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go @@ -10,11 +10,11 @@ import ( "sync" "github.com/containers/buildah/docker" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/manifest" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/image" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/types" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" digest "github.com/opencontainers/go-digest" diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index 36ae07190..f0996315a 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -14,10 +14,10 @@ import ( "unicode" "github.com/containers/buildah" - "github.com/containers/image/types" + "github.com/containers/image/v4/types" "github.com/containers/storage/pkg/idtools" - "github.com/docker/go-units" - "github.com/opencontainers/runtime-spec/specs-go" + units "github.com/docker/go-units" + specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go index f05d2bf50..60dc3693f 100644 --- a/vendor/github.com/containers/buildah/pull.go +++ b/vendor/github.com/containers/buildah/pull.go @@ -8,18 +8,18 @@ import ( "github.com/containers/buildah/pkg/blobcache" "github.com/containers/buildah/util" - cp "github.com/containers/image/copy" - "github.com/containers/image/directory" - "github.com/containers/image/docker" - dockerarchive "github.com/containers/image/docker/archive" - "github.com/containers/image/docker/reference" - tarfile "github.com/containers/image/docker/tarfile" - ociarchive "github.com/containers/image/oci/archive" - oci "github.com/containers/image/oci/layout" - "github.com/containers/image/signature" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - 
"github.com/containers/image/types" + cp "github.com/containers/image/v4/copy" + "github.com/containers/image/v4/directory" + "github.com/containers/image/v4/docker" + dockerarchive "github.com/containers/image/v4/docker/archive" + "github.com/containers/image/v4/docker/reference" + tarfile "github.com/containers/image/v4/docker/tarfile" + ociarchive "github.com/containers/image/v4/oci/archive" + oci "github.com/containers/image/v4/oci/layout" + "github.com/containers/image/v4/signature" + is "github.com/containers/image/v4/storage" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/types" "github.com/containers/storage" multierror "github.com/hashicorp/go-multierror" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go index 9fbeb14d4..06492fad2 100644 --- a/vendor/github.com/containers/buildah/util.go +++ b/vendor/github.com/containers/buildah/util.go @@ -8,9 +8,9 @@ import ( "path/filepath" "github.com/containers/buildah/util" - "github.com/containers/image/docker/reference" - "github.com/containers/image/pkg/sysregistriesv2" - "github.com/containers/image/types" + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/pkg/sysregistriesv2" + "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" @@ -20,7 +20,7 @@ import ( "github.com/containers/storage/pkg/system" v1 "github.com/opencontainers/image-spec/specs-go/v1" rspec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/opencontainers/selinux/go-selinux" + selinux "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go index 8ec767601..a572d1405 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -10,12 +10,12 @@ import ( "syscall" "github.com/containers/buildah/pkg/cgroups" - "github.com/containers/image/docker/reference" - "github.com/containers/image/pkg/sysregistriesv2" - "github.com/containers/image/signature" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/pkg/sysregistriesv2" + "github.com/containers/image/v4/signature" + is "github.com/containers/image/v4/storage" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/docker/distribution/registry/api/errcode" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -115,7 +115,7 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto for _, registry := range searchRegistries { reg, err := sysregistriesv2.FindRegistry(sc, registry) if err != nil { - logrus.Debugf("unable to read registry configuraitno for %#v: %v", registry, err) + logrus.Debugf("unable to read registry configuration for %#v: %v", registry, err) continue } if reg == nil || !reg.Blocked { diff --git a/vendor/github.com/containers/image/LICENSE b/vendor/github.com/containers/image/LICENSE deleted file mode 100644 index 953563530..000000000 --- a/vendor/github.com/containers/image/LICENSE +++ /dev/null @@ -1,189 +0,0 @@ - 
- Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
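The un-versioned vendor/github.com/containers/image tree is deleted from this point on; the same library is re-vendored under its v4 module path, which is why every import in the buildah hunks above gains a "/v4" segment (Go modules' semantic import versioning puts the major version into the import path for v2 and later). As a minimal, hypothetical caller-side sketch of that migration (the /v4 import path and alltransports.ParseImageName are real containers/image API; the file itself and the image name are illustrative only, not part of this patch):

package main

import (
	"fmt"

	// Before this bump: "github.com/containers/image/transports/alltransports".
	// After it, the module's major version is part of the import path:
	"github.com/containers/image/v4/transports/alltransports"
)

func main() {
	// The API surface is unchanged by the path bump: ParseImageName still
	// resolves a transport-prefixed name to a types.ImageReference.
	ref, err := alltransports.ParseImageName("docker://quay.io/libpod/alpine:latest")
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println("transport:", ref.Transport().Name())
	fmt.Println("reference:", ref.StringWithinTransport())
}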
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go deleted file mode 100644 index f1b029f97..000000000 --- a/vendor/github.com/containers/image/copy/copy.go +++ /dev/null @@ -1,920 +0,0 @@ -package copy - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "os" - "reflect" - "runtime" - "strings" - "sync" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/blobinfocache" - "github.com/containers/image/pkg/compression" - "github.com/containers/image/signature" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/klauspost/pgzip" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/vbauerster/mpb" - "github.com/vbauerster/mpb/decor" - "golang.org/x/crypto/ssh/terminal" - "golang.org/x/sync/semaphore" -) - -type digestingReader struct { - source io.Reader - digester digest.Digester - expectedDigest digest.Digest - validationFailed bool - validationSucceeded bool -} - -// maxParallelDownloads is used to limit the maxmimum number of parallel -// downloads. Let's follow Firefox by limiting it to 6. -var maxParallelDownloads = 6 - -// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error -// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. -// (neither is set if EOF is never reached). -func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { - if err := expectedDigest.Validate(); err != nil { - return nil, errors.Errorf("Invalid digest specification %s", expectedDigest) - } - digestAlgorithm := expectedDigest.Algorithm() - if !digestAlgorithm.Available() { - return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) - } - return &digestingReader{ - source: source, - digester: digestAlgorithm.Digester(), - expectedDigest: expectedDigest, - validationFailed: false, - }, nil -} - -func (d *digestingReader) Read(p []byte) (int, error) { - n, err := d.source.Read(p) - if n > 0 { - if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil { - // Coverage: This should not happen, the hash.Hash interface requires - // d.digest.Write to never return an error, and the io.Writer interface - // requires n2 == len(input) if no error is returned. - return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n) - } - } - if err == io.EOF { - actualDigest := d.digester.Digest() - if actualDigest != d.expectedDigest { - d.validationFailed = true - return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest) - } - d.validationSucceeded = true - } - return n, err -} - -// copier allows us to keep track of diffID values for blobs, and other -// data shared across one or more images in a possible manifest list. 
- -// copier allows us to keep track of diffID values for blobs, and other -// data shared across one or more images in a possible manifest list. -type copier struct { - dest types.ImageDestination - rawSource types.ImageSource - reportWriter io.Writer - progressOutput io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties - blobInfoCache types.BlobInfoCache - copyInParallel bool -} - -// imageCopier tracks state specific to a single image (possibly an item of a manifest list) -type imageCopier struct { - c *copier - manifestUpdates *types.ManifestUpdateOptions - src types.Image - diffIDsAreNeeded bool - canModifyManifest bool - canSubstituteBlobs bool -} - -// Options allows supplying non-default configuration modifying the behavior of CopyImage. -type Options struct { - RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. - SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), - ReportWriter io.Writer - SourceCtx *types.SystemContext - DestinationCtx *types.SystemContext - ProgressInterval time.Duration // time to wait between reports to signal the progress channel - Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. - // manifest MIME type of image set by user. "" is default and means use autodetection to determine the manifest MIME type - ForceManifestMIMEType string -} - -// Image copies image from srcRef to destRef, using policyContext to validate -// source image admissibility. It returns the manifest which was written to -// the new copy of the image. -func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (manifest []byte, retErr error) { - // NOTE this function uses an output parameter for the error return value. - // Setting this and returning is the ideal way to return an error. - // - // the defers in this routine will wrap the error return with its own errors - // which can be valuable context in the middle of a multi-streamed copy. - if options == nil { - options = &Options{} - } - - reportWriter := ioutil.Discard - - if options.ReportWriter != nil { - reportWriter = options.ReportWriter - } - - dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) - if err != nil { - return nil, errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef)) - } - defer func() { - if err := dest.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (dest: %v)", err) - } - }() - - rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) - if err != nil { - return nil, errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef)) - } - defer func() { - if err := rawSource.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (src: %v)", err) - } - }() - - // If reportWriter is not a TTY (e.g., when piping to a file), do not - // print the progress bars to avoid long and hard to parse output. - // createProgressBar() will print a single line instead. - progressOutput := reportWriter - if !isTTY(reportWriter) { - progressOutput = ioutil.Discard - } - copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() - c := &copier{ - dest: dest, - rawSource: rawSource, - reportWriter: reportWriter, - progressOutput: progressOutput, - progressInterval: options.ProgressInterval, - progress: options.Progress, - copyInParallel: copyInParallel, - // FIXME?
The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. - // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually - // we might want to add a separate CommonCtx — or would that be too confusing? - blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx), - } - - unparsedToplevel := image.UnparsedInstance(rawSource, nil) - multiImage, err := isMultiImage(ctx, unparsedToplevel) - if err != nil { - return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef)) - } - - if !multiImage { - // The simple case: Just copy a single image. - if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil { - return nil, err - } - } else { - // This is a manifest list. Choose a single image and copy it. - // FIXME: Copy to destinations which support manifest lists, one image at a time. - instanceDigest, err := image.ChooseManifestInstanceFromManifestList(ctx, options.SourceCtx, unparsedToplevel) - if err != nil { - return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef)) - } - logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest) - unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) - - if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil { - return nil, err - } - } - - if err := c.dest.Commit(ctx); err != nil { - return nil, errors.Wrap(err, "Error committing the finished image") - } - - return manifest, nil -} - -// copyOneImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate -// source image admissibility. -func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifestBytes []byte, retErr error) { - // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. - // Make sure we fail cleanly in such cases. - multiImage, err := isMultiImage(ctx, unparsedImage) - if err != nil { - // FIXME FIXME: How to name a reference for the sub-image? - return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference())) - } - if multiImage { - return nil, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") - } - - // Please keep this policy check BEFORE reading any other information about the image. - // (the multiImage check above only matches the MIME type, which we have received anyway. - // Actual parsing of anything should be deferred.) - if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. - return nil, errors.Wrap(err, "Source image rejected") - } - src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage) - if err != nil { - return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference())) - } - - // If the destination is a digested reference, make a note of that, determine what digest value we're - // expecting, and check that the source manifest matches it.
- destIsDigestedReference := false - if named := c.dest.Reference().DockerReference(); named != nil { - if digested, ok := named.(reference.Digested); ok { - destIsDigestedReference = true - sourceManifest, _, err := src.Manifest(ctx) - if err != nil { - return nil, errors.Wrapf(err, "Error reading manifest from source image") - } - matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest()) - if err != nil { - return nil, errors.Wrapf(err, "Error computing digest of source image's manifest") - } - if !matches { - return nil, errors.New("Digest of source image's manifest would not match destination reference") - } - } - } - - if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil { - return nil, err - } - - var sigs [][]byte - if options.RemoveSignatures { - sigs = [][]byte{} - } else { - c.Printf("Getting image source signatures\n") - s, err := src.Signatures(ctx) - if err != nil { - return nil, errors.Wrap(err, "Error reading signatures") - } - sigs = s - } - if len(sigs) != 0 { - c.Printf("Checking if image destination supports signatures\n") - if err := c.dest.SupportsSignatures(ctx); err != nil { - return nil, errors.Wrap(err, "Can not copy signatures") - } - } - - ic := imageCopier{ - c: c, - manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, - src: src, - // diffIDsAreNeeded is computed later - canModifyManifest: len(sigs) == 0 && !destIsDigestedReference, - } - // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. - // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: - // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. - // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk - // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, - // and we would reuse and sign it. - ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == "" - - if err := ic.updateEmbeddedDockerReference(); err != nil { - return nil, err - } - - // We compute preferredManifestMIMEType only to show it in error messages. - // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed. - preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType) - if err != nil { - return nil, err - } - - // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. - ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) - - if err := ic.copyLayers(ctx); err != nil { - return nil, err - } - - // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; - // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support - // without actually trying to upload something and getting a types.ManifestTypeRejectedError. - // So, try the preferred manifest MIME type. 
If the process succeeds, fine… - manifestBytes, err = ic.copyUpdatedConfigAndManifest(ctx) - if err != nil { - logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err) - // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options. - if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 { - // We don’t have other options. - // In principle the code below would handle this as well, but the resulting error message is fairly ugly. - // Don’t bother the user with MIME types if we have no choice. - return nil, err - } - // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. - // So if we are here, we will definitely be trying to convert the manifest. - // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, - // so let’s bail out early and with a better error message. - if !ic.canModifyManifest { - return nil, errors.Wrap(err, "Writing manifest failed (and converting it is not possible)") - } - - // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. - errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)} - for _, manifestMIMEType := range otherManifestMIMETypeCandidates { - logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) - ic.manifestUpdates.ManifestMIMEType = manifestMIMEType - attemptedManifest, err := ic.copyUpdatedConfigAndManifest(ctx) - if err != nil { - logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) - errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) - continue - } - - // We have successfully uploaded a manifest. - manifestBytes = attemptedManifest - errs = nil // Mark this as a success so that we don't abort below. - break - } - if errs != nil { - return nil, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) - } - } - - if options.SignBy != "" { - newSig, err := c.createSignature(manifestBytes, options.SignBy) - if err != nil { - return nil, err - } - sigs = append(sigs, newSig) - } - - c.Printf("Storing signatures\n") - if err := c.dest.PutSignatures(ctx, sigs); err != nil { - return nil, errors.Wrap(err, "Error writing signatures") - } - - return manifestBytes, nil -} - -// Printf writes a formatted string to c.reportWriter. -// Note that the method name Printf is not entirely arbitrary: (go tool vet) -// has a built-in list of functions/methods (whatever object they are for) -// which have their format strings checked; for other names we would have -// to pass a parameter to every (go tool vet) invocation. -func (c *copier) Printf(format string, a ...interface{}) { - fmt.Fprintf(c.reportWriter, format, a...) 
-} - -func checkImageDestinationForCurrentRuntimeOS(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error { - if dest.MustMatchRuntimeOS() { - wantedOS := runtime.GOOS - if sys != nil && sys.OSChoice != "" { - wantedOS = sys.OSChoice - } - c, err := src.OCIConfig(ctx) - if err != nil { - return errors.Wrapf(err, "Error parsing image configuration") - } - osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS) - if wantedOS == "windows" && c.OS == "linux" { - return osErr - } else if wantedOS != "windows" && c.OS == "windows" { - return osErr - } - } - return nil -} - -// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. -func (ic *imageCopier) updateEmbeddedDockerReference() error { - if ic.c.dest.IgnoresEmbeddedDockerReference() { - return nil // Destination would prefer us not to update the embedded reference. - } - destRef := ic.c.dest.Reference().DockerReference() - if destRef == nil { - return nil // Destination does not care about Docker references - } - if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { - return nil // No reference embedded in the manifest, or it matches destRef already. - } - - if !ic.canModifyManifest { - return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway", - transports.ImageName(ic.c.dest.Reference()), destRef.String()) - } - ic.manifestUpdates.EmbeddedDockerReference = destRef - return nil -} - -// isTTY returns true if the io.Writer is a file and a tty. -func isTTY(w io.Writer) bool { - if f, ok := w.(*os.File); ok { - return terminal.IsTerminal(int(f.Fd())) - } - return false -} - -// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. -func (ic *imageCopier) copyLayers(ctx context.Context) error { - srcInfos := ic.src.LayerInfos() - numLayers := len(srcInfos) - updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) - if err != nil { - return err - } - srcInfosUpdated := false - if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { - if !ic.canModifyManifest { - return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden") - } - srcInfos = updatedSrcInfos - srcInfosUpdated = true - } - - type copyLayerData struct { - destInfo types.BlobInfo - diffID digest.Digest - err error - } - - // copyGroup is used to determine if all layers are copied - copyGroup := sync.WaitGroup{} - copyGroup.Add(numLayers) - - // copySemaphore is used to limit the number of parallel downloads to - // avoid malicious images causing troubles and to be nice to servers. - var copySemaphore *semaphore.Weighted - if ic.c.copyInParallel { - copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads)) - } else { - copySemaphore = semaphore.NewWeighted(int64(1)) - } - - data := make([]copyLayerData, numLayers) - copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) { - defer copySemaphore.Release(1) - defer copyGroup.Done() - cld := copyLayerData{} - if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { - // DiffIDs are, currently, needed only when converting from schema1. - // In which case src.LayerInfos will not have URLs because schema1 - // does not support them. 
- if ic.diffIDsAreNeeded { - cld.err = errors.New("getting DiffID for foreign layers is unimplemented") - } else { - cld.destInfo = srcLayer - logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) - } - } else { - cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool) - } - data[index] = cld - } - - func() { // A scope for defer - progressPool, progressCleanup := ic.c.newProgressPool(ctx) - defer progressCleanup() - - for i, srcLayer := range srcInfos { - copySemaphore.Acquire(ctx, 1) - go copyLayerHelper(i, srcLayer, progressPool) - } - - // Wait for all layers to be copied - copyGroup.Wait() - }() - - destInfos := make([]types.BlobInfo, numLayers) - diffIDs := make([]digest.Digest, numLayers) - for i, cld := range data { - if cld.err != nil { - return cld.err - } - destInfos[i] = cld.destInfo - diffIDs[i] = cld.diffID - } - - ic.manifestUpdates.InformationOnly.LayerInfos = destInfos - if ic.diffIDsAreNeeded { - ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs - } - if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { - ic.manifestUpdates.LayerInfos = destInfos - } - return nil -} - -// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields) -func layerDigestsDiffer(a, b []types.BlobInfo) bool { - if len(a) != len(b) { - return true - } - for i := range a { - if a[i].Digest != b[i].Digest { - return true - } - } - return false -} - -// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, -// stores the resulting config and manifest to the destination, and returns the stored manifest. -func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte, error) { - pendingImage := ic.src - if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) { - if !ic.canModifyManifest { - return nil, errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") - } - if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { - // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. - // So, this can only happen if we are trying to upload using one of the other MIME type candidates. - // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise - // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. - // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. - // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
- return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) - } - pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates) - if err != nil { - return nil, errors.Wrap(err, "Error creating an updated image manifest") - } - pendingImage = pi - } - manifest, _, err := pendingImage.Manifest(ctx) - if err != nil { - return nil, errors.Wrap(err, "Error reading manifest") - } - - if err := ic.c.copyConfig(ctx, pendingImage); err != nil { - return nil, err - } - - ic.c.Printf("Writing manifest to image destination\n") - if err := ic.c.dest.PutManifest(ctx, manifest); err != nil { - return nil, errors.Wrap(err, "Error writing manifest") - } - return manifest, nil -} - -// newProgressPool creates a *mpb.Progress and a cleanup function. -// The caller must eventually call the returned cleanup function after the pool will no longer be updated. -func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) { - ctx, cancel := context.WithCancel(ctx) - pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx)) - return pool, func() { - cancel() - pool.Wait() - } -} - -// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter -// is ioutil.Discard, the progress bar's output will be discarded -func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar { - // shortDigestLen is the length of the digest used for blobs. - const shortDigestLen = 12 - - prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded()) - // Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column. - maxPrefixLen := len("Copying blob ") + shortDigestLen - if len(prefix) > maxPrefixLen { - prefix = prefix[:maxPrefixLen] - } - - // Use a normal progress bar when we know the size (i.e., size > 0). - // Otherwise, use a spinner to indicate that something's happening. - var bar *mpb.Bar - if info.Size > 0 { - bar = pool.AddBar(info.Size, - mpb.BarClearOnComplete(), - mpb.PrependDecorators( - decor.Name(prefix), - ), - mpb.AppendDecorators( - decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete), - ), - ) - } else { - bar = pool.AddSpinner(info.Size, - mpb.SpinnerOnLeft, - mpb.BarClearOnComplete(), - mpb.SpinnerStyle([]string{".", "..", "...", "....", ""}), - mpb.PrependDecorators( - decor.Name(prefix), - ), - mpb.AppendDecorators( - decor.OnComplete(decor.Name(""), " "+onComplete), - ), - ) - } - if c.progressOutput == ioutil.Discard { - c.Printf("Copying %s %s\n", kind, info.Digest) - } - return bar -} - -// copyConfig copies config.json, if any, from src to dest. 
-func (c *copier) copyConfig(ctx context.Context, src types.Image) error { - srcInfo := src.ConfigInfo() - if srcInfo.Digest != "" { - configBlob, err := src.ConfigBlob(ctx) - if err != nil { - return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest) - } - - destInfo, err := func() (types.BlobInfo, error) { // A scope for defer - progressPool, progressCleanup := c.newProgressPool(ctx) - defer progressCleanup() - bar := c.createProgressBar(progressPool, srcInfo, "config", "done") - destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar) - if err != nil { - return types.BlobInfo{}, err - } - bar.SetTotal(int64(len(configBlob)), true) - return destInfo, nil - }() - if err != nil { - return err - } - if destInfo.Digest != srcInfo.Digest { - return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest) - } - } - return nil -} - -// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine. -// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation. -type diffIDResult struct { - digest digest.Digest - err error -} - -// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress, -// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded -func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) { - cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" - diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" - - // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
- if !diffIDIsNeeded { - reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest) - } - if reused { - logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) - bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists") - bar.SetTotal(0, true) - return blobInfo, cachedDiffID, nil - } - } - - // Fallback: copy the layer, computing the diffID if we need to do so - srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) - } - defer srcStream.Close() - - bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done") - - blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar) - if err != nil { - return types.BlobInfo{}, "", err - } - - diffID := cachedDiffID - if diffIDIsNeeded { - select { - case <-ctx.Done(): - return types.BlobInfo{}, "", ctx.Err() - case diffIDResult := <-diffIDChan: - if diffIDResult.err != nil { - return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID") - } - logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process - // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. - ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) - diffID = diffIDResult.digest - } - } - - bar.SetTotal(srcInfo.Size, true) - return blobInfo, diffID, nil -} - -// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. -// it copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest, -// perhaps compressing the stream if canCompress, -// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. -func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) { - var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil - var diffIDChan chan diffIDResult - - err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below - if diffIDIsNeeded { - diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block. - pipeReader, pipeWriter := io.Pipe() - defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily. - pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() - }() - - getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer { - // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further - // reading from the pipe has failed, we don’t really care. - // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it, - // the return value includes an error indication, which we do check. 
- // - // If this never gets called, pipeReader will not be used anywhere, but pipeWriter will only be - // closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC. - go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader - return pipeWriter - } - } - blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success - return blobInfo, diffIDChan, err - // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan -} - -// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest. -func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) { - result := diffIDResult{ - digest: "", - err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"), - } - defer func() { dest <- result }() - defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead. - - result.digest, result.err = computeDiffID(layerStream, decompressor) -} - -// computeDiffID reads all input from stream, uncompresses it using decompressor if necessary, and returns its digest. -func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) { - if decompressor != nil { - s, err := decompressor(stream) - if err != nil { - return "", err - } - defer s.Close() - stream = s - } - - return digest.Canonical.FromReader(stream) -} - -// copyBlobFromStream copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest, -// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, -// perhaps compressing it if canCompress, -// and returns a complete blobInfo of the copied blob. -func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, - canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) { - // The copying happens through a pipeline of connected io.Readers. - // === Input: srcStream - - // === Process input through digestingReader to validate against the expected digest. - // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, - // use a separate validation failure indicator. - // Note that for this check we don't use the stronger "validationSucceeded" indicator, because - // dest.PutBlob may detect that the layer already exists, in which case we don't - // read stream to the end, and validation does not happen. - digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest) - } - var destStream io.Reader = digestingReader - - // === Detect compression of the input stream. - // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
- decompressor, destStream, err := compression.DetectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) - } - isCompressed := decompressor != nil - destStream = bar.ProxyReader(destStream) - - // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. - var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. - if getOriginalLayerCopyWriter != nil { - destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor)) - originalLayerReader = destStream - } - - // === Deal with layer compression/decompression if necessary - var inputInfo types.BlobInfo - var compressionOperation types.LayerCompression - if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed { - logrus.Debugf("Compressing blob on the fly") - compressionOperation = types.Compress - pipeReader, pipeWriter := io.Pipe() - defer pipeReader.Close() - - // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, - // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, - // we don’t care. - go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter - destStream = pipeReader - inputInfo.Digest = "" - inputInfo.Size = -1 - } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { - logrus.Debugf("Blob will be decompressed") - compressionOperation = types.Decompress - s, err := decompressor(destStream) - if err != nil { - return types.BlobInfo{}, err - } - defer s.Close() - destStream = s - inputInfo.Digest = "" - inputInfo.Size = -1 - } else { - logrus.Debugf("Using original blob without modification") - compressionOperation = types.PreserveOriginal - inputInfo = srcInfo - } - - // === Report progress using the c.progress channel, if required. - if c.progress != nil && c.progressInterval > 0 { - destStream = &progressReader{ - source: destStream, - channel: c.progress, - interval: c.progressInterval, - artifact: srcInfo, - lastTime: time.Now(), - } - } - - // === Finally, send the layer stream to dest. - uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig) - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "Error writing blob") - } - - // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume - // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. - // So, read everything from originalLayerReader, which will cause the rest to be - // sent there if we are not already at EOF. - if getOriginalLayerCopyWriter != nil { - logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") - _, err := io.Copy(ioutil.Discard, originalLayerReader) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest) - } - } - - if digestingReader.validationFailed { // Coverage: This should never happen.
- return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest) - } - if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest { - return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest) - } - if digestingReader.validationSucceeded { - // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: - // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader - // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob - // (because inputInfo.Digest == "", this must have been computed afresh). - switch compressionOperation { - case types.PreserveOriginal: - break // Do nothing, we have only one digest and we might not have even verified it. - case types.Compress: - c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) - case types.Decompress: - c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) - default: - return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) - } - } - return uploadedInfo, nil -} - -// compressGoroutine reads all input from src and writes its compressed equivalent to dest. -func compressGoroutine(dest *io.PipeWriter, src io.Reader) { - err := errors.New("Internal error: unexpected panic in compressGoroutine") - defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. - dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() - }() - - zipper := pgzip.NewWriter(dest) - defer zipper.Close() - - _, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close() -} diff --git a/vendor/github.com/containers/image/copy/manifest.go b/vendor/github.com/containers/image/copy/manifest.go deleted file mode 100644 index e8cc8a9e7..000000000 --- a/vendor/github.com/containers/image/copy/manifest.go +++ /dev/null @@ -1,121 +0,0 @@ -package copy - -import ( - "context" - "strings" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. -// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location. -// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. -var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} - -// orderedSet is a list of strings (MIME types in our case), with each string appearing at most once. -type orderedSet struct { - list []string - included map[string]struct{} -} - -// newOrderedSet creates a correctly initialized orderedSet. -// [Sometimes it would be really nice if Golang had constructors…] -func newOrderedSet() *orderedSet { - return &orderedSet{ - list: []string{}, - included: map[string]struct{}{}, - } -} - -// append adds s to the end of os, only if it is not included already. 
-func (os *orderedSet) append(s string) { - if _, ok := os.included[s]; !ok { - os.list = append(os.list, s) - os.included[s] = struct{}{} - } -} - -// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest. -// Note that the conversion will only happen later, through ic.src.UpdatedImage -// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified), -// and a list of other possible alternatives, in order. -func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) { - _, srcType, err := ic.src.Manifest(ctx) - if err != nil { // This should have been cached?! - return "", nil, errors.Wrap(err, "Error reading manifest") - } - normalizedSrcType := manifest.NormalizedMIMEType(srcType) - if srcType != normalizedSrcType { - logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType) - srcType = normalizedSrcType - } - - if forceManifestMIMEType != "" { - destSupportedManifestMIMETypes = []string{forceManifestMIMEType} - } - - if len(destSupportedManifestMIMETypes) == 0 { - return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. - } - supportedByDest := map[string]struct{}{} - for _, t := range destSupportedManifestMIMETypes { - supportedByDest[t] = struct{}{} - } - - // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. - // So, build a list of types to try in order of decreasing preference. - // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct, - // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other. - // In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types - // and never attempt the other one. - prioritizedTypes := newOrderedSet() - - // First of all, prefer to keep the original manifest unmodified. - if _, ok := supportedByDest[srcType]; ok { - prioritizedTypes.append(srcType) - } - if !ic.canModifyManifest { - // We could also drop the !ic.canModifyManifest check and have the caller - // make the choice; it is already doing that to an extent, to improve error - // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion” - // special case in here; the caller can then worry (or not) only about a good UI. - logrus.Debugf("We can't modify the manifest, hoping for the best...") - return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? - } - - // Then use our list of preferred types. - for _, t := range preferredManifestMIMETypes { - if _, ok := supportedByDest[t]; ok { - prioritizedTypes.append(t) - } - } - - // Finally, try anything else the destination supports. - for _, t := range destSupportedManifestMIMETypes { - prioritizedTypes.append(t) - } - - logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) - if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen. 
- return "", nil, errors.New("Internal error: no candidate MIME types") - } - preferredType := prioritizedTypes.list[0] - if preferredType != srcType { - ic.manifestUpdates.ManifestMIMEType = preferredType - } else { - logrus.Debugf("... will first try using the original manifest unmodified") - } - return preferredType, prioritizedTypes.list[1:], nil -} - -// isMultiImage returns true if img is a list of images -func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) { - _, mt, err := img.Manifest(ctx) - if err != nil { - return false, err - } - return manifest.MIMETypeIsMultiImage(mt), nil -} diff --git a/vendor/github.com/containers/image/copy/progress_reader.go b/vendor/github.com/containers/image/copy/progress_reader.go deleted file mode 100644 index b670ee59f..000000000 --- a/vendor/github.com/containers/image/copy/progress_reader.go +++ /dev/null @@ -1,28 +0,0 @@ -package copy - -import ( - "io" - "time" - - "github.com/containers/image/types" -) - -// progressReader is a reader that reports its progress on an interval. -type progressReader struct { - source io.Reader - channel chan types.ProgressProperties - interval time.Duration - artifact types.BlobInfo - lastTime time.Time - offset uint64 -} - -func (r *progressReader) Read(p []byte) (int, error) { - n, err := r.source.Read(p) - r.offset += uint64(n) - if time.Since(r.lastTime) > r.interval { - r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset} - r.lastTime = time.Now() - } - return n, err -} diff --git a/vendor/github.com/containers/image/copy/sign.go b/vendor/github.com/containers/image/copy/sign.go deleted file mode 100644 index 91394d2b0..000000000 --- a/vendor/github.com/containers/image/copy/sign.go +++ /dev/null @@ -1,31 +0,0 @@ -package copy - -import ( - "github.com/containers/image/signature" - "github.com/containers/image/transports" - "github.com/pkg/errors" -) - -// createSignature creates a new signature of manifest using keyIdentity. 
-func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { - mech, err := signature.NewGPGSigningMechanism() - if err != nil { - return nil, errors.Wrap(err, "Error initializing GPG") - } - defer mech.Close() - if err := mech.SupportsSigning(); err != nil { - return nil, errors.Wrap(err, "Signing not supported") - } - - dockerReference := c.dest.Reference().DockerReference() - if dockerReference == nil { - return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) - } - - c.Printf("Signing manifest\n") - newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) - if err != nil { - return nil, errors.Wrap(err, "Error creating signature") - } - return newSig, nil -} diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go deleted file mode 100644 index 4b2ab022e..000000000 --- a/vendor/github.com/containers/image/directory/directory_dest.go +++ /dev/null @@ -1,260 +0,0 @@ -package directory - -import ( - "context" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const version = "Directory Transport Version: 1.1\n" - -// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created -// using the 'dir' transport -var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") - -type dirImageDestination struct { - ref dirReference - compress bool -} - -// newImageDestination returns an ImageDestination for writing to a directory. 
-func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) { - d := &dirImageDestination{ref: ref, compress: compress} - - // If the directory exists, check whether it is empty; - // if not empty, check whether the contents match those of a container image directory, and overwrite the contents; - // if the contents don't match, throw an error - dirExists, err := pathExists(d.ref.resolvedPath) - if err != nil { - return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath) - } - if dirExists { - isEmpty, err := isDirEmpty(d.ref.resolvedPath) - if err != nil { - return nil, err - } - - if !isEmpty { - versionExists, err := pathExists(d.ref.versionPath()) - if err != nil { - return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath()) - } - if versionExists { - contents, err := ioutil.ReadFile(d.ref.versionPath()) - if err != nil { - return nil, err - } - // check if the contents of the version file are what we expect them to be - if string(contents) != version { - return nil, ErrNotContainerImageDir - } - } else { - return nil, ErrNotContainerImageDir - } - // delete directory contents so that only one image is in the directory at a time - if err = removeDirContents(d.ref.resolvedPath); err != nil { - return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath) - } - logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath) - } - } else { - // create directory if it doesn't exist - if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil { - return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath) - } - } - // create version file - err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644) - if err != nil { - return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath()) - } - return d, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *dirImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *dirImageDestination) Close() error { - return nil -} - -func (d *dirImageDestination) SupportedManifestMIMETypes() []string { - return nil -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error { - return nil -} - -func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression { - if d.compress { - return types.Compress - } - return types.PreserveOriginal -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *dirImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
-func (d *dirImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool { - return false // N/A, DockerReference() returns nil. -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *dirImageDestination) HasThreadSafePutBlob() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// May update cache. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob") - if err != nil { - return types.BlobInfo{}, err - } - succeeded := false - defer func() { - blobFile.Close() - if !succeeded { - os.Remove(blobFile.Name()) - } - }() - - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - size, err := io.Copy(blobFile, tee) - if err != nil { - return types.BlobInfo{}, err - } - computedDigest := digester.Digest() - if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) - } - if err := blobFile.Sync(); err != nil { - return types.BlobInfo{}, err - } - if err := blobFile.Chmod(0644); err != nil { - return types.BlobInfo{}, err - } - blobPath := d.ref.layerPath(computedDigest) - if err := os.Rename(blobFile.Name(), blobPath); err != nil { - return types.BlobInfo{}, err - } - succeeded = true - return types.BlobInfo{Digest: computedDigest, Size: size}, nil -} - -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination -// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). -// info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. -// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache.
-func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - if info.Digest == "" { - return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) - } - blobPath := d.ref.layerPath(info.Digest) - finfo, err := os.Stat(blobPath) - if err != nil && os.IsNotExist(err) { - return false, types.BlobInfo{}, nil - } - if err != nil { - return false, types.BlobInfo{}, err - } - return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil - -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. -func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte) error { - return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644) -} - -func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { - for i, sig := range signatures { - if err := ioutil.WriteFile(d.ref.signaturePath(i), sig, 0644); err != nil { - return err - } - } - return nil -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *dirImageDestination) Commit(ctx context.Context) error { - return nil -} - -// returns true if path exists -func pathExists(path string) (bool, error) { - _, err := os.Stat(path) - if err == nil { - return true, nil - } - if err != nil && os.IsNotExist(err) { - return false, nil - } - return false, err -} - -// returns true if directory is empty -func isDirEmpty(path string) (bool, error) { - files, err := ioutil.ReadDir(path) - if err != nil { - return false, err - } - return len(files) == 0, nil -} - -// deletes the contents of a directory -func removeDirContents(path string) error { - files, err := ioutil.ReadDir(path) - if err != nil { - return err - } - - for _, file := range files { - if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go deleted file mode 100644 index 59b625397..000000000 --- a/vendor/github.com/containers/image/directory/directory_src.go +++ /dev/null @@ -1,96 +0,0 @@ -package directory - -import ( - "context" - "io" - "io/ioutil" - "os" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type dirImageSource struct { - ref dirReference -} - -// newImageSource returns an ImageSource reading from an existing directory. -// The caller must call .Close() on the returned ImageSource. -func newImageSource(ref dirReference) types.ImageSource { - return &dirImageSource{ref} -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g.
to determine which public keys are trusted for this image. -func (s *dirImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *dirImageSource) Close() error { - return nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`) - } - m, err := ioutil.ReadFile(s.ref.manifestPath()) - if err != nil { - return nil, "", err - } - return m, manifest.GuessMIMEType(m), err -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *dirImageSource) HasThreadSafeGetBlob() bool { - return false -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - r, err := os.Open(s.ref.layerPath(info.Digest)) - if err != nil { - return nil, -1, err - } - fi, err := r.Stat() - if err != nil { - return nil, -1, err - } - return r, fi.Size(), nil -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - return nil, errors.Errorf(`Manifest lists are not supported by "dir:"`) - } - signatures := [][]byte{} - for i := 0; ; i++ { - signature, err := ioutil.ReadFile(s.ref.signaturePath(i)) - if err != nil { - if os.IsNotExist(err) { - break - } - return nil, err - } - signatures = append(signatures, signature) - } - return signatures, nil -} - -// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified.
-func (s *dirImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/directory/directory_transport.go b/vendor/github.com/containers/image/directory/directory_transport.go deleted file mode 100644 index 66b9e7258..000000000 --- a/vendor/github.com/containers/image/directory/directory_transport.go +++ /dev/null @@ -1,187 +0,0 @@ -package directory - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - "github.com/containers/image/directory/explicitfilepath" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for directory paths. -var Transport = dirTransport{} - -type dirTransport struct{} - -func (t dirTransport) Name() string { - return "dir" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) { - return NewReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { - if !strings.HasPrefix(scope, "/") { - return errors.Errorf("Invalid scope %s: Must be an absolute path", scope) - } - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry. - if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - cleaned := filepath.Clean(scope) - if cleaned != scope { - return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) - } - return nil -} - -// dirReference is an ImageReference for directory paths. -type dirReference struct { - // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! - // Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on. - - // Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid - // being exposed to symlinks and renames in the parent directories to the working directory). - // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) - path string // As specified by the user. May be relative, contain symlinks, etc. - resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. -} - -// There is no directory.ParseReference because it is rather pointless. -// Callers who need a transport-independent interface will go through -// dirTransport.ParseReference; callers who intentionally deal with directories -// can use directory.NewReference. 
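For illustration, the "dir:" scope rules above (absolute path, not the bare "/", and already filepath.Clean-canonical) pair with the parent-directory walk that PolicyConfigurationNamespaces performs below. A minimal standalone sketch of both follows; validateDirScope and dirNamespaces are hypothetical helper names, not anything exported by containers/image:

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// validateDirScope mirrors dirTransport.ValidatePolicyConfigurationScope:
// a scope must be an absolute path, must not be "/", and must already be
// in filepath.Clean canonical form.
func validateDirScope(scope string) error {
    if !strings.HasPrefix(scope, "/") {
        return fmt.Errorf("invalid scope %s: must be an absolute path", scope)
    }
    if scope == "/" {
        return fmt.Errorf(`invalid scope "/": use the generic default scope ""`)
    }
    if cleaned := filepath.Clean(scope); cleaned != scope {
        return fmt.Errorf("invalid scope %s: non-canonical format, perhaps try %s", scope, cleaned)
    }
    return nil
}

// dirNamespaces mirrors dirReference.PolicyConfigurationNamespaces:
// successively shorter parent directories, excluding "/" itself.
func dirNamespaces(resolvedPath string) []string {
    res := []string{}
    path := resolvedPath
    for {
        lastSlash := strings.LastIndex(path, "/")
        if lastSlash == -1 || lastSlash == 0 {
            break
        }
        path = path[:lastSlash]
        res = append(res, path)
    }
    return res
}

func main() {
    fmt.Println(validateDirScope("/var/lib/images/"))    // rejected: trailing slash is non-canonical
    fmt.Println(dirNamespaces("/var/lib/images/fedora")) // [/var/lib/images /var/lib /var]
}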
-
-// NewReference returns a directory reference for a specified path.
-//
-// We do not expose an API supplying the resolvedPath; we could, but recomputing it
-// is generally cheap enough that we prefer being confident about the properties of resolvedPath.
-func NewReference(path string) (types.ImageReference, error) {
-    resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path)
-    if err != nil {
-        return nil, err
-    }
-    return dirReference{path: path, resolvedPath: resolved}, nil
-}
-
-func (ref dirReference) Transport() types.ImageTransport {
-    return Transport
-}
-
-// StringWithinTransport returns a string representation of the reference, which MUST be such that
-// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
-// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
-// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
-func (ref dirReference) StringWithinTransport() string {
-    return ref.path
-}
-
-// DockerReference returns a Docker reference associated with this reference
-// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
-// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
-func (ref dirReference) DockerReference() reference.Named {
-    return nil
-}
-
-// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
-// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
-// the value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
-// (i.e. various references with exactly the same semantics should return the same configuration identity).
-// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
-// not required/guaranteed that it will be a valid input to Transport().ParseReference().
-// Returns "" if configuration identities for these references are not supported.
-func (ref dirReference) PolicyConfigurationIdentity() string {
-    return ref.resolvedPath
-}
-
-// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
-// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
-// in order, terminating on first match, and an implicit "" is always checked at the end.
-// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
-// and each following element to be a prefix of the element preceding it.
-func (ref dirReference) PolicyConfigurationNamespaces() []string {
-    res := []string{}
-    path := ref.resolvedPath
-    for {
-        lastSlash := strings.LastIndex(path, "/")
-        if lastSlash == -1 || lastSlash == 0 {
-            break
-        }
-        path = path[:lastSlash]
-        res = append(res, path)
-    }
-    // Note that we do not include "/"; it is redundant with the default "" global default,
-    // and rejected by dirTransport.ValidatePolicyConfigurationScope above.
-    return res
-}
-
-// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned ImageCloser.
-// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src := newImageSource(ref) - return image.FromSource(ctx, sys, src) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ref), nil -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - compress := false - if sys != nil { - compress = sys.DirForceCompress - } - return newImageDestination(ref, compress) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for dir: images") -} - -// manifestPath returns a path for the manifest within a directory using our conventions. -func (ref dirReference) manifestPath() string { - return filepath.Join(ref.path, "manifest.json") -} - -// layerPath returns a path for a layer tarball within a directory using our conventions. -func (ref dirReference) layerPath(digest digest.Digest) string { - // FIXME: Should we keep the digest identification? - return filepath.Join(ref.path, digest.Hex()) -} - -// signaturePath returns a path for a signature within a directory using our conventions. -func (ref dirReference) signaturePath(index int) string { - return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)) -} - -// versionPath returns a path for the version file within a directory using our conventions. -func (ref dirReference) versionPath() string { - return filepath.Join(ref.path, "version") -} diff --git a/vendor/github.com/containers/image/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/directory/explicitfilepath/path.go deleted file mode 100644 index 71136b880..000000000 --- a/vendor/github.com/containers/image/directory/explicitfilepath/path.go +++ /dev/null @@ -1,56 +0,0 @@ -package explicitfilepath - -import ( - "os" - "path/filepath" - - "github.com/pkg/errors" -) - -// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path. -// To do so, all elements of the input path must exist; as a special case, the final component may be -// a non-existent name (but not a symlink pointing to a non-existent name) -// This is intended as a a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc. -func ResolvePathToFullyExplicit(path string) (string, error) { - switch _, err := os.Lstat(path); { - case err == nil: - return resolveExistingPathToFullyExplicit(path) - case os.IsNotExist(err): - parent, file := filepath.Split(path) - resolvedParent, err := resolveExistingPathToFullyExplicit(parent) - if err != nil { - return "", err - } - if file == "." || file == ".." 
-            // Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well.
-            // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
-            // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
-            // in the resulting path, and especially not at the end.
-            return "", errors.Errorf("Unexpectedly missing special filename component in %s", path)
-        }
-        resolvedPath := filepath.Join(resolvedParent, file)
-        // As a sanity check, ensure that there are no "." or ".." components.
-        cleanedResolvedPath := filepath.Clean(resolvedPath)
-        if cleanedResolvedPath != resolvedPath {
-            // Coverage: This should never happen.
-            return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
-        }
-        return resolvedPath, nil
-    default: // err != nil, unrecognized
-        return "", err
-    }
-}
-
-// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit,
-// but without the special case for a missing final component.
-func resolveExistingPathToFullyExplicit(path string) (string, error) {
-    resolved, err := filepath.Abs(path)
-    if err != nil {
-        return "", err // Coverage: This can fail only if os.Getwd() fails.
-    }
-    resolved, err = filepath.EvalSymlinks(resolved)
-    if err != nil {
-        return "", err
-    }
-    return filepath.Clean(resolved), nil
-}
diff --git a/vendor/github.com/containers/image/docker/archive/dest.go b/vendor/github.com/containers/image/docker/archive/dest.go
deleted file mode 100644
index c88aea3b3..000000000
--- a/vendor/github.com/containers/image/docker/archive/dest.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package archive
-
-import (
-    "context"
-    "io"
-    "os"
-
-    "github.com/containers/image/docker/tarfile"
-    "github.com/containers/image/types"
-    "github.com/pkg/errors"
-)
-
-type archiveImageDestination struct {
-    *tarfile.Destination // Implements most of types.ImageDestination
-    ref    archiveReference
-    writer io.Closer
-}
-
-func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
-    // ref.path can be either a pipe or a regular file;
-    // in the case of a pipe, we require that we can open it for writing;
-    // in the case of a regular file, we don't want to overwrite any pre-existing file,
-    // so we check for Size() == 0 below. (This is racy, but using O_EXCL would also be racy,
-    // only in a different way. Either way, it's up to the user not to have two writers to the same path.)
-    fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644)
-    if err != nil {
-        return nil, errors.Wrapf(err, "error opening file %q", ref.path)
-    }
-
-    fhStat, err := fh.Stat()
-    if err != nil {
-        return nil, errors.Wrapf(err, "error statting file %q", ref.path)
-    }
-
-    if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
-        return nil, errors.New("docker-archive doesn't support modifying existing images")
-    }
-
-    tarDest := tarfile.NewDestination(fh, ref.destinationRef)
-    if sys != nil && sys.DockerArchiveAdditionalTags != nil {
-        tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
-    }
-    return &archiveImageDestination{
-        Destination: tarDest,
-        ref:         ref,
-        writer:      fh,
-    }, nil
-}
-
-// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
-func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression {
-    return types.Decompress
-}
-
-// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
-// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (d *archiveImageDestination) Reference() types.ImageReference {
-    return d.ref
-}
-
-// Close removes resources associated with an initialized ImageDestination, if any.
-func (d *archiveImageDestination) Close() error {
-    return d.writer.Close()
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *archiveImageDestination) Commit(ctx context.Context) error {
-    return d.Destination.Commit(ctx)
-}
diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/docker/archive/src.go
deleted file mode 100644
index e46c9db4a..000000000
--- a/vendor/github.com/containers/image/docker/archive/src.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package archive
-
-import (
-    "context"
-    "github.com/containers/image/docker/tarfile"
-    "github.com/containers/image/types"
-    "github.com/sirupsen/logrus"
-)
-
-type archiveImageSource struct {
-    *tarfile.Source // Implements most of types.ImageSource
-    ref archiveReference
-}
-
-// newImageSource returns a types.ImageSource for the specified image reference.
-// The caller must call .Close() on the returned ImageSource.
-func newImageSource(ctx context.Context, ref archiveReference) (types.ImageSource, error) {
-    if ref.destinationRef != nil {
-        logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
-    }
-    src, err := tarfile.NewSourceFromFile(ref.path)
-    if err != nil {
-        return nil, err
-    }
-    return &archiveImageSource{
-        Source: src,
-        ref:    ref,
-    }, nil
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (s *archiveImageSource) Reference() types.ImageReference {
-    return s.ref
-}
-
-// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
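The open-then-stat dance in newImageDestination above is worth seeing in isolation: it accepts both pipes and regular files, and refuses only non-empty regular files. A self-contained sketch, with openArchiveForWrite as a hypothetical name; the Size() != 0 test is racy by design, as the original comment notes:

package main

import (
    "fmt"
    "os"
)

func openArchiveForWrite(path string) (*os.File, error) {
    // Works for both pipes and regular files; O_CREATE only matters for the latter.
    fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
    if err != nil {
        return nil, err
    }
    st, err := fh.Stat()
    if err != nil {
        fh.Close()
        return nil, err
    }
    // Refuse to clobber a non-empty regular file; a pipe never matches this test.
    if st.Mode().IsRegular() && st.Size() != 0 {
        fh.Close()
        return nil, fmt.Errorf("%s exists and is not empty; refusing to overwrite", path)
    }
    return fh, nil
}

func main() {
    fh, err := openArchiveForWrite("/tmp/out.tar") // example path
    if err != nil {
        fmt.Println(err)
        return
    }
    defer fh.Close()
    fmt.Println("ready to write image tarball")
}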
-func (s *archiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
-    return nil, nil
-}
diff --git a/vendor/github.com/containers/image/docker/archive/transport.go b/vendor/github.com/containers/image/docker/archive/transport.go
deleted file mode 100644
index f345b343c..000000000
--- a/vendor/github.com/containers/image/docker/archive/transport.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package archive
-
-import (
-    "context"
-    "fmt"
-    "strings"
-
-    "github.com/containers/image/docker/reference"
-    ctrImage "github.com/containers/image/image"
-    "github.com/containers/image/transports"
-    "github.com/containers/image/types"
-    "github.com/pkg/errors"
-)
-
-func init() {
-    transports.Register(Transport)
-}
-
-// Transport is an ImageTransport for local Docker archives.
-var Transport = archiveTransport{}
-
-type archiveTransport struct{}
-
-func (t archiveTransport) Name() string {
-    return "docker-archive"
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
-func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) {
-    return ParseReference(reference)
-}
-
-// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
-// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
-// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
-// scope passed to this function will not be "", that value is always allowed.
-func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
-    // See the explanation in archiveReference.PolicyConfigurationIdentity.
-    return errors.New(`docker-archive: does not support any scopes except the default "" one`)
-}
-
-// archiveReference is an ImageReference for Docker images.
-type archiveReference struct {
-    // destinationRef is only used for destinations; it is optional and can be nil even then.
-    destinationRef reference.NamedTagged
-    path           string
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
-func ParseReference(refString string) (types.ImageReference, error) {
-    if refString == "" {
-        return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
-    }
-
-    parts := strings.SplitN(refString, ":", 2)
-    path := parts[0]
-    var destinationRef reference.NamedTagged
-
-    // A :tag was specified, which is only necessary for destinations.
-    if len(parts) == 2 {
-        ref, err := reference.ParseNormalizedNamed(parts[1])
-        if err != nil {
-            return nil, errors.Wrapf(err, "docker-archive parsing reference")
-        }
-        ref = reference.TagNameOnly(ref)
-
-        if _, isDigest := ref.(reference.Canonical); isDigest {
-            return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString)
-        }
-
-        refTagged, isTagged := ref.(reference.NamedTagged)
-        if !isTagged {
-            // Really shouldn't be hit...
- return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString) - } - destinationRef = refTagged - } - - return archiveReference{ - destinationRef: destinationRef, - path: path, - }, nil -} - -func (ref archiveReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref archiveReference) StringWithinTransport() string { - if ref.destinationRef == nil { - return ref.path - } - return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String()) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref archiveReference) DockerReference() reference.Named { - return ref.destinationRef -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref archiveReference) PolicyConfigurationIdentity() string { - // Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity. - return "" -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref archiveReference) PolicyConfigurationNamespaces() []string { - // TODO - return []string{} -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
-func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
-    src, err := newImageSource(ctx, ref)
-    if err != nil {
-        return nil, err
-    }
-    return ctrImage.FromSource(ctx, sys, src)
-}
-
-// NewImageSource returns a types.ImageSource for this reference.
-// The caller must call .Close() on the returned ImageSource.
-func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
-    return newImageSource(ctx, ref)
-}
-
-// NewImageDestination returns a types.ImageDestination for this reference.
-// The caller must call .Close() on the returned ImageDestination.
-func (ref archiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
-    return newImageDestination(sys, ref)
-}
-
-// DeleteImage deletes the named image from the registry, if supported.
-func (ref archiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
-    // Not really supported, for safety reasons.
-    return errors.New("Deleting images not implemented for docker-archive: images")
-}
diff --git a/vendor/github.com/containers/image/docker/cache.go b/vendor/github.com/containers/image/docker/cache.go
deleted file mode 100644
index 64ad57a7c..000000000
--- a/vendor/github.com/containers/image/docker/cache.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package docker
-
-import (
-    "github.com/containers/image/docker/reference"
-    "github.com/containers/image/types"
-)
-
-// bicTransportScope returns a BICTransportScope appropriate for ref.
-func bicTransportScope(ref dockerReference) types.BICTransportScope {
-    // Blobs can be reused across the whole registry.
-    return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
-}
-
-// newBICLocationReference returns a BICLocationReference appropriate for ref.
-func newBICLocationReference(ref dockerReference) types.BICLocationReference {
-    // Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
-    return types.BICLocationReference{Opaque: ref.ref.Name()}
-}
-
-// parseBICLocationReference returns a repository for encoded lr.
-func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
-    return reference.ParseNormalizedNamed(lr.Opaque)
-}
diff --git a/vendor/github.com/containers/image/docker/daemon/client.go b/vendor/github.com/containers/image/docker/daemon/client.go
deleted file mode 100644
index 26f1b03b7..000000000
--- a/vendor/github.com/containers/image/docker/daemon/client.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package daemon
-
-import (
-    "net/http"
-    "path/filepath"
-
-    "github.com/containers/image/types"
-    dockerclient "github.com/docker/docker/client"
-    "github.com/docker/go-connections/tlsconfig"
-)
-
-const (
-    // The default API version to be used in case none is explicitly specified
-    defaultAPIVersion = "1.22"
-)
-
-// newDockerClient initializes a new API client based on the passed SystemContext.
-func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
-    host := dockerclient.DefaultDockerHost
-    if sys != nil && sys.DockerDaemonHost != "" {
-        host = sys.DockerDaemonHost
-    }
-
-    // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient.
- // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s - // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket - // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. - // - // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client. - // - // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set - // TLSClientConfig to nil. This can be achieved by using the form `http://` - url, err := dockerclient.ParseHostURL(host) - if err != nil { - return nil, err - } - var httpClient *http.Client - if url.Scheme != "unix" { - if url.Scheme == "http" { - httpClient = httpConfig() - } else { - hc, err := tlsConfig(sys) - if err != nil { - return nil, err - } - httpClient = hc - } - } - - return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) -} - -func tlsConfig(sys *types.SystemContext) (*http.Client, error) { - options := tlsconfig.Options{} - if sys != nil && sys.DockerDaemonInsecureSkipTLSVerify { - options.InsecureSkipVerify = true - } - - if sys != nil && sys.DockerDaemonCertPath != "" { - options.CAFile = filepath.Join(sys.DockerDaemonCertPath, "ca.pem") - options.CertFile = filepath.Join(sys.DockerDaemonCertPath, "cert.pem") - options.KeyFile = filepath.Join(sys.DockerDaemonCertPath, "key.pem") - } - - tlsc, err := tlsconfig.Client(options) - if err != nil { - return nil, err - } - - return &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsc, - }, - CheckRedirect: dockerclient.CheckRedirect, - }, nil -} - -func httpConfig() *http.Client { - return &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: nil, - }, - CheckRedirect: dockerclient.CheckRedirect, - } -} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go deleted file mode 100644 index 663086ff6..000000000 --- a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go +++ /dev/null @@ -1,144 +0,0 @@ -package daemon - -import ( - "context" - "io" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/types" - "github.com/docker/docker/client" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type daemonImageDestination struct { - ref daemonReference - mustMatchRuntimeOS bool - *tarfile.Destination // Implements most of types.ImageDestination - // For talking to imageLoadGoroutine - goroutineCancel context.CancelFunc - statusChannel <-chan error - writer *io.PipeWriter - // Other state - committed bool // writer has been closed -} - -// newImageDestination returns a types.ImageDestination for the specified image reference. 
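The scheme dispatch described above (a nil client for unix://, a TLS-less client for http://, a TLS-configured one otherwise) can be shown without the Docker client library at all. A standalone sketch, assuming the hypothetical pickHTTPClient and using net/url in place of dockerclient.ParseHostURL:

package main

import (
    "crypto/tls"
    "fmt"
    "net/http"
    "net/url"
)

func pickHTTPClient(host string) (*http.Client, error) {
    u, err := url.Parse(host)
    if err != nil {
        return nil, err
    }
    switch u.Scheme {
    case "unix":
        return nil, nil // let the Docker client configure the socket transport itself
    case "http":
        return &http.Client{Transport: &http.Transport{TLSClientConfig: nil}}, nil
    default:
        return &http.Client{Transport: &http.Transport{
            TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS10},
        }}, nil
    }
}

func main() {
    for _, h := range []string{"unix:///var/run/docker.sock", "http://10.0.0.5:2375", "tcp://10.0.0.5:2376"} {
        c, _ := pickHTTPClient(h)
        fmt.Printf("%-32s -> client == nil: %v\n", h, c == nil)
    }
}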
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { - if ref.ref == nil { - return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) - } - namedTaggedRef, ok := ref.ref.(reference.NamedTagged) - if !ok { - return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) - } - - var mustMatchRuntimeOS = true - if sys != nil && sys.DockerDaemonHost != client.DefaultDockerHost { - mustMatchRuntimeOS = false - } - - c, err := newDockerClient(sys) - if err != nil { - return nil, errors.Wrap(err, "Error initializing docker engine client") - } - - reader, writer := io.Pipe() - // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. - statusChannel := make(chan error, 1) - - goroutineContext, goroutineCancel := context.WithCancel(ctx) - go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) - - return &daemonImageDestination{ - ref: ref, - mustMatchRuntimeOS: mustMatchRuntimeOS, - Destination: tarfile.NewDestination(writer, namedTaggedRef), - goroutineCancel: goroutineCancel, - statusChannel: statusChannel, - writer: writer, - committed: false, - }, nil -} - -// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel -func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) { - err := errors.New("Internal error: unexpected panic in imageLoadGoroutine") - defer func() { - logrus.Debugf("docker-daemon: sending done, status %v", err) - statusChannel <- err - }() - defer func() { - if err == nil { - reader.Close() - } else { - reader.CloseWithError(err) - } - }() - - resp, err := c.ImageLoad(ctx, reader, true) - if err != nil { - err = errors.Wrap(err, "Error saving image to docker engine") - return - } - defer resp.Body.Close() -} - -// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved -func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompression { - return types.PreserveOriginal -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *daemonImageDestination) MustMatchRuntimeOS() bool { - return d.mustMatchRuntimeOS -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *daemonImageDestination) Close() error { - if !d.committed { - logrus.Debugf("docker-daemon: Closing tar stream to abort loading") - // In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing. - // In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including - // https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the - // net/http version with native Context support in Go 1.7) do not always actually immediately cancel - // the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and - // return early if the context is canceled without terminating the goroutine at all. 
-        // So we need this CloseWithError to terminate sending the HTTP request Body
-        // immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending
-        // the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
-        // Whether that works or not, closing the PipeWriter seems desirable in any case.
-        d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()"))
-    }
-    d.goroutineCancel()
-
-    return nil
-}
-
-func (d *daemonImageDestination) Reference() types.ImageReference {
-    return d.ref
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *daemonImageDestination) Commit(ctx context.Context) error {
-    logrus.Debugf("docker-daemon: Closing tar stream")
-    if err := d.Destination.Commit(ctx); err != nil {
-        return err
-    }
-    if err := d.writer.Close(); err != nil {
-        return err
-    }
-    d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine.
-
-    logrus.Debugf("docker-daemon: Waiting for status")
-    select {
-    case <-ctx.Done():
-        return ctx.Err()
-    case err := <-d.statusChannel:
-        return err
-    }
-}
diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/docker/daemon/daemon_src.go
deleted file mode 100644
index 89e66eff8..000000000
--- a/vendor/github.com/containers/image/docker/daemon/daemon_src.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package daemon
-
-import (
-    "context"
-
-    "github.com/containers/image/docker/tarfile"
-    "github.com/containers/image/types"
-    "github.com/pkg/errors"
-)
-
-type daemonImageSource struct {
-    ref             daemonReference
-    *tarfile.Source // Implements most of types.ImageSource
-}
-
-type layerInfo struct {
-    path string
-    size int64
-}
-
-// newImageSource returns a types.ImageSource for the specified image reference.
-// The caller must call .Close() on the returned ImageSource.
-//
-// It would be great if we were able to stream the input tar as it is being
-// sent; but Docker sends the top-level manifest, which determines which paths
-// to look for, at the end, so we will need to seek back and re-read, several times.
-// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
-// is the config, and that the following len(RootFS) files are the layers, but that feels
-// way too brittle.)
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
-    c, err := newDockerClient(sys)
-    if err != nil {
-        return nil, errors.Wrap(err, "Error initializing docker engine client")
-    }
-    // Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
-    // Either way ImageSave should create a tarball with exactly one image.
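Before the source-side code continues, the pipe-plus-goroutine shape used by daemon_dest.go above deserves a distilled form: a status channel buffered to size one lets the loading goroutine finish and report even when Commit() is never called. A self-contained sketch, with io.Copy to ioutil.Discard standing in for the real c.ImageLoad call:

package main

import (
    "fmt"
    "io"
    "io/ioutil"
)

func main() {
    reader, writer := io.Pipe()
    status := make(chan error, 1) // buffered: the goroutine can exit even if nobody reads

    go func() {
        var err error
        defer func() { status <- err }()
        // Stand-in for c.ImageLoad(ctx, reader, true): consume the whole stream.
        _, err = io.Copy(ioutil.Discard, reader)
    }()

    // "Commit" path: finish the stream, then wait for the consumer's verdict.
    fmt.Fprintln(writer, "tar stream bytes would go here")
    writer.Close()
    fmt.Println("load status:", <-status)

    // On the abort path, the original instead calls writer.CloseWithError(...)
    // so the consumer fails promptly rather than blocking on a half-sent body.
}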
- inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()}) - if err != nil { - return nil, errors.Wrap(err, "Error loading image from docker engine") - } - defer inputStream.Close() - - src, err := tarfile.NewSourceFromStream(inputStream) - if err != nil { - return nil, err - } - return &daemonImageSource{ - ref: ref, - Source: src, - }, nil -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *daemonImageSource) Reference() types.ImageReference { - return s.ref -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *daemonImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go deleted file mode 100644 index 1a265bf76..000000000 --- a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go +++ /dev/null @@ -1,223 +0,0 @@ -package daemon - -import ( - "context" - "fmt" - - "github.com/containers/image/docker/policyconfiguration" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for images managed by a local Docker daemon. -var Transport = daemonTransport{} - -type daemonTransport struct{} - -// Name returns the name of the transport, which must be unique among other transports. -func (t daemonTransport) Name() string { - return "docker-daemon" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error { - // ID values cannot be effectively namespaced, and are clearly invalid host:port values. - if _, err := digest.Parse(scope); err == nil { - return errors.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope) - } - - // FIXME? We could be verifying the various character set and length restrictions - // from docker/distribution/reference.regexp.go, but other than that there - // are few semantically invalid strings. - return nil -} - -// daemonReference is an ImageReference for images managed by a local Docker daemon -// Exactly one of id and ref can be set. -// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon) -// For daemonImageDestination, it must be a ref, which is NamedTagged. 
-// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest. -// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) -type daemonReference struct { - id digest.Digest - ref reference.Named // !reference.IsNameOnly -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func ParseReference(refString string) (types.ImageReference, error) { - // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases. - // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars). - - // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag). - // reference.ParseAnyReference interprets such strings as digests. - if dgst, err := digest.Parse(refString); err == nil { - // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. - // Other digest references are ambiguous, so refuse them. - if dgst.Algorithm() != digest.Canonical { - return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) - } - return NewReference(dgst, nil) - } - - ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values - if err != nil { - return nil, err - } - if reference.FamiliarName(ref) == digest.Canonical.String() { - return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) - } - return NewReference("", ref) -} - -// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) -func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { - if id != "" && ref != nil { - return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") - } - if ref != nil { - if reference.IsNameOnly(ref) { - return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) - } - // A github.com/distribution/reference value can have a tag and a digest at the same time! - // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input. - // This MAY be accepted in the future. - // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop - // the tag or the digest first?) - _, isTagged := ref.(reference.NamedTagged) - _, isDigested := ref.(reference.Canonical) - if isTagged && isDigested { - return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported") - } - } - return daemonReference{ - id: id, - ref: ref, - }, nil -} - -func (ref daemonReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. 
-// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; -// instead, see transports.ImageName(). -func (ref daemonReference) StringWithinTransport() string { - switch { - case ref.id != "": - return ref.id.String() - case ref.ref != nil: - return reference.FamiliarString(ref.ref) - default: // Coverage: Should never happen, NewReference above should refuse such values. - panic("Internal inconsistency: daemonReference has empty id and nil ref") - } -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref daemonReference) DockerReference() reference.Named { - return ref.ref // May be nil -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref daemonReference) PolicyConfigurationIdentity() string { - // We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible. - // But the existence of image IDs means that we can’t truly well namespace the input: - // a single image can be namespaced either using the name or the ID depending on how it is named. - // - // That’s fairly unexpected, but we have to cope somehow. - // - // So, use the ordinary docker/policyconfiguration namespacing for named images. - // image IDs all fall into the root namespace. - // Users can set up the root namespace to be either untrusted or rejected, - // and to set up specific trust for named namespaces. This allows verifying image - // identity when a name is known, and unnamed images would be untrusted or rejected. - switch { - case ref.id != "": - return "" // This still allows using the default "" scope to define a global policy for ID-identified images. - case ref.ref != nil: - res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) - if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. - panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) - } - return res - default: // Coverage: Should never happen, NewReference above should refuse such values. - panic("Internal inconsistency: daemonReference has empty id and nil ref") - } -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. 
The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref daemonReference) PolicyConfigurationNamespaces() []string { - // See the explanation in daemonReference.PolicyConfigurationIdentity. - switch { - case ref.id != "": - return []string{} - case ref.ref != nil: - return policyconfiguration.DockerReferenceNamespaces(ref.ref) - default: // Coverage: Should never happen, NewReference above should refuse such values. - panic("Internal inconsistency: daemonReference has empty id and nil ref") - } -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, sys, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref daemonReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, sys, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref daemonReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ctx, sys, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - // Should this just untag the image? Should this stop running containers? - // The semantics is not quite as clear as for remote repositories. - // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant. 
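The ID-versus-name disambiguation that PolicyConfigurationIdentity and ParseReference above rely on can be sketched with the real go-digest and docker/distribution reference packages; classify is a hypothetical helper, and the reserved-repository-name check is simplified away:

package main

import (
    "fmt"

    "github.com/docker/distribution/reference"
    digest "github.com/opencontainers/go-digest"
)

func classify(s string) string {
    if dgst, err := digest.Parse(s); err == nil {
        if dgst.Algorithm() != digest.Canonical {
            return "rejected: only " + digest.Canonical.String() + " image IDs are accepted"
        }
        return "image ID " + dgst.String()
    }
    ref, err := reference.ParseNormalizedNamed(s) // also rejects unprefixed 64-hex strings
    if err != nil {
        return "invalid: " + err.Error()
    }
    return "named reference " + ref.String()
}

func main() {
    fmt.Println(classify("sha256:aa11223344556677889900aabbccddeeff00112233445566778899aabbccddee"))
    fmt.Println(classify("fedora:31"))
}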
- return errors.Errorf("Deleting images not implemented for docker-daemon: images") -} diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go deleted file mode 100644 index 48427f3d3..000000000 --- a/vendor/github.com/containers/image/docker/docker_client.go +++ /dev/null @@ -1,643 +0,0 @@ -package docker - -import ( - "context" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/pkg/docker/config" - "github.com/containers/image/pkg/sysregistriesv2" - "github.com/containers/image/pkg/tlsclientconfig" - "github.com/containers/image/types" - "github.com/docker/distribution/registry/client" - "github.com/docker/go-connections/tlsconfig" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - dockerHostname = "docker.io" - dockerV1Hostname = "index.docker.io" - dockerRegistry = "registry-1.docker.io" - - resolvedPingV2URL = "%s://%s/v2/" - resolvedPingV1URL = "%s://%s/v1/_ping" - tagsPath = "/v2/%s/tags/list" - manifestPath = "/v2/%s/manifests/%s" - blobsPath = "/v2/%s/blobs/%s" - blobUploadPath = "/v2/%s/blobs/uploads/" - extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" - - minimumTokenLifetimeSeconds = 60 - - extensionSignatureSchemaVersion = 2 // extensionSignature.Version - extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type -) - -var ( - // ErrV1NotSupported is returned when we're trying to talk to a - // docker V1 registry. - ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") - // ErrUnauthorizedForCredentials is returned when the status code returned is 401 - ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password") - systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"} -) - -// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: -// signature represents a Docker image signature. -type extensionSignature struct { - Version int `json:"schemaVersion"` // Version specifies the schema version - Name string `json:"name"` // Name must be in "sha256:@signatureName" format - Type string `json:"type"` // Type is optional, of not set it will be defaulted to "AtomicImageV1" - Content []byte `json:"content"` // Content contains the signature -} - -// signatureList represents list of Docker image signatures. -type extensionSignatureList struct { - Signatures []extensionSignature `json:"signatures"` -} - -type bearerToken struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - expirationTime time.Time -} - -// dockerClient is configuration for dealing with a single Docker registry. -type dockerClient struct { - // The following members are set by newDockerClient and do not change afterwards. - sys *types.SystemContext - registry string - - // tlsClientConfig is setup by newDockerClient and will be used and updated - // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime. - tlsClientConfig *tls.Config - // The following members are not set by newDockerClient and must be set by callers if needed. 
-    username      string
-    password      string
-    signatureBase signatureStorageBase
-    scope         authScope
-
-    // The following members are detected registry properties:
-    // They are set after a successful detectProperties(), and never change afterwards.
-    client             *http.Client
-    scheme             string
-    challenges         []challenge
-    supportsSignatures bool
-
-    // Private state for setupRequestAuth (key: string, value: bearerToken)
-    tokenCache sync.Map
-    // Private state for detectProperties:
-    detectPropertiesOnce  sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
-    detectPropertiesError error     // detectPropertiesError caches the initial error.
-}
-
-type authScope struct {
-    remoteName string
-    actions    string
-}
-
-// sendAuth determines whether we need authentication for v2 or v1 endpoint.
-type sendAuth int
-
-const (
-    // v2 endpoint with authentication.
-    v2Auth sendAuth = iota
-    // v1 endpoint with authentication.
-    // TODO: Get v1Auth working
-    // v1Auth
-    // no authentication, works for both v1 and v2.
-    noAuth
-)
-
-func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
-    token := new(bearerToken)
-    if err := json.Unmarshal(blob, &token); err != nil {
-        return nil, err
-    }
-    if token.Token == "" {
-        token.Token = token.AccessToken
-    }
-    if token.ExpiresIn < minimumTokenLifetimeSeconds {
-        token.ExpiresIn = minimumTokenLifetimeSeconds
-        logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
-    }
-    if token.IssuedAt.IsZero() {
-        token.IssuedAt = time.Now().UTC()
-    }
-    token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
-    return token, nil
-}
-
-// This is cloned from docker/go-connections because upstream docker has changed
-// it, and "make deps" here fails otherwise.
-// We'll drop this once we upgrade to docker 1.13.x deps.
-func serverDefault() *tls.Config {
-    return &tls.Config{
-        // Avoid fallback to SSL protocols < TLS1.0
-        MinVersion:               tls.VersionTLS10,
-        PreferServerCipherSuites: true,
-        CipherSuites:             tlsconfig.DefaultServerAcceptedCiphers,
-    }
-}
-
-// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on sys and hostPort.
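newBearerTokenFromJSONBlob above normalizes three quirks of registry token endpoints: some return access_token instead of token, some advertise very short lifetimes, and some omit issued_at. A standalone sketch of the same normalization; parseToken is a hypothetical name, and the 60-second floor matches minimumTokenLifetimeSeconds:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

type bearerToken struct {
    Token          string    `json:"token"`
    AccessToken    string    `json:"access_token"`
    ExpiresIn      int       `json:"expires_in"`
    IssuedAt       time.Time `json:"issued_at"`
    expirationTime time.Time
}

func parseToken(blob []byte) (*bearerToken, error) {
    t := &bearerToken{}
    if err := json.Unmarshal(blob, t); err != nil {
        return nil, err
    }
    if t.Token == "" {
        t.Token = t.AccessToken // some registries only send access_token
    }
    if t.ExpiresIn < 60 { // clamp to the minimum lifetime
        t.ExpiresIn = 60
    }
    if t.IssuedAt.IsZero() {
        t.IssuedAt = time.Now().UTC()
    }
    t.expirationTime = t.IssuedAt.Add(time.Duration(t.ExpiresIn) * time.Second)
    return t, nil
}

func main() {
    tok, err := parseToken([]byte(`{"access_token":"abc","expires_in":5}`))
    fmt.Println(tok.Token, tok.ExpiresIn, err) // abc 60 <nil>
}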
-func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
-    if sys != nil && sys.DockerCertPath != "" {
-        return sys.DockerCertPath, nil
-    }
-    if sys != nil && sys.DockerPerHostCertDirPath != "" {
-        return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil
-    }
-
-    var (
-        hostCertDir     string
-        fullCertDirPath string
-    )
-    for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths {
-        if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
-            hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
-        } else {
-            hostCertDir = systemPerHostCertDirPath
-        }
-
-        fullCertDirPath = filepath.Join(hostCertDir, hostPort)
-        _, err := os.Stat(fullCertDirPath)
-        if err == nil {
-            break
-        }
-        if os.IsNotExist(err) {
-            continue
-        }
-        if os.IsPermission(err) {
-            logrus.Debugf("error accessing certs directory due to permissions: %v", err)
-            continue
-        }
-        if err != nil {
-            return "", err
-        }
-    }
-    return fullCertDirPath, nil
-}
-
-// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
-// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
-func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
-    registry := reference.Domain(ref.ref)
-    username, password, err := config.GetAuthentication(sys, registry)
-    if err != nil {
-        return nil, errors.Wrapf(err, "error getting username and password")
-    }
-    sigBase, err := configuredSignatureStorageBase(sys, ref, write)
-    if err != nil {
-        return nil, err
-    }
-
-    client, err := newDockerClient(sys, registry, ref.ref.Name())
-    if err != nil {
-        return nil, err
-    }
-    client.username = username
-    client.password = password
-    client.signatureBase = sigBase
-    client.scope.actions = actions
-    client.scope.remoteName = reference.Path(ref.ref)
-    return client, nil
-}
-
-// newDockerClient returns a new dockerClient instance for the given registry
-// and reference. The reference is used to query the registry configuration
-// and can be either a registry (e.g., "registry.com[:5000]") or a repository
-// (e.g., "registry.com[:5000][/some/namespace]/repo").
-// Please note that newDockerClient does not set all members of dockerClient
-// (e.g., username and password); those must be set by callers if necessary.
-func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
-    hostName := registry
-    if registry == dockerHostname {
-        registry = dockerRegistry
-    }
-    tlsClientConfig := serverDefault()
-
-    // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
-    // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
-    // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
-    // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
-    // undocumented and may change if docker/docker changes.
-    certDir, err := dockerCertDir(sys, hostName)
-    if err != nil {
-        return nil, err
-    }
-    if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil {
-        return nil, err
-    }
-
-    // Check if TLS verification shall be skipped (default=false), which can
-    // be specified in the sysregistriesv2 configuration.
-// CheckAuth validates the credentials by attempting to log into the registry;
-// it returns an error if an error occurred while making the http request or the status code received was 401
-func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
-	client, err := newDockerClient(sys, registry, registry)
-	if err != nil {
-		return errors.Wrapf(err, "error creating new docker client")
-	}
-	client.username = username
-	client.password = password
-
-	resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-
-	switch resp.StatusCode {
-	case http.StatusOK:
-		return nil
-	case http.StatusUnauthorized:
-		return ErrUnauthorizedForCredentials
-	default:
-		return errors.Errorf("error occurred with status code %d (%s)", resp.StatusCode, http.StatusText(resp.StatusCode))
-	}
-}
-
-// SearchResult holds the information of each matching image.
-// It matches the output returned by the v1 endpoint.
-type SearchResult struct {
-	Name        string `json:"name"`
-	Description string `json:"description"`
-	// StarCount states the number of stars the image has
-	StarCount int  `json:"star_count"`
-	IsTrusted bool `json:"is_trusted"`
-	// IsAutomated states whether the image is an automated build
-	IsAutomated bool `json:"is_automated"`
-	// IsOfficial states whether the image is an official build
-	IsOfficial bool `json:"is_official"`
-}
-
-// SearchRegistry queries a registry for images that contain "image" in their name.
-// The limit is the max number of results desired.
-// Note: the limit value doesn't work with all registries; for example,
-// registry.access.redhat.com returns all the results without limiting them to the limit value.
-func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) {
-	type V2Results struct {
-		// Repositories holds the results returned by the /v2/_catalog endpoint
-		Repositories []string `json:"repositories"`
-	}
-	type V1Results struct {
-		// Results holds the results returned by the /v1/search endpoint
-		Results []SearchResult `json:"results"`
-	}
-	v2Res := &V2Results{}
-	v1Res := &V1Results{}
-
-	// Get credentials from authfile for the underlying hostname
-	username, password, err := config.GetAuthentication(sys, registry)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error getting username and password")
-	}
-
-	// The /v2/_catalog endpoint has been disabled for docker.io, therefore
-	// the call made to that endpoint will fail. So use the v1 hostname
-	// for docker.io, for simplicity of implementation and the fact that it
-	// returns search results.
-	hostname := registry
-	if registry == dockerHostname {
-		hostname = dockerV1Hostname
-	}
-
-	client, err := newDockerClient(sys, hostname, registry)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error creating new docker client")
-	}
-	client.username = username
-	client.password = password
-
-	// Only try the v1 search endpoint if the search query is not empty. If it is
-	// empty, skip to the v2 endpoint.
-	if image != "" {
-		// set up the query values for the v1 endpoint
-		u := url.URL{
-			Path: "/v1/search",
-		}
-		q := u.Query()
-		q.Set("q", image)
-		q.Set("n", strconv.Itoa(limit))
-		u.RawQuery = q.Encode()
-
-		logrus.Debugf("trying to talk to v1 search endpoint")
-		resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil)
-		if err != nil {
-			logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
-		} else {
-			defer resp.Body.Close()
-			if resp.StatusCode != http.StatusOK {
-				logrus.Debugf("error getting search results from v1 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode))
-			} else {
-				if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
-					return nil, err
-				}
-				return v1Res.Results, nil
-			}
-		}
-	}
-
-	logrus.Debugf("trying to talk to v2 search endpoint")
-	resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth, nil)
-	if err != nil {
-		logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
-	} else {
-		defer resp.Body.Close()
-		if resp.StatusCode != http.StatusOK {
-			logrus.Errorf("error getting search results from v2 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode))
-		} else {
-			if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
-				return nil, err
-			}
-			searchRes := []SearchResult{}
-			for _, repo := range v2Res.Repositories {
-				if strings.Contains(repo, image) {
-					res := SearchResult{
-						Name: repo,
-					}
-					searchRes = append(searchRes, res)
-				}
-			}
-			return searchRes, nil
-		}
-	}
-
-	return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
-}
-
-// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
-// The host name and scheme are taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
-func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) {
-	if err := c.detectProperties(ctx); err != nil {
-		return nil, err
-	}
-
-	url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
-	return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
-}
-
-// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
-// streamLen, if not -1, specifies the length of the data expected on stream.
-// makeRequest should generally be preferred.
-// TODO(runcom): too many arguments here, use a struct -func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { - req, err := http.NewRequest(method, url, stream) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. - req.ContentLength = streamLen - } - req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") - for n, h := range headers { - for _, hh := range h { - req.Header.Add(n, hh) - } - } - if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { - req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) - } - if auth == v2Auth { - if err := c.setupRequestAuth(req, extraScope); err != nil { - return nil, err - } - } - logrus.Debugf("%s %s", method, url) - res, err := c.client.Do(req) - if err != nil { - return nil, err - } - return res, nil -} - -// we're using the challenges from the /v2/ ping response and not the one from the destination -// URL in this request because: -// -// 1) docker does that as well -// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request -// -// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up -func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error { - if len(c.challenges) == 0 { - return nil - } - schemeNames := make([]string, 0, len(c.challenges)) - for _, challenge := range c.challenges { - schemeNames = append(schemeNames, challenge.Scheme) - switch challenge.Scheme { - case "basic": - req.SetBasicAuth(c.username, c.password) - return nil - case "bearer": - cacheKey := "" - scopes := []authScope{c.scope} - if extraScope != nil { - // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons). 
- cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions) - scopes = append(scopes, *extraScope) - } - var token bearerToken - t, inCache := c.tokenCache.Load(cacheKey) - if inCache { - token = t.(bearerToken) - } - if !inCache || time.Now().After(token.expirationTime) { - t, err := c.getBearerToken(req.Context(), challenge, scopes) - if err != nil { - return err - } - token = *t - c.tokenCache.Store(cacheKey, token) - } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token)) - return nil - default: - logrus.Debugf("no handler for %s authentication", challenge.Scheme) - } - } - logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) - return nil -} - -func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) { - realm, ok := challenge.Parameters["realm"] - if !ok { - return nil, errors.Errorf("missing realm in bearer auth challenge") - } - - authReq, err := http.NewRequest("GET", realm, nil) - if err != nil { - return nil, err - } - authReq = authReq.WithContext(ctx) - getParams := authReq.URL.Query() - if c.username != "" { - getParams.Add("account", c.username) - } - if service, ok := challenge.Parameters["service"]; ok && service != "" { - getParams.Add("service", service) - } - for _, scope := range scopes { - if scope.remoteName != "" && scope.actions != "" { - getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions)) - } - } - authReq.URL.RawQuery = getParams.Encode() - if c.username != "" && c.password != "" { - authReq.SetBasicAuth(c.username, c.password) - } - logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) - res, err := c.client.Do(authReq) - if err != nil { - return nil, err - } - defer res.Body.Close() - switch res.StatusCode { - case http.StatusUnauthorized: - return nil, ErrUnauthorizedForCredentials - case http.StatusOK: - break - default: - return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL) - } - tokenBlob, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - - return newBearerTokenFromJSONBlob(tokenBlob) -} - -// detectPropertiesHelper performs the work of detectProperties which executes -// it at most once. 
-func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { - // We overwrite the TLS clients `InsecureSkipVerify` only if explicitly - // specified by the system context - if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined { - c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue - } - tr := tlsclientconfig.NewTransport() - tr.TLSClientConfig = c.tlsClientConfig - c.client = &http.Client{Transport: tr} - - ping := func(scheme string) error { - url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) - resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) - if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) - return err - } - defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { - return errors.Errorf("error pinging registry %s, response code %d (%s)", c.registry, resp.StatusCode, http.StatusText(resp.StatusCode)) - } - c.challenges = parseAuthHeader(resp.Header) - c.scheme = scheme - c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" - return nil - } - err := ping("https") - if err != nil && c.tlsClientConfig.InsecureSkipVerify { - err = ping("http") - } - if err != nil { - err = errors.Wrap(err, "pinging docker registry returned") - if c.sys != nil && c.sys.DockerDisableV1Ping { - return err - } - // best effort to understand if we're talking to a V1 registry - pingV1 := func(scheme string) bool { - url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) - resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) - if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) - return false - } - defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { - return false - } - return true - } - isV1 := pingV1("https") - if !isV1 && c.tlsClientConfig.InsecureSkipVerify { - isV1 = pingV1("http") - } - if isV1 { - err = ErrV1NotSupported - } - } - return err -} - -// detectProperties detects various properties of the registry. -// See the dockerClient documentation for members which are affected by this. -func (c *dockerClient) detectProperties(ctx context.Context) error { - c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) }) - return c.detectPropertiesError -} - -// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, -// using the original data structures. 
-func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
-	path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
-	res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
-	if err != nil {
-		return nil, err
-	}
-	defer res.Body.Close()
-	if res.StatusCode != http.StatusOK {
-		return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
-	}
-	body, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, err
-	}
-
-	var parsedBody extensionSignatureList
-	if err := json.Unmarshal(body, &parsedBody); err != nil {
-		return nil, errors.Wrapf(err, "Error decoding signature list")
-	}
-	return &parsedBody, nil
-}
diff --git a/vendor/github.com/containers/image/docker/docker_image.go b/vendor/github.com/containers/image/docker/docker_image.go
deleted file mode 100644
index 744667f54..000000000
--- a/vendor/github.com/containers/image/docker/docker_image.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package docker
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-
-	"github.com/containers/image/docker/reference"
-	"github.com/containers/image/image"
-	"github.com/containers/image/types"
-	"github.com/pkg/errors"
-)
-
-// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods
-// which are specific to Docker.
-type Image struct {
-	types.ImageCloser
-	src *dockerImageSource
-}
-
-// newImage returns a new Image interface type after setting up
-// a client to the registry hosting the given image.
-// The caller must call .Close() on the returned Image.
-func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) {
-	s, err := newImageSource(ctx, sys, ref)
-	if err != nil {
-		return nil, err
-	}
-	img, err := image.FromSource(ctx, sys, s)
-	if err != nil {
-		return nil, err
-	}
-	return &Image{ImageCloser: img, src: s}, nil
-}
-
-// SourceRefFullName returns a fully expanded name for the repository this image is in.
-func (i *Image) SourceRefFullName() string {
-	return i.src.ref.ref.Name()
-}
-
-// GetRepositoryTags lists all tags available in the repository. The tag
-// provided inside the ImageReference will be ignored. (This is a
-// backward-compatible shim method which calls the module-level
-// GetRepositoryTags)
-func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) {
-	return GetRepositoryTags(ctx, i.src.c.sys, i.src.ref)
-}
-
-// GetRepositoryTags lists all tags available in the repository. The tag
-// provided inside the ImageReference will be ignored.
-func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) { - dr, ok := ref.(dockerReference) - if !ok { - return nil, errors.Errorf("ref must be a dockerReference") - } - - path := fmt.Sprintf(tagsPath, reference.Path(dr.ref)) - client, err := newDockerClientFromRef(sys, dr, false, "pull") - if err != nil { - return nil, errors.Wrap(err, "failed to create client") - } - - tags := make([]string, 0) - - for { - res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - // print url also - return nil, errors.Errorf("Invalid status code returned when fetching tags list %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) - } - - var tagsHolder struct { - Tags []string - } - if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil { - return nil, err - } - tags = append(tags, tagsHolder.Tags...) - - link := res.Header.Get("Link") - if link == "" { - break - } - - linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") - linkURL, err := url.Parse(linkURLStr) - if err != nil { - return tags, err - } - - // can be relative or absolute, but we only want the path (and I - // guess we're in trouble if it forwards to a new place...) - path = linkURL.Path - if linkURL.RawQuery != "" { - path += "?" - path += linkURL.RawQuery - } - } - return tags, nil -} diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go deleted file mode 100644 index c116cbec3..000000000 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ /dev/null @@ -1,611 +0,0 @@ -package docker - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/blobinfocache/none" - "github.com/containers/image/types" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type dockerImageDestination struct { - ref dockerReference - c *dockerClient - // State - manifestDigest digest.Digest // or "" if not yet known. -} - -// newImageDestination creates a new ImageDestination for the specified image reference. -func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) { - c, err := newDockerClientFromRef(sys, ref, true, "pull,push") - if err != nil { - return nil, err - } - return &dockerImageDestination{ - ref: ref, - c: c, - }, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *dockerImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. 
-func (d *dockerImageDestination) Close() error { - return nil -} - -func (d *dockerImageDestination) SupportedManifestMIMETypes() []string { - return []string{ - imgspecv1.MediaTypeImageManifest, - manifest.DockerV2Schema2MediaType, - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error { - if err := d.c.detectProperties(ctx); err != nil { - return err - } - switch { - case d.c.signatureBase != nil: - return nil - case d.c.supportsSignatures: - return nil - default: - return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured") - } -} - -func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression { - return types.Compress -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { - return true -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *dockerImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool { - return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match. -} - -// sizeCounter is an io.Writer which only counts the total size of its input. -type sizeCounter struct{ size int64 } - -func (c *sizeCounter) Write(p []byte) (n int, err error) { - c.size += int64(len(p)) - return len(p), nil -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *dockerImageDestination) HasThreadSafePutBlob() bool { - return true -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// May update cache. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - if inputInfo.Digest.String() != "" { - // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. - // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. 
- // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_. - haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false) - if err != nil { - return types.BlobInfo{}, err - } - if haveBlob { - return reusedInfo, nil - } - } - - // FIXME? Chunked upload, progress reporting, etc. - uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)) - logrus.Debugf("Uploading %s", uploadPath) - res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil) - if err != nil { - return types.BlobInfo{}, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusAccepted { - logrus.Debugf("Error initiating layer upload, response %#v", *res) - return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry) - } - uploadLocation, err := res.Location() - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") - } - - digester := digest.Canonical.Digester() - sizeCounter := &sizeCounter{} - tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter)) - res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil) - if err != nil { - logrus.Debugf("Error uploading layer chunked, response %#v", res) - return types.BlobInfo{}, err - } - defer res.Body.Close() - computedDigest := digester.Digest() - - uploadLocation, err = res.Location() - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") - } - - // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope) - - locationQuery := uploadLocation.Query() - // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717 - locationQuery.Set("digest", computedDigest.String()) - uploadLocation.RawQuery = locationQuery.Encode() - res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) - if err != nil { - return types.BlobInfo{}, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusCreated { - logrus.Debugf("Error uploading layer, response %#v", *res) - return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation) - } - - logrus.Debugf("Upload of layer %s complete", computedDigest) - cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref)) - return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil -} - -// blobExists returns true iff repo contains a blob with digest, and if so, also its size. -// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. 
-func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
-	checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
-	logrus.Debugf("Checking %s", checkPath)
-	res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope)
-	if err != nil {
-		return false, -1, err
-	}
-	defer res.Body.Close()
-	switch res.StatusCode {
-	case http.StatusOK:
-		logrus.Debugf("... already exists")
-		return true, getBlobSize(res), nil
-	case http.StatusUnauthorized:
-		logrus.Debugf("... not authorized")
-		return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
-	case http.StatusNotFound:
-		logrus.Debugf("... not present")
-		return false, -1, nil
-	default:
-		return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
-	}
-}
-
-// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
-func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
-	u := url.URL{
-		Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
-		RawQuery: url.Values{
-			"mount": {srcDigest.String()},
-			"from":  {reference.Path(srcRepo)},
-		}.Encode(),
-	}
-	mountPath := u.String()
-	logrus.Debugf("Trying to mount %s", mountPath)
-	res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope)
-	if err != nil {
-		return err
-	}
-	defer res.Body.Close()
-	switch res.StatusCode {
-	case http.StatusCreated:
-		logrus.Debugf("... mount OK")
-		return nil
-	case http.StatusAccepted:
-		// Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
-		// Abort, and let the ultimate caller do an upload when it's ready, instead.
-		// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
-		uploadLocation, err := res.Location()
-		if err != nil {
-			return errors.Wrap(err, "Error determining upload URL after a mount attempt")
-		}
-		logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
-		res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
-		if err != nil {
-			logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
-		} else {
-			defer res2.Body.Close()
-			if res2.StatusCode != http.StatusNoContent {
-				logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res2.StatusCode))
-			}
-		}
-		// Anyway, if canceling the upload fails, ignore it and return the more important error:
-		return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
-	default:
-		logrus.Debugf("Error mounting, response %#v", *res)
-		return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
-	}
-}
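The mount request above is just a POST to the destination's upload endpoint with mount and from query parameters, per the Docker registry HTTP API V2. A self-contained sketch of building that URL (repository names and digest are placeholders):

    package main

    import (
    	"fmt"
    	"net/url"
    )

    func main() {
    	// Cross-repository blob mount:
    	// POST /v2/<dest-repo>/blobs/uploads/?mount=<digest>&from=<src-repo>
    	u := url.URL{
    		Path: fmt.Sprintf("/v2/%s/blobs/uploads/", "library/destination"),
    		RawQuery: url.Values{
    			"mount": {"sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"},
    			"from":  {"library/source"},
    		}.Encode(),
    	}
    	fmt.Println(u.String())
    	// A 201 Created means the blob was mounted; a 202 Accepted means the
    	// registry ignored the mount and opened a regular upload session instead.
    }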
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
-	if info.Digest == "" {
-		return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
-	}
-
-	// First, check whether the blob happens to already exist at the destination.
-	exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
-	if err != nil {
-		return false, types.BlobInfo{}, err
-	}
-	if exists {
-		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
-		return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
-	}
-
-	// Then try reusing blobs from other locations.
-	for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
-		candidateRepo, err := parseBICLocationReference(candidate.Location)
-		if err != nil {
-			logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
-			continue
-		}
-		logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
-
-		// Sanity checks:
-		if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
-			logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
-			continue
-		}
-		if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
-			logrus.Debug("... Already tried the primary destination")
-			continue
-		}
-
-		// Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
-
-		// Checking candidateRepo, and mounting from it, requires an
-		// expanded token scope.
-		extraScope := &authScope{
-			remoteName: reference.Path(candidateRepo),
-			actions:    "pull",
-		}
-		// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
-		// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
-		// So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
-		// On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
-		// Even worse, docker/distribution does not actually reasonably implement canceling uploads
-		// (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
-		// so, be a nice client and don't create unnecessary upload sessions on the server.
-		exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
-		if err != nil {
-			logrus.Debugf("... Failed: %v", err)
-			continue
-		}
-		if !exists {
-			// FIXME? Should we drop the blob from cache here (and elsewhere?)?
-			continue // logrus.Debug() already happened in blobExists
-		}
-		if candidateRepo.Name() != d.ref.ref.Name() {
-			if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil {
-				logrus.Debugf("... Mount failed: %v", err)
-				continue
-			}
-		}
-		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
-		return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
-	}
-
-	return false, types.BlobInfo{}, nil
-}
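The extraScope above widens the bearer-token request: when the client later contacts the auth service (see getBearerToken earlier in this file), each scope becomes a repository:<name>:<actions> value in the token request's query string. A small sketch of that query construction (realm, service, and repository names are placeholders):

    package main

    import (
    	"fmt"
    	"net/url"
    )

    func main() {
    	// Token request for a cross-repo mount: pull,push on the destination
    	// plus pull on the source repository the blob is mounted from.
    	q := url.Values{}
    	q.Add("service", "registry.example.com")
    	q.Add("scope", "repository:library/destination:pull,push")
    	q.Add("scope", "repository:library/source:pull")
    	fmt.Println("https://auth.example.com/token?" + q.Encode())
    }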
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
-// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) error {
-	digest, err := manifest.Digest(m)
-	if err != nil {
-		return err
-	}
-	d.manifestDigest = digest
-
-	refTail, err := d.ref.tagOrDigest()
-	if err != nil {
-		return err
-	}
-	path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
-
-	headers := map[string][]string{}
-	mimeType := manifest.GuessMIMEType(m)
-	if mimeType != "" {
-		headers["Content-Type"] = []string{mimeType}
-	}
-	res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil)
-	if err != nil {
-		return err
-	}
-	defer res.Body.Close()
-	if !successStatus(res.StatusCode) {
-		err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
-		if isManifestInvalidError(errors.Cause(err)) {
-			err = types.ManifestTypeRejectedError{Err: err}
-		}
-		return err
-	}
-	return nil
-}
-
-// successStatus returns true if the argument is a successful HTTP response
-// code (in the range 200 - 399 inclusive).
-func successStatus(status int) bool {
-	return status >= 200 && status <= 399
-}
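PutManifest above records the manifest's digest before uploading, because signatures and later tag resolution are keyed on it. A minimal sketch of computing that digest with the canonical algorithm, using github.com/opencontainers/go-digest (already vendored in this tree; the manifest bytes are a placeholder):

    package main

    import (
    	"fmt"

    	digest "github.com/opencontainers/go-digest"
    )

    func main() {
    	m := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json"}`)
    	// digest.Canonical is sha256; the result matches what a registry would
    	// report in the Docker-Content-Digest header for this exact payload.
    	d := digest.Canonical.FromBytes(m)
    	fmt.Println(d) // sha256:...
    }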
-// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
-func isManifestInvalidError(err error) bool {
-	errors, ok := err.(errcode.Errors)
-	if !ok || len(errors) == 0 {
-		return false
-	}
-	err = errors[0]
-	ec, ok := err.(errcode.ErrorCoder)
-	if !ok {
-		return false
-	}
-
-	switch ec.ErrorCode() {
-	// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
-	case v2.ErrorCodeManifestInvalid:
-		return true
-	// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
-	// when uploading to a tag (because it can’t find a matching tag inside the manifest)
-	case v2.ErrorCodeTagInvalid:
-		return true
-	// ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when
-	// uploading an OCI manifest that is (correctly, according to the spec) missing
-	// a top-level media type. See libpod issue #1719
-	// FIXME: remove this case when ECR behavior is fixed
-	case errcode.ErrorCodeUnsupported:
-		return strings.Contains(err.Error(), "Invalid JSON syntax")
-	default:
-		return false
-	}
-}
-
-func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
-	// Do not fail if we don’t really need to support signatures.
-	if len(signatures) == 0 {
-		return nil
-	}
-	if err := d.c.detectProperties(ctx); err != nil {
-		return err
-	}
-	switch {
-	case d.c.signatureBase != nil:
-		return d.putSignaturesToLookaside(signatures)
-	case d.c.supportsSignatures:
-		return d.putSignaturesToAPIExtension(ctx, signatures)
-	default:
-		return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
-	}
-}
-
-// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in d.c.signatureBase,
-// which is not nil.
-func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error {
-	// FIXME? This overwrites files one at a time, definitely not atomic.
-	// A failure when updating signatures with a reordered copy could lose some of them.
-
-	// Skip dealing with the manifest digest if not necessary.
-	if len(signatures) == 0 {
-		return nil
-	}
-
-	if d.manifestDigest.String() == "" {
-		// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
-		return errors.Errorf("Unknown manifest digest, can't add signatures")
-	}
-
-	// NOTE: Keep this in sync with docs/signature-protocols.md!
-	for i, signature := range signatures {
-		url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
-		if url == nil {
-			return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
-		}
-		err := d.putOneSignature(url, signature)
-		if err != nil {
-			return err
-		}
-	}
-	// Remove any other signatures, if present.
-	// We stop at the first missing signature; if a previous deleting loop aborted
-	// prematurely, this may not clean up all of them, but one missing signature
-	// is enough for dockerImageSource to stop looking for other signatures, so that
-	// is sufficient.
-	for i := len(signatures); ; i++ {
-		url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
-		if url == nil {
-			return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
-		}
-		missing, err := d.c.deleteOneSignature(url)
-		if err != nil {
-			return err
-		}
-		if missing {
-			break
-		}
-	}
-
-	return nil
-}
-
-// putOneSignature stores one signature to url.
-// NOTE: Keep this in sync with docs/signature-protocols.md!
-func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
-	switch url.Scheme {
-	case "file":
-		logrus.Debugf("Writing to %s", url.Path)
-		err := os.MkdirAll(filepath.Dir(url.Path), 0755)
-		if err != nil {
-			return err
-		}
-		err = ioutil.WriteFile(url.Path, signature, 0644)
-		if err != nil {
-			return err
-		}
-		return nil
-
-	case "http", "https":
-		return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
-	default:
-		return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
-	}
-}
-
-// deleteOneSignature deletes a signature from url, if it exists.
-// If it successfully determines that the signature does not exist, returns (true, nil)
-// NOTE: Keep this in sync with docs/signature-protocols.md!
-func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
-	switch url.Scheme {
-	case "file":
-		logrus.Debugf("Deleting %s", url.Path)
-		err := os.Remove(url.Path)
-		if err != nil && os.IsNotExist(err) {
-			return true, nil
-		}
-		return false, err
-
-	case "http", "https":
-		return false, errors.Errorf("Deleting directly from a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
-	default:
-		return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
-	}
-}
-
-// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
-func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte) error {
-	// Skip dealing with the manifest digest, or reading the old state, if not necessary.
-	if len(signatures) == 0 {
-		return nil
-	}
-
-	if d.manifestDigest.String() == "" {
-		// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
-		return errors.Errorf("Unknown manifest digest, can't add signatures")
-	}
-
-	// Because image signatures are a shared resource in Atomic Registry, the default upload
-	// always adds signatures. Eventually we should also allow removing signatures,
-	// but the X-Registry-Supports-Signatures API extension does not support that yet.
-
-	existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, d.manifestDigest)
-	if err != nil {
-		return err
-	}
-	existingSigNames := map[string]struct{}{}
-	for _, sig := range existingSignatures.Signatures {
-		existingSigNames[sig.Name] = struct{}{}
-	}
-
-sigExists:
-	for _, newSig := range signatures {
-		for _, existingSig := range existingSignatures.Signatures {
-			if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
-				continue sigExists
-			}
-		}
-
-		// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
-		var signatureName string
-		for {
-			randBytes := make([]byte, 16)
-			n, err := rand.Read(randBytes)
-			if err != nil || n != 16 {
-				return errors.Wrapf(err, "Error generating random signature len %d", n)
-			}
-			signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes)
-			if _, ok := existingSigNames[signatureName]; !ok {
-				break
-			}
-		}
-		sig := extensionSignature{
-			Version: extensionSignatureSchemaVersion,
-			Name:    signatureName,
-			Type:    extensionSignatureTypeAtomic,
-			Content: newSig,
-		}
-		body, err := json.Marshal(sig)
-		if err != nil {
-			return err
-		}
-
-		path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
-		res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
-		if err != nil {
-			return err
-		}
-		defer res.Body.Close()
-		if res.StatusCode != http.StatusCreated {
-			body, err := ioutil.ReadAll(res.Body)
-			if err == nil {
-				logrus.Debugf("Error body %s", string(body))
-			}
-			logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
-			return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
-		}
-	}
-
-	return nil
-}
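The unique signature names generated above are simply <manifest digest>@<32 hex characters>. A standalone sketch of the same construction (the digest value is a placeholder):

    package main

    import (
    	"crypto/rand"
    	"fmt"
    )

    func main() {
    	manifestDigest := "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
    	randBytes := make([]byte, 16)
    	if _, err := rand.Read(randBytes); err != nil {
    		panic(err)
    	}
    	// 16 random bytes render as exactly 32 hex digits, matching the
    	// "%s@%032x" format used when uploading via the API extension.
    	name := fmt.Sprintf("%s@%032x", manifestDigest, randBytes)
    	fmt.Println(name)
    }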
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *dockerImageDestination) Commit(ctx context.Context) error {
-	return nil
-}
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
deleted file mode 100644
index 6951f31e9..000000000
--- a/vendor/github.com/containers/image/docker/docker_image_src.go
+++ /dev/null
@@ -1,451 +0,0 @@
-package docker
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"mime"
-	"net/http"
-	"net/url"
-	"os"
-	"strconv"
-
-	"github.com/containers/image/docker/reference"
-	"github.com/containers/image/manifest"
-	"github.com/containers/image/pkg/sysregistriesv2"
-	"github.com/containers/image/types"
-	"github.com/docker/distribution/registry/client"
-	digest "github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-type dockerImageSource struct {
-	ref dockerReference
-	c   *dockerClient
-	// State
-	cachedManifest         []byte // nil if not loaded yet
-	cachedManifestMIMEType string // Only valid if cachedManifest != nil
-}
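The constructor that follows (newImageSource) does not talk to a single endpoint: it asks sysregistriesv2 for the pull sources of the reference, tries each configured mirror in order, and falls back to the original location last. A hedged sketch of the lookup it builds on (the Mirrors field name and exact API shape are assumptions about the sysregistriesv2 package; import paths per this vendored tree):

    package main

    import (
    	"fmt"

    	"github.com/containers/image/docker/reference"
    	"github.com/containers/image/pkg/sysregistriesv2"
    )

    func main() {
    	// Hypothetical in-memory equivalent of a registries.conf entry with one mirror.
    	reg := sysregistriesv2.Registry{
    		Prefix:   "registry.example.com",
    		Endpoint: sysregistriesv2.Endpoint{Location: "registry.example.com"},
    		Mirrors:  []sysregistriesv2.Endpoint{{Location: "mirror.example.com"}}, // assumed field name
    	}
    	ref, err := reference.ParseNormalizedNamed("registry.example.com/team/app:v1")
    	if err != nil {
    		panic(err)
    	}
    	sources, err := reg.PullSourcesFromReference(ref)
    	if err != nil {
    		panic(err)
    	}
    	// Mirrors come first; the original location is tried last.
    	for _, s := range sources {
    		fmt.Println(s.Reference.String())
    	}
    }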
-// newImageSource creates a new ImageSource for the specified image reference.
-// The caller must call .Close() on the returned ImageSource.
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
-	registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
-	if err != nil {
-		return nil, errors.Wrapf(err, "error loading registries configuration")
-	}
-	if registry == nil {
-		// No configuration was found for the provided reference, so use the
-		// equivalent of a default configuration.
-		registry = &sysregistriesv2.Registry{
-			Endpoint: sysregistriesv2.Endpoint{
-				Location: ref.ref.String(),
-			},
-			Prefix: ref.ref.String(),
-		}
-	}
-
-	primaryDomain := reference.Domain(ref.ref)
-	// Check all endpoints for the manifest availability. If we find one that does
-	// contain the image, it will be used for all future pull actions. Always try the
-	// non-mirror original location last; this both transparently handles the case
-	// of no mirrors configured, and ensures we return the error encountered when
-	// accessing the upstream location if all endpoints fail.
-	manifestLoadErr := errors.New("Internal error: newImageSource returned without trying any endpoint")
-	pullSources, err := registry.PullSourcesFromReference(ref.ref)
-	if err != nil {
-		return nil, err
-	}
-	for _, pullSource := range pullSources {
-		logrus.Debugf("Trying to pull %q", pullSource.Reference)
-		dockerRef, err := newReference(pullSource.Reference)
-		if err != nil {
-			return nil, err
-		}
-
-		endpointSys := sys
-		// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
-		if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(dockerRef.ref) != primaryDomain {
-			copy := *endpointSys
-			copy.DockerAuthConfig = nil
-			endpointSys = &copy
-		}
-
-		client, err := newDockerClientFromRef(endpointSys, dockerRef, false, "pull")
-		if err != nil {
-			return nil, err
-		}
-		client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
-
-		testImageSource := &dockerImageSource{
-			ref: dockerRef,
-			c:   client,
-		}
-
-		manifestLoadErr = testImageSource.ensureManifestIsLoaded(ctx)
-		if manifestLoadErr == nil {
-			return testImageSource, nil
-		}
-	}
-	return nil, manifestLoadErr
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (s *dockerImageSource) Reference() types.ImageReference {
-	return s.ref
-}
-
-// Close removes resources associated with an initialized ImageSource, if any.
-func (s *dockerImageSource) Close() error {
-	return nil
-}
-
-// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *dockerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
-	return nil, nil
-}
-
-// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
-// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
-func simplifyContentType(contentType string) string {
-	if contentType == "" {
-		return contentType
-	}
-	mimeType, _, err := mime.ParseMediaType(contentType)
-	if err != nil {
-		return ""
-	}
-	return mimeType
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
-// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
-func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - return s.fetchManifest(ctx, instanceDigest.String()) - } - err := s.ensureManifestIsLoaded(ctx) - if err != nil { - return nil, "", err - } - return s.cachedManifest, s.cachedManifestMIMEType, nil -} - -func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) { - path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest) - headers := map[string][]string{ - "Accept": manifest.DefaultRequestedManifestMIMETypes, - } - res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil) - if err != nil { - return nil, "", err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name()) - } - manblob, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, "", err - } - return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil -} - -// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType -// -// ImageSource implementations are not required or expected to do any caching, -// but because our signatures are “attached” to the manifest digest, -// we need to ensure that the digest of the manifest returned by GetManifest(ctx, nil) -// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious -// signature verification failures when pulling while a tag is being updated. -func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { - if s.cachedManifest != nil { - return nil - } - - reference, err := s.ref.tagOrDigest() - if err != nil { - return err - } - - manblob, mt, err := s.fetchManifest(ctx, reference) - if err != nil { - return err - } - // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors. - s.cachedManifest = manblob - s.cachedManifestMIMEType = mt - return nil -} - -func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { - var ( - resp *http.Response - err error - ) - for _, url := range urls { - resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) - if err == nil { - if resp.StatusCode != http.StatusOK { - err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode)) - logrus.Debug(err) - continue - } - break - } - } - if err != nil { - return nil, 0, err - } - return resp.Body, getBlobSize(resp), nil -} - -func getBlobSize(resp *http.Response) int64 { - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - size = -1 - } - return size -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *dockerImageSource) HasThreadSafeGetBlob() bool { - return true -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
-func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - if len(info.URLs) != 0 { - return s.getExternalBlob(ctx, info.URLs) - } - - path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String()) - logrus.Debugf("Downloading %s", path) - res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) - if err != nil { - return nil, 0, err - } - if res.StatusCode != http.StatusOK { - // print url also - return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) - } - cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref)) - return res.Body, getBlobSize(res), nil -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if err := s.c.detectProperties(ctx); err != nil { - return nil, err - } - switch { - case s.c.signatureBase != nil: - return s.getSignaturesFromLookaside(ctx, instanceDigest) - case s.c.supportsSignatures: - return s.getSignaturesFromAPIExtension(ctx, instanceDigest) - default: - return [][]byte{}, nil - } -} - -// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, -// or finally, from a fetched manifest. -func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) { - if instanceDigest != nil { - return *instanceDigest, nil - } - if digested, ok := s.ref.ref.(reference.Digested); ok { - d := digested.Digest() - if d.Algorithm() == digest.Canonical { - return d, nil - } - } - if err := s.ensureManifestIsLoaded(ctx); err != nil { - return "", err - } - return manifest.Digest(s.cachedManifest) -} - -// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase, -// which is not nil. -func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - manifestDigest, err := s.manifestDigest(ctx, instanceDigest) - if err != nil { - return nil, err - } - - // NOTE: Keep this in sync with docs/signature-protocols.md! - signatures := [][]byte{} - for i := 0; ; i++ { - url := signatureStorageURL(s.c.signatureBase, manifestDigest, i) - if url == nil { - return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") - } - signature, missing, err := s.getOneSignature(ctx, url) - if err != nil { - return nil, err - } - if missing { - break - } - signatures = append(signatures, signature) - } - return signatures, nil -} - -// getOneSignature downloads one signature from url. -// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil. -// NOTE: Keep this in sync with docs/signature-protocols.md! 
-func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) { - switch url.Scheme { - case "file": - logrus.Debugf("Reading %s", url.Path) - sig, err := ioutil.ReadFile(url.Path) - if err != nil { - if os.IsNotExist(err) { - return nil, true, nil - } - return nil, false, err - } - return sig, false, nil - - case "http", "https": - logrus.Debugf("GET %s", url) - req, err := http.NewRequest("GET", url.String(), nil) - if err != nil { - return nil, false, err - } - req = req.WithContext(ctx) - res, err := s.c.client.Do(req) - if err != nil { - return nil, false, err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return nil, true, nil - } else if res.StatusCode != http.StatusOK { - return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode)) - } - sig, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, false, err - } - return sig, false, nil - - default: - return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String()) - } -} - -// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension. -func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - manifestDigest, err := s.manifestDigest(ctx, instanceDigest) - if err != nil { - return nil, err - } - - parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest) - if err != nil { - return nil, err - } - - var sigs [][]byte - for _, sig := range parsedBody.Signatures { - if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { - sigs = append(sigs, sig.Content) - } - } - return sigs, nil -} - -// deleteImage deletes the named image from the registry, if supported. -func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error { - // docker/distribution does not document what action should be used for deleting images. - // - // Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it. - // quay.io requires "push" (an explicit "pull" is unnecessary), does not grant any token (fails parsing the request) if "delete" is included. - // OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user). - // - // We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything". - c, err := newDockerClientFromRef(sys, ref, true, "*") - if err != nil { - return err - } - - headers := map[string][]string{ - "Accept": manifest.DefaultRequestedManifestMIMETypes, - } - refTail, err := ref.tagOrDigest() - if err != nil { - return err - } - getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail) - get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil) - if err != nil { - return err - } - defer get.Body.Close() - manifestBody, err := ioutil.ReadAll(get.Body) - if err != nil { - return err - } - switch get.StatusCode { - case http.StatusOK: - case http.StatusNotFound: - return errors.Errorf("Unable to delete %v. 
Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref) - default: - return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status) - } - - digest := get.Header.Get("Docker-Content-Digest") - deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest) - - // When retrieving the digest from a registry >= 2.3 use the following header: - // "Accept": "application/vnd.docker.distribution.manifest.v2+json" - delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil) - if err != nil { - return err - } - defer delete.Body.Close() - - body, err := ioutil.ReadAll(delete.Body) - if err != nil { - return err - } - if delete.StatusCode != http.StatusAccepted { - return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status) - } - - if c.signatureBase != nil { - manifestDigest, err := manifest.Digest(manifestBody) - if err != nil { - return err - } - - for i := 0; ; i++ { - url := signatureStorageURL(c.signatureBase, manifestDigest, i) - if url == nil { - return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") - } - missing, err := c.deleteOneSignature(url) - if err != nil { - return err - } - if missing { - break - } - } - } - - return nil -} diff --git a/vendor/github.com/containers/image/docker/docker_transport.go b/vendor/github.com/containers/image/docker/docker_transport.go deleted file mode 100644 index 45da7c96f..000000000 --- a/vendor/github.com/containers/image/docker/docker_transport.go +++ /dev/null @@ -1,168 +0,0 @@ -package docker - -import ( - "context" - "fmt" - "strings" - - "github.com/containers/image/docker/policyconfiguration" - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for Docker registry-hosted images. -var Transport = dockerTransport{} - -type dockerTransport struct{} - -func (t dockerTransport) Name() string { - return "docker" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error { - // FIXME? We could be verifying the various character set and length restrictions - // from docker/distribution/reference.regexp.go, but other than that there - // are few semantically invalid strings. - return nil -} - -// dockerReference is an ImageReference for Docker images. -type dockerReference struct { - ref reference.Named // By construction we know that !reference.IsNameOnly(ref) -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference. 
-func ParseReference(refString string) (types.ImageReference, error) { - if !strings.HasPrefix(refString, "//") { - return nil, errors.Errorf("docker: image reference %s does not start with //", refString) - } - ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//")) - if err != nil { - return nil, err - } - ref = reference.TagNameOnly(ref) - return NewReference(ref) -} - -// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly(). -func NewReference(ref reference.Named) (types.ImageReference, error) { - return newReference(ref) -} - -// newReference returns a dockerReference for a named reference. -func newReference(ref reference.Named) (dockerReference, error) { - if reference.IsNameOnly(ref) { - return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) - } - // A github.com/distribution/reference value can have a tag and a digest at the same time! - // The docker/distribution API does not really support that (we can’t ask for an image with a specific - // tag and digest), so fail. This MAY be accepted in the future. - // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop - // the tag or the digest first?) - _, isTagged := ref.(reference.NamedTagged) - _, isDigested := ref.(reference.Canonical) - if isTagged && isDigested { - return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported") - } - - return dockerReference{ - ref: ref, - }, nil -} - -func (ref dockerReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref dockerReference) StringWithinTransport() string { - return "//" + reference.FamiliarString(ref.ref) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref dockerReference) DockerReference() reference.Named { - return ref.ref -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. 
-func (ref dockerReference) PolicyConfigurationIdentity() string { - res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) - if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. - panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) - } - return res -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref dockerReference) PolicyConfigurationNamespaces() []string { - return policyconfiguration.DockerReferenceNamespaces(ref.ref) -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - return newImage(ctx, sys, ref) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, sys, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref dockerReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(sys, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref dockerReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return deleteImage(ctx, sys, ref) -} - -// tagOrDigest returns a tag or digest from the reference. -func (ref dockerReference) tagOrDigest() (string, error) { - if ref, ok := ref.ref.(reference.Canonical); ok { - return ref.Digest().String(), nil - } - if ref, ok := ref.ref.(reference.NamedTagged); ok { - return ref.Tag(), nil - } - // This should not happen, NewReference above refuses reference.IsNameOnly values. 
- return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) -} diff --git a/vendor/github.com/containers/image/docker/lookaside.go b/vendor/github.com/containers/image/docker/lookaside.go deleted file mode 100644 index 860f1ad5e..000000000 --- a/vendor/github.com/containers/image/docker/lookaside.go +++ /dev/null @@ -1,202 +0,0 @@ -package docker - -import ( - "fmt" - "io/ioutil" - "net/url" - "os" - "path" - "path/filepath" - "strings" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" - "github.com/ghodss/yaml" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. -// You can override this at build time with -// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path' -var systemRegistriesDirPath = builtinRegistriesDirPath - -// builtinRegistriesDirPath is the path to registries.d. -// DO NOT change this, instead see systemRegistriesDirPath above. -const builtinRegistriesDirPath = "/etc/containers/registries.d" - -// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. -// NOTE: Keep this in sync with docs/registries.d.md! -type registryConfiguration struct { - DefaultDocker *registryNamespace `json:"default-docker"` - // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*), - Docker map[string]registryNamespace `json:"docker"` -} - -// registryNamespace defines lookaside locations for a single namespace. -type registryNamespace struct { - SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing. - SigStoreStaging string `json:"sigstore-staging"` // For writing only. -} - -// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage. -// Users outside of this file should use configuredSignatureStorageBase and signatureStorageURL below. -type signatureStorageBase *url.URL // The only documented value is nil, meaning storage is not supported. - -// configuredSignatureStorageBase reads configuration to find an appropriate signature storage URL for ref, for write access if “write”. -func configuredSignatureStorageBase(sys *types.SystemContext, ref dockerReference, write bool) (signatureStorageBase, error) { - // FIXME? Loading and parsing the config could be cached across calls. - dirPath := registriesDirPath(sys) - logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath) - config, err := loadAndMergeConfig(dirPath) - if err != nil { - return nil, err - } - - topLevel := config.signatureTopLevel(ref, write) - if topLevel == "" { - return nil, nil - } - - url, err := url.Parse(topLevel) - if err != nil { - return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel) - } - // NOTE: Keep this in sync with docs/signature-protocols.md! - // FIXME? Restrict to explicitly supported schemes? - repo := reference.Path(ref.ref) // Note that this is without a tag or digest. 
- if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references - return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String()) - } - url.Path = url.Path + "/" + repo - return url, nil -} - -// registriesDirPath returns a path to registries.d -func registriesDirPath(sys *types.SystemContext) string { - if sys != nil { - if sys.RegistriesDirPath != "" { - return sys.RegistriesDirPath - } - if sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath) - } - } - return systemRegistriesDirPath -} - -// loadAndMergeConfig loads configuration files in dirPath -func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { - mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}} - dockerDefaultMergedFrom := "" - nsMergedFrom := map[string]string{} - - dir, err := os.Open(dirPath) - if err != nil { - if os.IsNotExist(err) { - return &mergedConfig, nil - } - return nil, err - } - configNames, err := dir.Readdirnames(0) - if err != nil { - return nil, err - } - for _, configName := range configNames { - if !strings.HasSuffix(configName, ".yaml") { - continue - } - configPath := filepath.Join(dirPath, configName) - configBytes, err := ioutil.ReadFile(configPath) - if err != nil { - return nil, err - } - - var config registryConfiguration - err = yaml.Unmarshal(configBytes, &config) - if err != nil { - return nil, errors.Wrapf(err, "Error parsing %s", configPath) - } - - if config.DefaultDocker != nil { - if mergedConfig.DefaultDocker != nil { - return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, - dockerDefaultMergedFrom, configPath) - } - mergedConfig.DefaultDocker = config.DefaultDocker - dockerDefaultMergedFrom = configPath - } - - for nsName, nsConfig := range config.Docker { // includes config.Docker == nil - if _, ok := mergedConfig.Docker[nsName]; ok { - return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, - nsName, nsMergedFrom[nsName], configPath) - } - mergedConfig.Docker[nsName] = nsConfig - nsMergedFrom[nsName] = configPath - } - } - - return &mergedConfig, nil -} - -// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”. -// (the top level of the storage, namespaced by repo.FullName etc.), or "" if no signature storage should be used. -func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string { - if config.Docker != nil { - // Look for a full match. - identity := ref.PolicyConfigurationIdentity() - if ns, ok := config.Docker[identity]; ok { - logrus.Debugf(` Using "docker" namespace %s`, identity) - if url := ns.signatureTopLevel(write); url != "" { - return url - } - } - - // Look for a match of the possible parent namespaces. 
- for _, name := range ref.PolicyConfigurationNamespaces() { - if ns, ok := config.Docker[name]; ok { - logrus.Debugf(` Using "docker" namespace %s`, name) - if url := ns.signatureTopLevel(write); url != "" { - return url - } - } - } - } - // Look for a default location - if config.DefaultDocker != nil { - logrus.Debugf(` Using "default-docker" configuration`) - if url := config.DefaultDocker.signatureTopLevel(write); url != "" { - return url - } - } - logrus.Debugf(" No signature storage configuration found for %s", ref.PolicyConfigurationIdentity()) - return "" -} - -// ns.signatureTopLevel returns a URL string configured in ns for ref, for write access if “write”, -// or "" if nothing has been configured. -func (ns registryNamespace) signatureTopLevel(write bool) string { - if write && ns.SigStoreStaging != "" { - logrus.Debugf(` Using %s`, ns.SigStoreStaging) - return ns.SigStoreStaging - } - if ns.SigStore != "" { - logrus.Debugf(` Using %s`, ns.SigStore) - return ns.SigStore - } - return "" -} - -// signatureStorageURL returns a URL usable for accessing the signature index in base with known manifestDigest, or nil if not applicable. -// Returns nil iff base == nil. -// NOTE: Keep this in sync with docs/signature-protocols.md! -func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL { - if base == nil { - return nil - } - url := *base - url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1) - return &url -} diff --git a/vendor/github.com/containers/image/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/docker/policyconfiguration/naming.go deleted file mode 100644 index 31bbb544c..000000000 --- a/vendor/github.com/containers/image/docker/policyconfiguration/naming.go +++ /dev/null @@ -1,56 +0,0 @@ -package policyconfiguration - -import ( - "strings" - - "github.com/containers/image/docker/reference" - "github.com/pkg/errors" -) - -// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup, -// as a backend for ImageReference.PolicyConfigurationIdentity. -// The reference must satisfy !reference.IsNameOnly(). -func DockerReferenceIdentity(ref reference.Named) (string, error) { - res := ref.Name() - tagged, isTagged := ref.(reference.NamedTagged) - digested, isDigested := ref.(reference.Canonical) - switch { - case isTagged && isDigested: // Note that this CAN actually happen. - return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref)) - case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly() - return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref)) - case isTagged: - res = res + ":" + tagged.Tag() - case isDigested: - res = res + "@" + digested.Digest().String() - default: // Coverage: The above was supposed to be exhaustive. - return "", errors.New("Internal inconsistency, unexpected default branch") - } - return res, nil -} - -// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search, -// as a backend for ImageReference.PolicyConfigurationNamespaces. -// The reference must satisfy !reference.IsNameOnly(). -func DockerReferenceNamespaces(ref reference.Named) []string { - // Look for a match of the repository, and then of the possible parent
Note that this only happens on the expanded host names - // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox", - // then in its parent "docker.io/library"; in none of "busybox", - // un-namespaced "library" nor in "" supposedly implicitly representing "library/". - // - // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last - // iteration matches the host name (for any namespace). - res := []string{} - name := ref.Name() - for { - res = append(res, name) - - lastSlash := strings.LastIndex(name, "/") - if lastSlash == -1 { - break - } - name = name[:lastSlash] - } - return res -} diff --git a/vendor/github.com/containers/image/docker/reference/README.md b/vendor/github.com/containers/image/docker/reference/README.md deleted file mode 100644 index 3c4d74eb4..000000000 --- a/vendor/github.com/containers/image/docker/reference/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8, -except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file diff --git a/vendor/github.com/containers/image/docker/reference/helpers.go b/vendor/github.com/containers/image/docker/reference/helpers.go deleted file mode 100644 index 978df7eab..000000000 --- a/vendor/github.com/containers/image/docker/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/containers/image/docker/reference/normalize.go b/vendor/github.com/containers/image/docker/reference/normalize.go deleted file mode 100644 index 6a86ec64f..000000000 --- a/vendor/github.com/containers/image/docker/reference/normalize.go +++ /dev/null @@ -1,181 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". 
-type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. -func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remoteName string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] - } else { - remoteName = remainder - } - if strings.ToLower(remoteName) != remoteName { - return nil, errors.New("invalid reference format: repository name must be lowercase") - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// ParseDockerRef normalizes the image reference following the docker convention. This is added -// mainly for backward compatibility. -// The reference returned can only be either tagged or digested. If the reference contains both a tag -// and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@ -// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. -func ParseDockerRef(ref string) (Named, error) { - named, err := ParseNormalizedNamed(ref) - if err != nil { - return nil, err - } - if _, ok := named.(NamedTagged); ok { - if canonical, ok := named.(Canonical); ok { - // The reference is both tagged and digested, only - // return digested. - newNamed, err := WithName(canonical.Name()) - if err != nil { - return nil, err - } - newCanonical, err := WithDigest(newNamed, canonical.Digest()) - if err != nil { - return nil, err - } - return newCanonical, nil - } - } - return TagNameOnly(named), nil -} - -// splitDockerDomain splits a repository name into domain and remote-name strings. -// If no valid domain is found, the default domain is used. The repository name -// needs to be validated beforehand. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized name-only reference.
-func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/containers/image/docker/reference/reference.go b/vendor/github.com/containers/image/docker/reference/reference.go deleted file mode 100644 index 8c0c23b2f..000000000 --- a/vendor/github.com/containers/image/docker/reference/reference.go +++ /dev/null @@ -1,433 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. 
- ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// by which it can be referenced -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name, including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components.
-type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the Named reference -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the Named reference -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// DEPRECATED: Use Domain or Path -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. -func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. -func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. 
-func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. -func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, - } -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/github.com/containers/image/docker/reference/regexp.go b/vendor/github.com/containers/image/docker/reference/regexp.go deleted file mode 100644 index 786034932..000000000 --- a/vendor/github.com/containers/image/docker/reference/regexp.go +++ /dev/null @@ -1,143 +0,0 @@ -package 
reference - -import "regexp" - -var ( - // alphaNumericRegexp defines the alphanumeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allows one period, one or two underscores, and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscores, and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // DomainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - DomainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(DomainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(DomainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string.
- anchoredIdentifierRegexp = anchored(IdentifierRegexp) - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go deleted file mode 100644 index 5f30eddbc..000000000 --- a/vendor/github.com/containers/image/docker/tarfile/dest.go +++ /dev/null @@ -1,407 +0,0 @@ -package tarfile - -import ( - "archive/tar" - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/internal/tmpdir" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer. -type Destination struct { - writer io.Writer - tar *tar.Writer - repoTags []reference.NamedTagged - // Other state. - blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs - config []byte -} - -// NewDestination returns a tarfile.Destination for the specified io.Writer. -func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination { - repoTags := []reference.NamedTagged{} - if ref != nil { - repoTags = append(repoTags, ref) - } - return &Destination{ - writer: dest, - tar: tar.NewWriter(dest), - repoTags: repoTags, - blobs: make(map[digest.Digest]types.BlobInfo), - } -} - -// AddRepoTags adds the specified tags to the destination's repoTags. -func (d *Destination) AddRepoTags(tags []reference.NamedTagged) { - d.repoTags = append(d.repoTags, tags...) 
-} - -// SupportedManifestMIMETypes tells which manifest MIME types the destination supports. -// If an empty slice or nil is returned, then any MIME type can be tried for upload. -func (d *Destination) SupportedManifestMIMETypes() []string { - return []string{ - manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities. - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *Destination) SupportsSignatures(ctx context.Context) error { - return errors.Errorf("Storing signatures for docker tar files is not supported") -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be -// uploaded to the image destination, true otherwise. -func (d *Destination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *Destination) MustMatchRuntimeOS() bool { - return false -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *Destination) IgnoresEmbeddedDockerReference() bool { - return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false. -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *Destination) HasThreadSafePutBlob() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// May update cache. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - // Ouch, we need to stream the blob into a temporary file just to determine the size. - // When the layer is decompressed, we also have to generate the digest on the uncompressed data. - if inputInfo.Size == -1 || inputInfo.Digest.String() == "" { - logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...") - streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob") - if err != nil { - return types.BlobInfo{}, err - } - defer os.Remove(streamCopy.Name()) - defer streamCopy.Close() - - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- size, err := io.Copy(streamCopy, tee) - if err != nil { - return types.BlobInfo{}, err - } - _, err = streamCopy.Seek(0, os.SEEK_SET) - if err != nil { - return types.BlobInfo{}, err - } - inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy. - if inputInfo.Digest == "" { - inputInfo.Digest = digester.Digest() - } - stream = streamCopy - logrus.Debugf("... streaming done") - } - - // Maybe the blob has already been sent - ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false) - if err != nil { - return types.BlobInfo{}, err - } - if ok { - return reusedInfo, nil - } - - if isConfig { - buf, err := ioutil.ReadAll(stream) - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream") - } - d.config = buf - if err := d.sendFile(inputInfo.Digest.Hex()+".json", inputInfo.Size, bytes.NewReader(buf)); err != nil { - return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file") - } - } else { - // Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way - // writeLegacyLayerMetadata constructs layer IDs differently from inputinfo.Digest values (as described - // inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load) - // tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers - // in the root of the tarball. - if err := d.sendFile(inputInfo.Digest.Hex()+".tar", inputInfo.Size, stream); err != nil { - return types.BlobInfo{}, err - } - } - d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size} - return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil -} - -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination -// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). -// info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. -// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache.
-func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - if info.Digest == "" { - return false, types.BlobInfo{}, errors.Errorf("Cannot check for a blob with an unknown digest") - } - if blob, ok := d.blobs[info.Digest]; ok { - return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil - } - return false, types.BlobInfo{}, nil -} - -func (d *Destination) createRepositoriesFile(rootLayerID string) error { - repositories := map[string]map[string]string{} - for _, repoTag := range d.repoTags { - if val, ok := repositories[repoTag.Name()]; ok { - val[repoTag.Tag()] = rootLayerID - } else { - repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): rootLayerID} - } - } - - b, err := json.Marshal(repositories) - if err != nil { - return errors.Wrap(err, "Error marshaling repositories") - } - if err := d.sendBytes(legacyRepositoriesFileName, b); err != nil { - return errors.Wrap(err, "Error writing repositories file") - } - return nil -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema) -// and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. -func (d *Destination) PutManifest(ctx context.Context, m []byte) error { - // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative, - // so the caller trying a different manifest kind would be pointless. - var man manifest.Schema2 - if err := json.Unmarshal(m, &man); err != nil { - return errors.Wrap(err, "Error parsing manifest") - } - if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType { - return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest") - } - - layerPaths, lastLayerID, err := d.writeLegacyLayerMetadata(man.LayersDescriptors) - if err != nil { - return err - } - - if len(man.LayersDescriptors) > 0 { - if err := d.createRepositoriesFile(lastLayerID); err != nil { - return err - } - } - - repoTags := []string{} - for _, tag := range d.repoTags { - // For github.com/docker/docker consumers, this works just as well as - // refString := ref.String() - // because when reading the RepoTags strings, github.com/docker/docker/reference - // normalizes both of them to the same value. - // - // Doing it this way to include the normalized-out `docker.io[/library]` does make - // a difference for github.com/projectatomic/docker consumers, with the - // “Add --add-registry and --block-registry options to docker daemon” patch. - // These consumers treat reference strings which include a hostname and reference - // strings without a hostname differently. - // - // Using the host name here is more explicit about the intent, and it has the same - // effect as (docker pull) in projectatomic/docker, which tags the result using - // a hostname-qualified reference. - // See https://github.com/containers/image/issues/72 for a more detailed - // analysis and explanation.
- refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag()) - repoTags = append(repoTags, refString) - } - - items := []ManifestItem{{ - Config: man.ConfigDescriptor.Digest.Hex() + ".json", - RepoTags: repoTags, - Layers: layerPaths, - Parent: "", - LayerSources: nil, - }} - itemsBytes, err := json.Marshal(&items) - if err != nil { - return err - } - - // FIXME? Do we also need to support the legacy format? - return d.sendBytes(manifestFileName, itemsBytes) -} - -// writeLegacyLayerMetadata writes legacy VERSION and configuration files for all layers -func (d *Destination) writeLegacyLayerMetadata(layerDescriptors []manifest.Schema2Descriptor) (layerPaths []string, lastLayerID string, err error) { - var chainID digest.Digest - lastLayerID = "" - for i, l := range layerDescriptors { - // This chainID value matches the computation in docker/docker/layer.CreateChainID … - if chainID == "" { - chainID = l.Digest - } else { - chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String()) - } - // … but note that this image ID does not match docker/docker/image/v1.CreateID. At least recent - // versions allocate new IDs on load, as long as the IDs we use are unique / cannot loop. - // - // Overall, the goal of computing a digest dependent on the full history is to avoid reusing an image ID - // (and possibly creating a loop in the "parent" links) if a layer with the same DiffID appears two or more - // times in layersDescriptors. The ChainID values are sufficient for this, the v1.CreateID computation - // which also mixes in the full image configuration seems unnecessary, at least as long as we are storing - // only a single image per tarball, i.e. all DiffID prefixes are unique (can’t differ only with - // configuration). - layerID := chainID.Hex() - - physicalLayerPath := l.Digest.Hex() + ".tar" - // The layer itself has been stored into physicalLayerPath in PutManifest. - // So, use that path for layerPaths used in the non-legacy manifest - layerPaths = append(layerPaths, physicalLayerPath) - // ... 
and create a symlink for the legacy format; - if err := d.sendSymlink(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil { - return nil, "", errors.Wrap(err, "Error creating layer symbolic link") - } - - b := []byte("1.0") - if err := d.sendBytes(filepath.Join(layerID, legacyVersionFileName), b); err != nil { - return nil, "", errors.Wrap(err, "Error writing VERSION file") - } - - // The legacy format requires a config file per layer - layerConfig := make(map[string]interface{}) - layerConfig["id"] = layerID - - // The root layer doesn't have any parent - if lastLayerID != "" { - layerConfig["parent"] = lastLayerID - } - // The root layer configuration file is generated by using subpart of the image configuration - if i == len(layerDescriptors)-1 { - var config map[string]*json.RawMessage - err := json.Unmarshal(d.config, &config) - if err != nil { - return nil, "", errors.Wrap(err, "Error unmarshaling config") - } - for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} { - layerConfig[attr] = config[attr] - } - } - b, err := json.Marshal(layerConfig) - if err != nil { - return nil, "", errors.Wrap(err, "Error marshaling layer config") - } - if err := d.sendBytes(filepath.Join(layerID, legacyConfigFileName), b); err != nil { - return nil, "", errors.Wrap(err, "Error writing config json file") - } - - lastLayerID = layerID - } - return layerPaths, lastLayerID, nil -} - -type tarFI struct { - path string - size int64 - isSymlink bool -} - -func (t *tarFI) Name() string { - return t.path -} -func (t *tarFI) Size() int64 { - return t.size -} -func (t *tarFI) Mode() os.FileMode { - if t.isSymlink { - return os.ModeSymlink - } - return 0444 -} -func (t *tarFI) ModTime() time.Time { - return time.Unix(0, 0) -} -func (t *tarFI) IsDir() bool { - return false -} -func (t *tarFI) Sys() interface{} { - return nil -} - -// sendSymlink sends a symlink into the tar stream. -func (d *Destination) sendSymlink(path string, target string) error { - hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target) - if err != nil { - return nil - } - logrus.Debugf("Sending as tar link %s -> %s", path, target) - return d.tar.WriteHeader(hdr) -} - -// sendBytes sends a path into the tar stream. -func (d *Destination) sendBytes(path string, b []byte) error { - return d.sendFile(path, int64(len(b)), bytes.NewReader(b)) -} - -// sendFile sends a file into the tar stream. -func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error { - hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "") - if err != nil { - return nil - } - logrus.Debugf("Sending as tar file %s", path) - if err := d.tar.WriteHeader(hdr); err != nil { - return err - } - // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. - size, err := io.Copy(d.tar, stream) - if err != nil { - return err - } - if size != expectedSize { - return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size) - } - return nil -} - -// PutSignatures adds the given signatures to the docker tarfile (currently not -// supported). 
MUST be called after PutManifest (signatures reference manifest -// contents) -func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte) error { - if len(signatures) != 0 { - return errors.Errorf("Storing signatures for docker tar files is not supported") - } - return nil -} - -// Commit finishes writing data to the underlying io.Writer. -// It is the caller's responsibility to close it, if necessary. -func (d *Destination) Commit(ctx context.Context) error { - return d.tar.Close() -} diff --git a/vendor/github.com/containers/image/docker/tarfile/doc.go b/vendor/github.com/containers/image/docker/tarfile/doc.go deleted file mode 100644 index 4ea5369c0..000000000 --- a/vendor/github.com/containers/image/docker/tarfile/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package tarfile is an internal implementation detail of some transports. -// Do not use outside of the github.com/containers/image repo! -package tarfile diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go deleted file mode 100644 index dd5d78fe8..000000000 --- a/vendor/github.com/containers/image/docker/tarfile/src.go +++ /dev/null @@ -1,478 +0,0 @@ -package tarfile - -import ( - "archive/tar" - "bytes" - "context" - "encoding/json" - "io" - "io/ioutil" - "os" - "path" - "sync" - - "github.com/containers/image/internal/tmpdir" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/compression" - "github.com/containers/image/types" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// Source is a partial implementation of types.ImageSource for reading from tarPath. -type Source struct { - tarPath string - removeTarPathOnClose bool // Remove temp file on close if true - // The following data is only available after ensureCachedDataIsPresent() succeeds - tarManifest *ManifestItem // nil if not available yet. - configBytes []byte - configDigest digest.Digest - orderedDiffIDList []digest.Digest - knownLayers map[digest.Digest]*layerInfo - // Other state - generatedManifest []byte // Private cache for GetManifest(), nil if not set yet. - cacheDataLock sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe - cacheDataResult error // Private state for ensureCachedDataIsPresent -} - -type layerInfo struct { - path string - size int64 -} - -// TODO: We could add support for multiple images in a single archive, so -// that people could use docker-archive:opensuse.tar:opensuse:leap as -// the source of an image. -// To do for both the NewSourceFromFile and NewSourceFromStream functions - -// NewSourceFromFile returns a tarfile.Source for the specified path. -func NewSourceFromFile(path string) (*Source, error) { - file, err := os.Open(path) - if err != nil { - return nil, errors.Wrapf(err, "error opening file %q", path) - } - defer file.Close() - - // If the file is already not compressed we can just return the file itself - // as a source. Otherwise we pass the stream to NewSourceFromStream. - stream, isCompressed, err := compression.AutoDecompress(file) - if err != nil { - return nil, errors.Wrapf(err, "Error detecting compression for file %q", path) - } - defer stream.Close() - if !isCompressed { - return &Source{ - tarPath: path, - }, nil - } - return NewSourceFromStream(stream) -} - -// NewSourceFromStream returns a tarfile.Source for the specified inputStream, -// which can be either compressed or uncompressed. 
The caller can close the -// inputStream immediately after NewSourceFromFile returns. -func NewSourceFromStream(inputStream io.Reader) (*Source, error) { - // FIXME: use SystemContext here. - // Save inputStream to a temporary file - tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tar") - if err != nil { - return nil, errors.Wrap(err, "error creating temporary file") - } - defer tarCopyFile.Close() - - succeeded := false - defer func() { - if !succeeded { - os.Remove(tarCopyFile.Name()) - } - }() - - // In order to be compatible with docker-load, we need to support - // auto-decompression (it's also a nice quality-of-life thing to avoid - // giving users really confusing "invalid tar header" errors). - uncompressedStream, _, err := compression.AutoDecompress(inputStream) - if err != nil { - return nil, errors.Wrap(err, "Error auto-decompressing input") - } - defer uncompressedStream.Close() - - // Copy the plain archive to the temporary file. - // - // TODO: This can take quite some time, and should ideally be cancellable - // using a context.Context. - if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil { - return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name()) - } - succeeded = true - - return &Source{ - tarPath: tarCopyFile.Name(), - removeTarPathOnClose: true, - }, nil -} - -// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component. -type tarReadCloser struct { - *tar.Reader - backingFile *os.File -} - -func (t *tarReadCloser) Close() error { - return t.backingFile.Close() -} - -// openTarComponent returns a ReadCloser for the specific file within the archive. -// This is linear scan; we assume that the tar file will have a fairly small amount of files (~layers), -// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough. -// The caller should call .Close() on the returned stream. -func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) { - f, err := os.Open(s.tarPath) - if err != nil { - return nil, err - } - succeeded := false - defer func() { - if !succeeded { - f.Close() - } - }() - - tarReader, header, err := findTarComponent(f, componentPath) - if err != nil { - return nil, err - } - if header == nil { - return nil, os.ErrNotExist - } - if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested - // We follow only one symlink; so no loops are possible. - if _, err := f.Seek(0, os.SEEK_SET); err != nil { - return nil, err - } - // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive, - // so we don't care. - tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname)) - if err != nil { - return nil, err - } - if header == nil { - return nil, os.ErrNotExist - } - } - - if !header.FileInfo().Mode().IsRegular() { - return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name) - } - succeeded = true - return &tarReadCloser{Reader: tarReader, backingFile: f}, nil -} - -// findTarComponent returns a header and a reader matching path within inputFile, -// or (nil, nil, nil) if not found. 
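// A usage sketch (not in the original source): on a match, the returned
// *tar.Reader is left positioned at the entry's data, so the component can
// be read with no further seeking:
//
//	tarReader, header, err := findTarComponent(f, "manifest.json")
//	if err == nil && header != nil {
//		data, err := ioutil.ReadAll(tarReader) // contents of manifest.json
//	}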
-func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) { - t := tar.NewReader(inputFile) - for { - h, err := t.Next() - if err == io.EOF { - break - } - if err != nil { - return nil, nil, err - } - if h.Name == path { - return t, h, nil - } - } - return nil, nil, nil -} - -// readTarComponent returns full contents of componentPath. -func (s *Source) readTarComponent(path string) ([]byte, error) { - file, err := s.openTarComponent(path) - if err != nil { - return nil, errors.Wrapf(err, "Error loading tar component %s", path) - } - defer file.Close() - bytes, err := ioutil.ReadAll(file) - if err != nil { - return nil, err - } - return bytes, nil -} - -// ensureCachedDataIsPresent loads data necessary for any of the public accessors. -// It is safe to call this from multi-threaded code. -func (s *Source) ensureCachedDataIsPresent() error { - s.cacheDataLock.Do(func() { - s.cacheDataResult = s.ensureCachedDataIsPresentPrivate() - }) - return s.cacheDataResult -} - -// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent. -// Call ensureCachedDataIsPresent instead. -func (s *Source) ensureCachedDataIsPresentPrivate() error { - // Read and parse manifest.json - tarManifest, err := s.loadTarManifest() - if err != nil { - return err - } - - // Check to make sure length is 1 - if len(tarManifest) != 1 { - return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest)) - } - - // Read and parse config. - configBytes, err := s.readTarComponent(tarManifest[0].Config) - if err != nil { - return err - } - var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. - if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { - return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config) - } - - knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig) - if err != nil { - return err - } - - // Success; commit. - s.tarManifest = &tarManifest[0] - s.configBytes = configBytes - s.configDigest = digest.FromBytes(configBytes) - s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs - s.knownLayers = knownLayers - return nil -} - -// loadTarManifest loads and decodes the manifest.json. -func (s *Source) loadTarManifest() ([]ManifestItem, error) { - // FIXME? Do we need to deal with the legacy format? - bytes, err := s.readTarComponent(manifestFileName) - if err != nil { - return nil, err - } - var items []ManifestItem - if err := json.Unmarshal(bytes, &items); err != nil { - return nil, errors.Wrap(err, "Error decoding tar manifest.json") - } - return items, nil -} - -// Close removes resources associated with an initialized Source, if any. -func (s *Source) Close() error { - if s.removeTarPathOnClose { - return os.Remove(s.tarPath) - } - return nil -} - -// LoadTarManifest loads and decodes the manifest.json -func (s *Source) LoadTarManifest() ([]ManifestItem, error) { - return s.loadTarManifest() -} - -func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { - // Collect layer data available in manifest and config. 
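// The scan below must report uncompressed sizes, so each compressed layer is
// streamed through a decompressor purely to count bytes. A minimal
// standard-library sketch of the same idea (assuming a gzip'ed layer;
// compression.AutoDecompress also recognizes other formats):
//
//	gz, err := gzip.NewReader(layerStream)
//	if err != nil {
//		return nil, err
//	}
//	defer gz.Close()
//	uncompressedSize, err := io.Copy(ioutil.Discard, gz)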
- if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { - return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) - } - knownLayers := map[digest.Digest]*layerInfo{} - unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. - for i, diffID := range parsedConfig.RootFS.DiffIDs { - if _, ok := knownLayers[diffID]; ok { - // Apparently it really can happen that a single image contains the same layer diff more than once. - // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter - // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original. - continue - } - layerPath := tarManifest.Layers[i] - if _, ok := unknownLayerSizes[layerPath]; ok { - return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) - } - li := &layerInfo{ // A new element in each iteration - path: layerPath, - size: -1, - } - knownLayers[diffID] = li - unknownLayerSizes[layerPath] = li - } - - // Scan the tar file to collect layer sizes. - file, err := os.Open(s.tarPath) - if err != nil { - return nil, err - } - defer file.Close() - t := tar.NewReader(file) - for { - h, err := t.Next() - if err == io.EOF { - break - } - if err != nil { - return nil, err - } - if li, ok := unknownLayerSizes[h.Name]; ok { - // Since GetBlob will decompress layers that are compressed we need - // to do the decompression here as well, otherwise we will - // incorrectly report the size. Pretty critical, since tools like - // umoci always compress layer blobs. Obviously we only bother with - // the slower method of checking if it's compressed. - uncompressedStream, isCompressed, err := compression.AutoDecompress(t) - if err != nil { - return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name) - } - defer uncompressedStream.Close() - - uncompressedSize := h.Size - if isCompressed { - uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream) - if err != nil { - return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name) - } - } - li.size = uncompressedSize - delete(unknownLayerSizes, h.Name) - } - } - if len(unknownLayerSizes) != 0 { - return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. - } - - return knownLayers, nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. 
- return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) - } - if s.generatedManifest == nil { - if err := s.ensureCachedDataIsPresent(); err != nil { - return nil, "", err - } - m := manifest.Schema2{ - SchemaVersion: 2, - MediaType: manifest.DockerV2Schema2MediaType, - ConfigDescriptor: manifest.Schema2Descriptor{ - MediaType: manifest.DockerV2Schema2ConfigMediaType, - Size: int64(len(s.configBytes)), - Digest: s.configDigest, - }, - LayersDescriptors: []manifest.Schema2Descriptor{}, - } - for _, diffID := range s.orderedDiffIDList { - li, ok := s.knownLayers[diffID] - if !ok { - return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID) - } - m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ - Digest: diffID, // diffID is a digest of the uncompressed tarball - MediaType: manifest.DockerV2Schema2LayerMediaType, - Size: li.size, - }) - } - manifestBytes, err := json.Marshal(&m) - if err != nil { - return nil, "", err - } - s.generatedManifest = manifestBytes - } - return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil -} - -// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input. -type uncompressedReadCloser struct { - io.Reader - underlyingCloser func() error - uncompressedCloser func() error -} - -func (r uncompressedReadCloser) Close() error { - var res error - if err := r.uncompressedCloser(); err != nil { - res = err - } - if err := r.underlyingCloser(); err != nil && res == nil { - res = err - } - return res -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *Source) HasThreadSafeGetBlob() bool { - return true -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - if err := s.ensureCachedDataIsPresent(); err != nil { - return nil, 0, err - } - - if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. - return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil - } - - if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, - underlyingStream, err := s.openTarComponent(li.path) - if err != nil { - return nil, 0, err - } - closeUnderlyingStream := true - defer func() { - if closeUnderlyingStream { - underlyingStream.Close() - } - }() - - // In order to handle the fact that digests != diffIDs (and thus that a - // caller which is trying to verify the blob will run into problems), - // we need to decompress blobs. This is a bit ugly, but it's a - // consequence of making everything addressable by their DiffID rather - // than by their digest... - // - // In particular, because the v2s2 manifest being generated uses - // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of - // layers not their _actual_ digest. The result is that copy/... will - // be verifing a "digest" which is not the actual layer's digest (but - // is instead the DiffID). 
- - uncompressedStream, _, err := compression.AutoDecompress(underlyingStream) - if err != nil { - return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest) - } - - newStream := uncompressedReadCloser{ - Reader: uncompressedStream, - underlyingCloser: underlyingStream.Close, - uncompressedCloser: uncompressedStream.Close, - } - closeUnderlyingStream = false - - return newStream, li.size, nil - } - - return nil, 0, errors.Errorf("Unknown blob %s", info.Digest) -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. - return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) - } - return [][]byte{}, nil -} diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go b/vendor/github.com/containers/image/docker/tarfile/types.go deleted file mode 100644 index e81d93930..000000000 --- a/vendor/github.com/containers/image/docker/tarfile/types.go +++ /dev/null @@ -1,28 +0,0 @@ -package tarfile - -import ( - "github.com/containers/image/manifest" - "github.com/opencontainers/go-digest" -) - -// Various data structures. - -// Based on github.com/docker/docker/image/tarexport/tarexport.go -const ( - manifestFileName = "manifest.json" - legacyLayerFileName = "layer.tar" - legacyConfigFileName = "json" - legacyVersionFileName = "VERSION" - legacyRepositoriesFileName = "repositories" -) - -// ManifestItem is an element of the array stored in the top-level manifest.json file. -type ManifestItem struct { - Config string - RepoTags []string - Layers []string - Parent imageID `json:",omitempty"` - LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"` -} - -type imageID string diff --git a/vendor/github.com/containers/image/docker/wwwauthenticate.go b/vendor/github.com/containers/image/docker/wwwauthenticate.go deleted file mode 100644 index 23664a74a..000000000 --- a/vendor/github.com/containers/image/docker/wwwauthenticate.go +++ /dev/null @@ -1,159 +0,0 @@ -package docker - -// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies. - -import ( - "net/http" - "strings" -) - -// challenge carries information from a WWW-Authenticate response header. -// See RFC 7235. -type challenge struct { - // Scheme is the auth-scheme according to RFC 7235 - Scheme string - - // Parameters are the auth-params according to RFC 7235 - Parameters map[string]string -} - -// Octet types from RFC 7230. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -func parseAuthHeader(header http.Header) []challenge { - challenges := []challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, challenge{Scheme: v, Parameters: p}) - } - } - return challenges -} - -// NOTE: This is not a fully compliant parser per RFC 7235: -// Most notably it does not support more than one challenge within a single header -// Some of the whitespace parsing also seems noncompliant. -// But it is clearly better than what we used to have… -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go deleted file mode 100644 index 1f0faa1ad..000000000 --- a/vendor/github.com/containers/image/image/docker_list.go +++ /dev/null @@ -1,94 +0,0 @@ -package image - -import ( - "context" - "encoding/json" - "fmt" - "runtime" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type platformSpec struct { - Architecture string `json:"architecture"` - OS string `json:"os"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - Variant string `json:"variant,omitempty"` - Features []string `json:"features,omitempty"` // removed in OCI -} - -// A manifestDescriptor references a platform-specific manifest. 
-type manifestDescriptor struct { - manifest.Schema2Descriptor - Platform platformSpec `json:"platform"` -} - -type manifestList struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - Manifests []manifestDescriptor `json:"manifests"` -} - -// chooseDigestFromManifestList parses blob as a schema2 manifest list, -// and returns the digest of the image appropriate for the current environment. -func chooseDigestFromManifestList(sys *types.SystemContext, blob []byte) (digest.Digest, error) { - wantedArch := runtime.GOARCH - if sys != nil && sys.ArchitectureChoice != "" { - wantedArch = sys.ArchitectureChoice - } - wantedOS := runtime.GOOS - if sys != nil && sys.OSChoice != "" { - wantedOS = sys.OSChoice - } - - list := manifestList{} - if err := json.Unmarshal(blob, &list); err != nil { - return "", err - } - for _, d := range list.Manifests { - if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { - return d.Digest, nil - } - } - return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS) -} - -func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { - targetManifestDigest, err := chooseDigestFromManifestList(sys, manblob) - if err != nil { - return nil, err - } - manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) - if err != nil { - return nil, err - } - - matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) - if err != nil { - return nil, errors.Wrap(err, "Error computing manifest digest") - } - if !matches { - return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest) - } - - return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) -} - -// ChooseManifestInstanceFromManifestList returns a digest of a manifest appropriate -// for the current system from the manifest available from src. -func ChooseManifestInstanceFromManifestList(ctx context.Context, sys *types.SystemContext, src types.UnparsedImage) (digest.Digest, error) { - // For now this only handles manifest.DockerV2ListMediaType; we can generalize it later, - // probably along with manifest list editing. - blob, mt, err := src.Manifest(ctx) - if err != nil { - return "", err - } - if mt != manifest.DockerV2ListMediaType { - return "", fmt.Errorf("Internal error: Trying to select an image from a non-manifest-list manifest type %s", mt) - } - return chooseDigestFromManifestList(sys, blob) -} diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go deleted file mode 100644 index 28cec7ddb..000000000 --- a/vendor/github.com/containers/image/image/docker_schema1.go +++ /dev/null @@ -1,202 +0,0 @@ -package image - -import ( - "context" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type manifestSchema1 struct { - m *manifest.Schema1 -} - -func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { - m, err := manifest.Schema1FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestSchema1{m: m}, nil -} - -// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. 
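// A call sketch with hypothetical values, mirroring how the schema2-to-schema1
// conversion further down assembles its result:
//
//	m1, err := manifestSchema1FromComponents(
//		ref, // reference.Named, e.g. dest.Reference().DockerReference()
//		[]manifest.Schema1FSLayers{{BlobSum: layerDigest}},
//		[]manifest.Schema1History{{V1Compatibility: string(v1ConfigJSON)}},
//		"amd64")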
-func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) { - m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture) - if err != nil { - return nil, err - } - return &manifestSchema1{m: m}, nil -} - -func (m *manifestSchema1) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestSchema1) manifestMIMEType() string { - return manifest.DockerV2Schema1SignedMediaType -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestSchema1) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) { - return nil, nil -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - v2s2, err := m.convertToManifestSchema2(nil, nil) - if err != nil { - return nil, err - } - return v2s2.OCIConfig(ctx) -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestSchema1) LayerInfos() []types.BlobInfo { - return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - // This is a bit convoluted: We can’t just have a "get embedded docker reference" method - // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually - // embed a full docker/distribution reference, but only the repo name and tag (without the host name). - // So we would have to provide a “return repo without host name, and tag” getter for the generic code, - // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the - // generic copy code needs to know about is reference.Named and that a manifest may need updating - // for some destinations. - name := reference.Path(ref) - var tag string - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - tag = tagged.Tag() - } else { - tag = "" - } - return m.m.Name != name || m.m.Tag != tag -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
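// (For schema1 the config getter can be nil, as seen just below: everything
// (skopeo inspect) needs is embedded in the manifest's v1Compatibility
// entries rather than held in a separate config blob.)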
-func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) { - return m.m.Inspect(nil) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest) -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - if options.EmbeddedDockerReference != nil { - copy.m.Name = reference.Path(options.EmbeddedDockerReference) - if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { - copy.m.Tag = tagged.Tag() - } else { - copy.m.Tag = "" - } - } - - switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature; so, - // handle conversions between them by doing nothing. - case manifest.DockerV2Schema2MediaType: - m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) - if err != nil { - return nil, err - } - return memoryImageFromManifest(m2), nil - case imgspecv1.MediaTypeImageManifest: - // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest - m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) - if err != nil { - return nil, err - } - return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{ - ManifestMIMEType: imgspecv1.MediaTypeImageManifest, - InformationOnly: options.InformationOnly, - }) - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType) - } - - return memoryImageFromManifest(©), nil -} - -// Based on github.com/docker/docker/distribution/pull_v2.go -func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) { - if len(m.m.ExtractedV1Compatibility) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing. 
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) - } - if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { - return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) - } - if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { - return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) - } - if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { - return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) - } - - // Build a list of the diffIDs for the non-empty layers. - diffIDs := []digest.Digest{} - var layers []manifest.Schema2Descriptor - for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { - v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index - - if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { - var size int64 - if uploadedLayerInfos != nil { - size = uploadedLayerInfos[v2Index].Size - } - var d digest.Digest - if layerDiffIDs != nil { - d = layerDiffIDs[v2Index] - } - layers = append(layers, manifest.Schema2Descriptor{ - MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Size: size, - Digest: m.m.FSLayers[v1Index].BlobSum, - }) - diffIDs = append(diffIDs, d) - } - } - configJSON, err := m.m.ToSchema2Config(diffIDs) - if err != nil { - return nil, err - } - configDescriptor := manifest.Schema2Descriptor{ - MediaType: "application/vnd.docker.container.image.v1+json", - Size: int64(len(configJSON)), - Digest: digest.FromBytes(configJSON), - } - - return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil -} diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go deleted file mode 100644 index 351e73ea1..000000000 --- a/vendor/github.com/containers/image/image/docker_schema2.go +++ /dev/null @@ -1,351 +0,0 @@ -package image - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "io/ioutil" - "strings" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/blobinfocache/none" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) -// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is -// a non-zero embedded timestamp; we could zero that, but that would just waste storage space -// in registries, so let’s use the same values. -var GzippedEmptyLayer = []byte{ - 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, - 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, -} - -// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer -const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - -type manifestSchema2 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. 
- m *manifest.Schema2 -} - -func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { - m, err := manifest.Schema2FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestSchema2{ - src: src, - m: m, - }, nil -} - -// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: -func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest { - return &manifestSchema2{ - src: src, - configBlob: configBlob, - m: manifest.Schema2FromComponents(config, layers), - } -} - -func (m *manifestSchema2) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestSchema2) manifestMIMEType() string { - return m.m.MediaType -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestSchema2) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - configBlob, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields - // than OCI v1. This unmarshal makes sure we drop docker v2s2 - // fields that aren't needed in OCI v1. - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(configBlob, configOCI); err != nil { - return nil, err - } - return configOCI, nil -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { - if m.configBlob == nil { - if m.src == nil { - return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") - } - stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache) - if err != nil { - return nil, err - } - defer stream.Close() - blob, err := ioutil.ReadAll(stream) - if err != nil { - return nil, err - } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) - } - m.configBlob = blob - } - return m.configBlob, nil -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestSchema2) LayerInfos() []types.BlobInfo { - return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. 
-// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - return false -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { - getter := func(info types.BlobInfo) ([]byte, error) { - if info.Digest != m.ConfigInfo().Digest { - // Shouldn't ever happen - return nil, errors.New("asked for a different config blob") - } - config, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - return config, nil - } - return m.m.Inspect(getter) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return false -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. - src: m.src, - configBlob: m.configBlob, - m: manifest.Schema2Clone(m.m), - } - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. - - switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType: - return copy.convertToManifestSchema1(ctx, options.InformationOnly.Destination) - case imgspecv1.MediaTypeImageManifest: - return copy.convertToManifestOCI1(ctx) - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType) - } - - return memoryImageFromManifest(©), nil -} - -func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { - return imgspecv1.Descriptor{ - MediaType: d.MediaType, - Size: d.Size, - Digest: d.Digest, - URLs: d.URLs, - } -} - -func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context) (types.Image, error) { - configOCI, err := m.OCIConfig(ctx) - if err != nil { - return nil, err - } - configOCIBytes, err := json.Marshal(configOCI) - if err != nil { - return nil, err - } - - config := imgspecv1.Descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: int64(len(configOCIBytes)), - Digest: digest.FromBytes(configOCIBytes), - } - - layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) - for idx := range layers { - layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) - if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType { - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable - } else { - // we assume layers are gzip'ed because docker v2s2 only deals with - // gzip'ed layers. However, OCI has non-gzip'ed layers as well. 
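// (For reference, the concrete strings involved in this mapping: the
// foreign-layer type application/vnd.docker.image.rootfs.foreign.diff.tar.gzip
// becomes application/vnd.oci.image.layer.nondistributable.v1.tar, and any
// other layer type, assumed gzip'ed, becomes
// application/vnd.oci.image.layer.v1.tar+gzip.)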
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip - } - } - - m1 := manifestOCI1FromComponents(config, m.src, configOCIBytes, layers) - return memoryImageFromManifest(m1), nil -} - -// Based on docker/distribution/manifest/schema1/config_builder.go -func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest types.ImageDestination) (types.Image, error) { - configBytes, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - imageConfig := &manifest.Schema2Image{} - if err := json.Unmarshal(configBytes, imageConfig); err != nil { - return nil, err - } - - // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. - fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) - history := make([]manifest.Schema1History, len(imageConfig.History)) - nonemptyLayerIndex := 0 - var parentV1ID string // Set in the loop - v1ID := "" - haveGzippedEmptyLayer := false - if len(imageConfig.History) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. - return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) - } - for v2Index, historyEntry := range imageConfig.History { - parentV1ID = v1ID - v1Index := len(imageConfig.History) - 1 - v2Index - - var blobDigest digest.Digest - if historyEntry.EmptyLayer { - if !haveGzippedEmptyLayer { - logrus.Debugf("Uploading empty layer during conversion to schema 1") - // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, - // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. - info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false) - if err != nil { - return nil, errors.Wrap(err, "Error uploading empty layer") - } - if info.Digest != GzippedEmptyLayerDigest { - return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, GzippedEmptyLayerDigest) - } - haveGzippedEmptyLayer = true - } - blobDigest = GzippedEmptyLayerDigest - } else { - if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { - return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) - } - blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest - nonemptyLayerIndex++ - } - - // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. 
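// A worked sketch of the derivation (defined in v1IDFromBlobDigestAndComponents
// below): the ID is the hex-encoded SHA-256 of the space-joined components,
//
//	sum := sha256.Sum256([]byte(blobDigest.Hex() + " " + parentV1ID))
//	v1ID := hex.EncodeToString(sum[:])
//
// so layers with identical blobs but different parents still get distinct IDs.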
- v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) - if err != nil { - return nil, err - } - v1ID = v - - fakeImage := manifest.Schema1V1Compatibility{ - ID: v1ID, - Parent: parentV1ID, - Comment: historyEntry.Comment, - Created: historyEntry.Created, - Author: historyEntry.Author, - ThrowAway: historyEntry.EmptyLayer, - } - fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} - v1CompatibilityBytes, err := json.Marshal(&fakeImage) - if err != nil { - return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) - } - - fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} - history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} - // Note that parentV1ID of the top layer is preserved when exiting this loop - } - - // Now patch in real configuration for the top layer (v1Index == 0) - v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. - if err != nil { - return nil, err - } - v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) - if err != nil { - return nil, err - } - history[0].V1Compatibility = string(v1Config) - - m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) - if err != nil { - return nil, err // This should never happen, we should have created all the components correctly. - } - return memoryImageFromManifest(m1), nil -} - -func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { - if err := blobDigest.Validate(); err != nil { - return "", err - } - parts := append([]string{blobDigest.Hex()}, others...) - v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) - return hex.EncodeToString(v1IDHash[:]), nil -} - -func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Preserve everything we don't specifically know about. - // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) - rawContents := map[string]*json.RawMessage{} - if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! - return nil, err - } - delete(rawContents, "rootfs") - delete(rawContents, "history") - - updates := map[string]interface{}{"id": v1ID} - if parentV1ID != "" { - updates["parent"] = parentV1ID - } - if throwaway { - updates["throwaway"] = throwaway - } - for field, value := range updates { - encoded, err := json.Marshal(value) - if err != nil { - return nil, err - } - rawContents[field] = (*json.RawMessage)(&encoded) - } - return json.Marshal(rawContents) -} diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go deleted file mode 100644 index c5ca5b12e..000000000 --- a/vendor/github.com/containers/image/image/manifest.go +++ /dev/null @@ -1,73 +0,0 @@ -package image - -import ( - "context" - "fmt" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// genericManifest is an interface for parsing, modifying image manifests and related data. 
-// Note that the public methods are intended to be a subset of types.Image -// so that embedding a genericManifest into structs works. -// will support v1 one day... -type genericManifest interface { - serialize() ([]byte, error) - manifestMIMEType() string - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. - ConfigInfo() types.BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. - // The result is cached; it is OK to call this however often you need. - ConfigBlob(context.Context) ([]byte, error) - // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about - // layers in the resulting configuration isn't guaranteed to be returned to due how - // old image manifests work (docker v2s1 especially). - OCIConfig(context.Context) (*imgspecv1.Image, error) - // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []types.BlobInfo - // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. - // It returns false if the manifest does not embed a Docker reference. - // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) - EmbeddedDockerReferenceConflicts(ref reference.Named) bool - // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. - Inspect(context.Context) (*types.ImageInspectInfo, error) - // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. - // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute - // (most importantly it forces us to download the full layers even if they are already present at the destination). - UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool - // UpdatedImage returns a types.Image modified according to options. - // This does not change the state of the original Image object. - UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) -} - -// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. -// If manblob is a manifest list, it implicitly chooses an appropriate image from the list. -func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { - switch manifest.NormalizedMIMEType(mt) { - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - return manifestSchema1FromManifest(manblob) - case imgspecv1.MediaTypeImageManifest: - return manifestOCI1FromManifest(src, manblob) - case manifest.DockerV2Schema2MediaType: - return manifestSchema2FromManifest(src, manblob) - case manifest.DockerV2ListMediaType: - return manifestSchema2FromManifestList(ctx, sys, src, manblob) - default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. 
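// (At the time of this vendoring, manifest.NormalizedMIMEType maps "" and any
// unrecognized value to DockerV2Schema1SignedMediaType, so every value it can
// return is already covered by the cases above.)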
- return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) - } -} - -// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo. -func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo { - blobs := make([]types.BlobInfo, len(layers)) - for i, layer := range layers { - blobs[i] = layer.BlobInfo - } - return blobs -} diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go deleted file mode 100644 index 0be69eca4..000000000 --- a/vendor/github.com/containers/image/image/memory.go +++ /dev/null @@ -1,65 +0,0 @@ -package image - -import ( - "context" - - "github.com/pkg/errors" - - "github.com/containers/image/types" -) - -// memoryImage is a mostly-implementation of types.Image assembled from data -// created in memory, used primarily as a return value of types.Image.UpdatedImage -// as a way to carry various structured information in a type-safe and easy-to-use way. -// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone -// collection of all related information, e.g. there is no way to get layer blobs -// from a memoryImage. -type memoryImage struct { - genericManifest - serializedManifest []byte // A private cache for Manifest() -} - -func memoryImageFromManifest(m genericManifest) types.Image { - return &memoryImage{ - genericManifest: m, - serializedManifest: nil, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (i *memoryImage) Reference() types.ImageReference { - // It would really be inappropriate to return the ImageReference of the image this was based on. - return nil -} - -// Size returns the size of the image as stored, if known, or -1 if not. -func (i *memoryImage) Size() (int64, error) { - return -1, nil -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. -func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) { - if i.serializedManifest == nil { - m, err := i.genericManifest.serialize() - if err != nil { - return nil, "", err - } - i.serializedManifest = m - } - return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. -func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { - // Modifying an image invalidates signatures; a caller asking the updated image for signatures - // is probably confused. - return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") -} - -// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. 
-func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go deleted file mode 100644 index cdff26e06..000000000 --- a/vendor/github.com/containers/image/image/oci.go +++ /dev/null @@ -1,198 +0,0 @@ -package image - -import ( - "context" - "encoding/json" - "io/ioutil" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/blobinfocache/none" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type manifestOCI1 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of m.Config. - m *manifest.OCI1 -} - -func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { - m, err := manifest.OCI1FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestOCI1{ - src: src, - m: m, - }, nil -} - -// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: -func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { - return &manifestOCI1{ - src: src, - configBlob: configBlob, - m: manifest.OCI1FromComponents(config, layers), - } -} - -func (m *manifestOCI1) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestOCI1) manifestMIMEType() string { - return imgspecv1.MediaTypeImageManifest -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestOCI1) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { - if m.configBlob == nil { - if m.src == nil { - return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") - } - stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache) - if err != nil { - return nil, err - } - defer stream.Close() - blob, err := ioutil.ReadAll(stream) - if err != nil { - return nil, err - } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.Config.Digest { - return nil, errors.Errorf("Downloaded config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) - } - m.configBlob = blob - } - return m.configBlob, nil -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned due to how -// old image manifests work (docker v2s1 especially).
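The digest check in ConfigBlob above is the general go-digest pattern for verifying any fetched blob against the digest its descriptor promised; a sketch in isolation (hypothetical blob contents):

package main

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"

    "github.com/opencontainers/go-digest"
)

// verifyBlob reads a blob stream and rejects it unless it matches the
// expected digest, as ConfigBlob above does for the config object.
func verifyBlob(stream io.Reader, expected digest.Digest) ([]byte, error) {
    blob, err := ioutil.ReadAll(stream)
    if err != nil {
        return nil, err
    }
    if computed := digest.FromBytes(blob); computed != expected {
        return nil, fmt.Errorf("blob digest %s does not match expected %s", computed, expected)
    }
    return blob, nil
}

func main() {
    payload := []byte(`{"architecture":"amd64"}`) // hypothetical config blob
    blob, err := verifyBlob(bytes.NewReader(payload), digest.FromBytes(payload))
    fmt.Println(len(blob), err)
}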
-func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - cb, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(cb, configOCI); err != nil { - return nil, err - } - return configOCI, nil -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestOCI1) LayerInfos() []types.BlobInfo { - return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) -} - -// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - return false -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { - getter := func(info types.BlobInfo) ([]byte, error) { - if info.Digest != m.ConfigInfo().Digest { - // Shouldn't ever happen - return nil, errors.New("asked for a different config blob") - } - config, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - return config, nil - } - return m.m.Inspect(getter) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return false -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. - src: m.src, - configBlob: m.configBlob, - m: manifest.OCI1Clone(m.m), - } - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
- - switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - // We can't directly convert to V1, but we can transitively convert via a V2 image - m2, err := copy.convertToManifestSchema2() - if err != nil { - return nil, err - } - return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{ - ManifestMIMEType: options.ManifestMIMEType, - InformationOnly: options.InformationOnly, - }) - case manifest.DockerV2Schema2MediaType: - return copy.convertToManifestSchema2() - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType) - } - - return memoryImageFromManifest(&copy), nil -} - -func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor { - return manifest.Schema2Descriptor{ - MediaType: d.MediaType, - Size: d.Size, - Digest: d.Digest, - URLs: d.URLs, - } -} - -func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { - // Create a copy of the descriptor. - config := schema2DescriptorFromOCI1Descriptor(m.m.Config) - - // The only difference between OCI and DockerSchema2 is the mediatypes. The - // media type of the manifest is handled by manifestSchema2FromComponents. - config.MediaType = manifest.DockerV2Schema2ConfigMediaType - - layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) - for idx := range layers { - layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) - layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType - } - - // Rather than copying the ConfigBlob now, we just pass m.src to the - // translated manifest, since the only difference is the mediatype of - // descriptors; there is no change to any blob stored in m.src. - m1 := manifestSchema2FromComponents(config, m.src, nil, layers) - return memoryImageFromManifest(m1), nil -} diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go deleted file mode 100644 index 01cc28bbd..000000000 --- a/vendor/github.com/containers/image/image/sourced.go +++ /dev/null @@ -1,103 +0,0 @@ -// Package image consolidates knowledge about various container image formats -// (as opposed to image storage mechanisms, which are handled by types.ImageSource) -// and exposes all of them using a unified interface. -package image - -import ( - "context" - "github.com/containers/image/types" -) - -// imageCloser implements types.ImageCloser, perhaps allowing simple users -// to use a single object without having to keep a reference to a types.ImageSource -// only to call types.ImageSource.Close(). -type imageCloser struct { - types.Image - src types.ImageSource -} - -// FromSource returns a types.ImageCloser implementation for the default instance of source. -// If source is a manifest list, .Manifest() still returns the manifest list, -// but other methods transparently return data from an appropriate image instance. -// -// The caller must call .Close() on the returned ImageCloser. -// -// FromSource “takes ownership” of the input ImageSource and will call src.Close() -// when the image is closed. (This does not prevent callers from using both the -// Image and ImageSource objects simultaneously, but it means that they only need to keep -// the Image.)
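The conversion just above changes nothing but media types; a sketch of that descriptor mapping on its own (pre-v4 import path as vendored here; under the v4 module the path gains a /v4 segment):

package main

import (
    "fmt"

    "github.com/containers/image/manifest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// toSchema2MediaType maps the OCI media types handled by
// convertToManifestSchema2 above onto their Docker schema2 equivalents;
// digest, size and URLs are carried over unchanged.
func toSchema2MediaType(ociMediaType string) string {
    switch ociMediaType {
    case imgspecv1.MediaTypeImageConfig:
        return manifest.DockerV2Schema2ConfigMediaType
    case imgspecv1.MediaTypeImageLayerGzip:
        return manifest.DockerV2Schema2LayerMediaType
    default:
        return ociMediaType // the code above instead overwrites unconditionally
    }
}

func main() {
    fmt.Println(toSchema2MediaType(imgspecv1.MediaTypeImageConfig))
}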
-// -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. -func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { - img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil)) - if err != nil { - return nil, err - } - return &imageCloser{ - Image: img, - src: src, - }, nil -} - -func (ic *imageCloser) Close() error { - return ic.src.Close() -} - -// sourcedImage is a general set of utilities for working with container images, -// whatever is their underlying location (i.e. dockerImageSource-independent). -// Note the existence of skopeo/docker.Image: some instances of a `types.Image` -// may not be a `sourcedImage` directly. However, most users of `types.Image` -// do not care, and those who care about `skopeo/docker.Image` know they do. -type sourcedImage struct { - *UnparsedImage - manifestBlob []byte - manifestMIMEType string - // genericManifest contains data corresponding to manifestBlob. - // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest - // if you want to preserve the original manifest; use manifestBlob directly. - genericManifest -} - -// FromUnparsedImage returns a types.Image implementation for unparsed. -// If unparsed represents a manifest list, .Manifest() still returns the manifest list, -// but other methods transparently return data from an appropriate single image. -// -// The Image must not be used after the underlying ImageSource is Close()d. -func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { - // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: - // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, - // this is the only UnparsedImage implementation around, anyway. - - // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). - manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) - if err != nil { - return nil, err - } - - parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) - if err != nil { - return nil, err - } - - return &sourcedImage{ - UnparsedImage: unparsed, - manifestBlob: manifestBlob, - manifestMIMEType: manifestMIMEType, - genericManifest: parsedManifest, - }, nil -} - -// Size returns the size of the image as stored, if it's known, or -1 if it isn't. -func (i *sourcedImage) Size() (int64, error) { - return -1, nil -} - -// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. 
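A caller-side sketch of the FromSource ownership contract described above (inspectImage and ref are hypothetical; note the explicit src.Close() on the error path, since ownership is only taken on success):

package example

import (
    "context"
    "fmt"

    "github.com/containers/image/image"
    "github.com/containers/image/types"
)

// inspectImage is a hypothetical caller of FromSource above.
func inspectImage(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) error {
    src, err := ref.NewImageSource(ctx, sys)
    if err != nil {
        return err
    }
    img, err := image.FromSource(ctx, sys, src)
    if err != nil {
        src.Close() // FromSource did not take ownership on failure
        return err
    }
    defer img.Close() // closes src as well, per the contract above

    manifestBlob, mimeType, err := img.Manifest(ctx)
    if err != nil {
        return err
    }
    fmt.Println(mimeType, len(manifestBlob))
    return nil
}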
-func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) { - return i.manifestBlob, i.manifestMIMEType, nil -} - -func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return i.UnparsedImage.src.LayerInfosForCopy(ctx) -} diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/image/unparsed.go deleted file mode 100644 index 2c9280d35..000000000 --- a/vendor/github.com/containers/image/image/unparsed.go +++ /dev/null @@ -1,95 +0,0 @@ -package image - -import ( - "context" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// UnparsedImage implements types.UnparsedImage. -// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. -type UnparsedImage struct { - src types.ImageSource - instanceDigest *digest.Digest - cachedManifest []byte // A private cache for Manifest(); nil if not yet known. - // A private cache for Manifest(), may be the empty string if guessing failed. - // Valid iff cachedManifest is not nil. - cachedManifestMIMEType string - cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known. -} - -// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). -// -// The UnparsedImage must not be used after the underlying ImageSource is Close()d. -func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { - return &UnparsedImage{ - src: src, - instanceDigest: instanceDigest, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (i *UnparsedImage) Reference() types.ImageReference { - // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity. - return i.src.Reference() -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. -func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) { - if i.cachedManifest == nil { - m, mt, err := i.src.GetManifest(ctx, i.instanceDigest) - if err != nil { - return nil, "", err - } - - // ImageSource.GetManifest does not do digest verification, but we do; - // this immediately protects also any user of types.Image. - if digest, haveDigest := i.expectedManifestDigest(); haveDigest { - matches, err := manifest.MatchesDigest(m, digest) - if err != nil { - return nil, "", errors.Wrap(err, "Error computing manifest digest") - } - if !matches { - return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) - } - } - - i.cachedManifest = m - i.cachedManifestMIMEType = mt - } - return i.cachedManifest, i.cachedManifestMIMEType, nil -} - -// expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known.
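A usage sketch of the digest-verified caching shown above, assuming src is an open types.ImageSource and instanceDigest pins one entry of a manifest list:

package example

import (
    "context"

    "github.com/containers/image/image"
    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
)

// pinnedManifest fetches the manifest of one instance of a manifest list;
// UnparsedImage.Manifest above verifies the returned bytes against
// instanceDigest before caching them, so the result is safe to parse.
func pinnedManifest(ctx context.Context, src types.ImageSource, instanceDigest digest.Digest) ([]byte, string, error) {
    unparsed := image.UnparsedInstance(src, &instanceDigest)
    return unparsed.Manifest(ctx)
}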
-// The bool return value seems redundant with digest != ""; it is used explicitly -// to refuse (unexpected) situations when the digest exists but is "". -func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { - if i.instanceDigest != nil { - return *i.instanceDigest, true - } - ref := i.Reference().DockerReference() - if ref != nil { - if canonical, ok := ref.(reference.Canonical); ok { - return canonical.Digest(), true - } - } - return "", false -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. -func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { - if i.cachedSignatures == nil { - sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) - if err != nil { - return nil, err - } - i.cachedSignatures = sigs - } - return i.cachedSignatures, nil -} diff --git a/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go deleted file mode 100644 index 8c776929c..000000000 --- a/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go +++ /dev/null @@ -1,29 +0,0 @@ -package tmpdir - -import ( - "os" - "runtime" -) - -// unixTempDirForBigFiles is the directory path to store big files on non-Windows systems. -// You can override this at build time with -// -ldflags '-X github.com/containers/image/internal/tmpdir.unixTempDirForBigFiles=$your_path' -var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles - -// builtinUnixTempDirForBigFiles is the directory path to store big files. -// Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. -// DO NOT change this, instead see unixTempDirForBigFiles above. -const builtinUnixTempDirForBigFiles = "/var/tmp" - -// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files. -// On non-Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp -// which on systemd-based systems could be the unsuitable tmpfs filesystem. -func TemporaryDirectoryForBigFiles() string { - var temporaryDirectoryForBigFiles string - if runtime.GOOS == "windows" { - temporaryDirectoryForBigFiles = os.TempDir() - } else { - temporaryDirectoryForBigFiles = unixTempDirForBigFiles - } - return temporaryDirectoryForBigFiles -} diff --git a/vendor/github.com/containers/image/manifest/docker_schema1.go b/vendor/github.com/containers/image/manifest/docker_schema1.go deleted file mode 100644 index b76286b8f..000000000 --- a/vendor/github.com/containers/image/manifest/docker_schema1.go +++ /dev/null @@ -1,316 +0,0 @@ -package manifest - -import ( - "encoding/json" - "regexp" - "strings" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" - "github.com/docker/docker/api/types/versions" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. -type Schema1FSLayers struct { - BlobSum digest.Digest `json:"blobSum"` -} - -// Schema1History is an entry of the "history" array in docker/distribution schema 1. -type Schema1History struct { - V1Compatibility string `json:"v1Compatibility"` -} - -// Schema1 is a manifest in docker/distribution schema 1.
-type Schema1 struct { - Name string `json:"name"` - Tag string `json:"tag"` - Architecture string `json:"architecture"` - FSLayers []Schema1FSLayers `json:"fsLayers"` - History []Schema1History `json:"history"` // Keep this in sync with ExtractedV1Compatibility! - ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"` // Keep this in sync with History! Does not contain the full config (Schema2V1Image) - SchemaVersion int `json:"schemaVersion"` -} - -type schema1V1CompatibilityContainerConfig struct { - Cmd []string -} - -// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1. -type Schema1V1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` -} - -// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. -// (NOTE: The instance is not necessarily a literal representation of the original blob, -// layers with duplicate IDs are eliminated.) -func Schema1FromManifest(manifest []byte) (*Schema1, error) { - s1 := Schema1{} - if err := json.Unmarshal(manifest, &s1); err != nil { - return nil, err - } - if s1.SchemaVersion != 1 { - return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion) - } - if err := s1.initialize(); err != nil { - return nil, err - } - if err := s1.fixManifestLayers(); err != nil { - return nil, err - } - return &s1, nil -} - -// Schema1FromComponents creates a Schema1 manifest instance from the supplied data. -func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) { - var name, tag string - if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them. - name = reference.Path(ref) - if tagged, ok := ref.(reference.NamedTagged); ok { - tag = tagged.Tag() - } - } - s1 := Schema1{ - Name: name, - Tag: tag, - Architecture: architecture, - FSLayers: fsLayers, - History: history, - SchemaVersion: 1, - } - if err := s1.initialize(); err != nil { - return nil, err - } - return &s1, nil -} - -// Schema1Clone creates a copy of the supplied Schema1 manifest. -func Schema1Clone(src *Schema1) *Schema1 { - copy := *src - return &copy -} - -// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest. -func (m *Schema1) initialize() error { - if len(m.FSLayers) != len(m.History) { - return errors.New("length of history not equal to number of layers") - } - if len(m.FSLayers) == 0 { - return errors.New("no FSLayers in manifest") - } - m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History)) - for i, h := range m.History { - if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil { - return errors.Wrapf(err, "Error parsing v2s1 history entry %d", i) - } - } - return nil -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
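A small sketch of consuming the per-layer compat entries that Schema1FromManifest/initialize above extract (blob is hypothetical; entries are in on-wire order, most recent layer first):

package example

import (
    "fmt"

    "github.com/containers/image/manifest"
)

// dumpSchema1History parses a v2s1 manifest blob and walks the
// v1Compatibility entries decoded by initialize above.
func dumpSchema1History(blob []byte) error {
    s1, err := manifest.Schema1FromManifest(blob)
    if err != nil {
        return err
    }
    for i, compat := range s1.ExtractedV1Compatibility {
        fmt.Printf("%d: id=%s parent=%s emptyLayer=%v\n", i, compat.ID, compat.Parent, compat.ThrowAway)
    }
    return nil
}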
-func (m *Schema1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{} -} - -// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *Schema1) LayerInfos() []LayerInfo { - layers := make([]LayerInfo, len(m.FSLayers)) - for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers[(len(m.FSLayers)-1)-i] = LayerInfo{ - BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1}, - EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway, - } - } - return layers -} - -// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) -func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well. - if len(m.FSLayers) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) - } - m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) - for i, info := range layerInfos { - // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest, - // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. - // So, we don't bother recomputing the IDs in m.History.V1Compatibility. - m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *Schema1) Serialize() ([]byte, error) { - // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. - unsigned, err := json.Marshal(*m) - if err != nil { - return nil, err - } - return AddDummyV2S1Signature(unsigned) -} - -// fixManifestLayers, after validating the supplied manifest -// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), -// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, -// both from m.History and m.FSLayers). -// Note that even after this succeeds, m.FSLayers may contain duplicate entries -// (for Dockerfile operations which change the configuration but not the filesystem). 
-func (m *Schema1) fixManifestLayers() error { - // m.initialize() has verified that len(m.FSLayers) == len(m.History) - for _, compat := range m.ExtractedV1Compatibility { - if err := validateV1ID(compat.ID); err != nil { - return err - } - } - if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" { - return errors.New("Invalid parent ID in the base layer of the image") - } - // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) - var lastID string - for _, img := range m.ExtractedV1Compatibility { - // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap[lastID] = struct{}{} - } - // backwards loop so that we keep the remaining indexes after removing items - for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { - if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue - m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) - m.History = append(m.History[:i], m.History[i+1:]...) - m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...) - } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { - return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) - } - } - return nil -} - -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) - -func validateV1ID(id string) error { - if ok := validHex.MatchString(id); !ok { - return errors.Errorf("image ID %q is invalid", id) - } - return nil -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - s1 := &Schema2V1Image{} - if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { - return nil, err - } - i := &types.ImageInspectInfo{ - Tag: m.Tag, - Created: &s1.Created, - DockerVersion: s1.DockerVersion, - Architecture: s1.Architecture, - Os: s1.OS, - Layers: layerInfosToStrings(m.LayerInfos()), - } - if s1.Config != nil { - i.Labels = s1.Config.Labels - i.Env = s1.Config.Env - } - return i, nil -} - -// ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs. -func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { - // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields - // that aren't directly comparable using info from the manifest. - if len(m.History) == 0 { - return nil, errors.New("image has no layers") - } - s1 := Schema2V1Image{} - config := []byte(m.History[0].V1Compatibility) - err := json.Unmarshal(config, &s1) - if err != nil { - return nil, errors.Wrapf(err, "error decoding configuration") - } - // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, - // adding some fields that aren't "omitempty". - if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") { - config, err = json.Marshal(&s1) - if err != nil { - return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s1) - } - } - // Build the history. 
- convertedHistory := []Schema2History{} - for _, compat := range m.ExtractedV1Compatibility { - hitem := Schema2History{ - Created: compat.Created, - CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), - Author: compat.Author, - Comment: compat.Comment, - EmptyLayer: compat.ThrowAway, - } - convertedHistory = append([]Schema2History{hitem}, convertedHistory...) - } - // Build the rootfs information. We need the decompressed sums that we've been - // calculating to fill in the DiffIDs. It's expected (but not enforced by us) - // that the number of diffIDs corresponds to the number of non-EmptyLayer - // entries in the history. - rootFS := &Schema2RootFS{ - Type: "layers", - DiffIDs: diffIDs, - } - // And now for some raw manipulation. - raw := make(map[string]*json.RawMessage) - err = json.Unmarshal(config, &raw) - if err != nil { - return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s1) - } - // Drop some fields. - delete(raw, "id") - delete(raw, "parent") - delete(raw, "parent_id") - delete(raw, "layer_id") - delete(raw, "throwaway") - delete(raw, "Size") - // Add the history and rootfs information. - rootfs, err := json.Marshal(rootFS) - if err != nil { - return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) - } - rawRootfs := json.RawMessage(rootfs) - raw["rootfs"] = &rawRootfs - history, err := json.Marshal(convertedHistory) - if err != nil { - return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) - } - rawHistory := json.RawMessage(history) - raw["history"] = &rawHistory - // Encode the result. - config, err = json.Marshal(raw) - if err != nil { - return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err) - } - return config, nil -} - -// ImageID computes an ID which can uniquely identify this image by its contents. -func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { - image, err := m.ToSchema2Config(diffIDs) - if err != nil { - return "", err - } - return digest.FromBytes(image).Hex(), nil -} diff --git a/vendor/github.com/containers/image/manifest/docker_schema2.go b/vendor/github.com/containers/image/manifest/docker_schema2.go deleted file mode 100644 index 76a80e5a6..000000000 --- a/vendor/github.com/containers/image/manifest/docker_schema2.go +++ /dev/null @@ -1,255 +0,0 @@ -package manifest - -import ( - "encoding/json" - "time" - - "github.com/containers/image/pkg/strslice" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. -type Schema2Descriptor struct { - MediaType string `json:"mediaType"` - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` - URLs []string `json:"urls,omitempty"` -} - -// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor. -func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo { - return types.BlobInfo{ - Digest: desc.Digest, - Size: desc.Size, - URLs: desc.URLs, - MediaType: desc.MediaType, - } -} - -// Schema2 is a manifest in docker/distribution schema 2. -type Schema2 struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - ConfigDescriptor Schema2Descriptor `json:"config"` - LayersDescriptors []Schema2Descriptor `json:"layers"` -} - -// Schema2Port is a Port, a string containing port number and protocol in the -// format "80/tcp", from docker/go-connections/nat. 
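The raw manipulation in ToSchema2Config above is a reusable trick: round-trip the document through map[string]*json.RawMessage so unknown fields survive while selected ones are dropped or replaced. The same pattern, reduced to a self-contained sketch:

package main

import (
    "encoding/json"
    "fmt"
)

// editJSONFields drops and overrides top-level fields without disturbing
// anything else, as ToSchema2Config (and v1ConfigFromConfigJSON earlier) do
// to image configs.
func editJSONFields(doc []byte, drop []string, set map[string]interface{}) ([]byte, error) {
    raw := map[string]*json.RawMessage{}
    if err := json.Unmarshal(doc, &raw); err != nil {
        return nil, err
    }
    for _, field := range drop {
        delete(raw, field)
    }
    for field, value := range set {
        encoded, err := json.Marshal(value)
        if err != nil {
            return nil, err
        }
        msg := json.RawMessage(encoded)
        raw[field] = &msg
    }
    return json.Marshal(raw)
}

func main() {
    out, err := editJSONFields(
        []byte(`{"id":"x","os":"linux"}`), // hypothetical config
        []string{"id"},
        map[string]interface{}{"rootfs": map[string]string{"type": "layers"}})
    fmt.Println(string(out), err)
}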
-type Schema2Port string - -// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from -// docker/go-connections/nat. -type Schema2PortSet map[Schema2Port]struct{} - -// Schema2HealthConfig is a HealthConfig, which holds configuration settings -// for the HEALTHCHECK feature, from docker/docker/api/types/container. -type Schema2HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Schema2Config is a Config in docker/docker/api/types/container. -type Schema2Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container; also supports user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the first attached client disconnects. - Env []string // List of environment variables to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in which the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - -// Schema2V1Image is a V1Image in docker/docker/image.
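For the Schema2HealthConfig above, a sketch of a typically populated healthcheck (values are hypothetical; the durations marshal as integer nanoseconds, so time.Duration literals fit directly):

package example

import (
    "time"

    "github.com/containers/image/manifest"
)

// exampleHealthcheck uses the CMD-SHELL form described in the Test field
// comment above; []string{"NONE"} would disable the check instead.
func exampleHealthcheck() *manifest.Schema2HealthConfig {
    return &manifest.Schema2HealthConfig{
        Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
        Interval: 30 * time.Second,
        Timeout:  5 * time.Second,
        Retries:  3,
    }
}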
-type Schema2V1Image struct { - // ID is a unique 64 character identifier of the image - ID string `json:"id,omitempty"` - // Parent is the ID of the parent image - Parent string `json:"parent,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Container is the id of the container used to commit - Container string `json:"container,omitempty"` - // ContainerConfig is the configuration of the container that is committed into the image - ContainerConfig Schema2Config `json:"container_config,omitempty"` - // DockerVersion specifies the version of Docker that was used to build the image - DockerVersion string `json:"docker_version,omitempty"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *Schema2Config `json:"config,omitempty"` - // Architecture is the hardware that the image is built and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` - // Size is the total size of the image including all layers it is composed of - Size int64 `json:",omitempty"` -} - -// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image. -type Schema2RootFS struct { - Type string `json:"type"` - DiffIDs []digest.Digest `json:"diff_ids,omitempty"` -} - -// Schema2History stores build commands that were used to create an image, from docker/docker/image. -type Schema2History struct { - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building the image - CreatedBy string `json:"created_by,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // EmptyLayer is set to true if this history item did not generate a - // layer. Otherwise, the history item is associated with the next - // layer in the RootFS section. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Schema2Image is an Image in docker/docker/image. -type Schema2Image struct { - Schema2V1Image - Parent digest.Digest `json:"parent,omitempty"` - RootFS *Schema2RootFS `json:"rootfs,omitempty"` - History []Schema2History `json:"history,omitempty"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` -} - -// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob. -func Schema2FromManifest(manifest []byte) (*Schema2, error) { - s2 := Schema2{} - if err := json.Unmarshal(manifest, &s2); err != nil { - return nil, err - } - return &s2, nil -} - -// Schema2FromComponents creates a Schema2 manifest instance from the supplied data. -func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 { - return &Schema2{ - SchemaVersion: 2, - MediaType: DockerV2Schema2MediaType, - ConfigDescriptor: config, - LayersDescriptors: layers, - } -} - -// Schema2Clone creates a copy of the supplied Schema2 manifest.
-func Schema2Clone(src *Schema2) *Schema2 { - copy := *src - return &copy -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -func (m *Schema2) ConfigInfo() types.BlobInfo { - return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor) -} - -// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *Schema2) LayerInfos() []LayerInfo { - blobs := []LayerInfo{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, LayerInfo{ - BlobInfo: BlobInfoFromSchema2Descriptor(layer), - EmptyLayer: false, - }) - } - return blobs -} - -// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) -func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - if len(m.LayersDescriptors) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) - } - original := m.LayersDescriptors - m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos)) - for i, info := range layerInfos { - m.LayersDescriptors[i].MediaType = original[i].MediaType - m.LayersDescriptors[i].Digest = info.Digest - m.LayersDescriptors[i].Size = info.Size - m.LayersDescriptors[i].URLs = info.URLs - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *Schema2) Serialize() ([]byte, error) { - return json.Marshal(*m) -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - config, err := configGetter(m.ConfigInfo()) - if err != nil { - return nil, err - } - s2 := &Schema2Image{} - if err := json.Unmarshal(config, s2); err != nil { - return nil, err - } - i := &types.ImageInspectInfo{ - Tag: "", - Created: &s2.Created, - DockerVersion: s2.DockerVersion, - Architecture: s2.Architecture, - Os: s2.OS, - Layers: layerInfosToStrings(m.LayerInfos()), - } - if s2.Config != nil { - i.Labels = s2.Config.Labels - i.Env = s2.Config.Env - } - return i, nil -} - -// ImageID computes an ID which can uniquely identify this image by its contents. -func (m *Schema2) ImageID([]digest.Digest) (string, error) { - if err := m.ConfigDescriptor.Digest.Validate(); err != nil { - return "", err - } - return m.ConfigDescriptor.Digest.Hex(), nil -} diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go deleted file mode 100644 index ae1921b6c..000000000 --- a/vendor/github.com/containers/image/manifest/manifest.go +++ /dev/null @@ -1,244 +0,0 @@ -package manifest - -import ( - "encoding/json" - "fmt" - - "github.com/containers/image/types" - "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
- -// FIXME(runcom, mitr): should we have a mediatype pkg?? -const ( - // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 - DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json" - // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature - DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" - // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 - DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json" - // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs. - DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json" - // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers. - DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" - // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list - DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json" - // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers. - DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" -) - -// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource -// should request from the backend unless directed otherwise. -var DefaultRequestedManifestMIMETypes = []string{ - imgspecv1.MediaTypeImageManifest, - DockerV2Schema2MediaType, - DockerV2Schema1SignedMediaType, - DockerV2Schema1MediaType, - DockerV2ListMediaType, -} - -// Manifest is an interface for parsing and modifying image manifests in isolation. -// Callers can either use this abstract interface without understanding the details of the formats, -// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members -// directly. -// -// See types.Image for functionality not limited to manifests, including format conversions and config parsing. -// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image. -type Manifest interface { - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - ConfigInfo() types.BlobInfo - // LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []LayerInfo - // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) - UpdateLayerInfos(layerInfos []types.BlobInfo) error - - // ImageID computes an ID which can uniquely identify this image by its contents, irrespective - // of which (of possibly more than one simultaneously valid) reference was used to locate the - // image, and unchanged by whether or how the layers are compressed. The result takes the form - // of the hexadecimal portion of a digest.Digest. - ImageID(diffIDs []digest.Digest) (string, error) - - // Inspect returns various information for (skopeo inspect) parsed from the manifest, - // incorporating information from a configuration blob returned by configGetter, if - // the underlying image format is expected to include a configuration blob.
- Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) - - // Serialize returns the manifest in a blob format. - // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! - Serialize() ([]byte, error) -} - -// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos. -type LayerInfo struct { - types.BlobInfo - EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. -} - -// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. -// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, -// but we may not have such metadata available (e.g. when the manifest is a local file). -func GuessMIMEType(manifest []byte) string { - // A subset of manifest fields; the rest is silently ignored by json.Unmarshal. - // Also docker/distribution/manifest.Versioned. - meta := struct { - MediaType string `json:"mediaType"` - SchemaVersion int `json:"schemaVersion"` - Signatures interface{} `json:"signatures"` - }{} - if err := json.Unmarshal(manifest, &meta); err != nil { - return "" - } - - switch meta.MediaType { - case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type. - return meta.MediaType - } - // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest. - switch meta.SchemaVersion { - case 1: - if meta.Signatures != nil { - return DockerV2Schema1SignedMediaType - } - return DockerV2Schema1MediaType - case 2: - // best effort to understand if this is an OCI image since mediaType - // isn't in the manifest for OCI anymore - // for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess. - ociMan := struct { - Config struct { - MediaType string `json:"mediaType"` - } `json:"config"` - Layers []imgspecv1.Descriptor `json:"layers"` - }{} - if err := json.Unmarshal(manifest, &ociMan); err != nil { - return "" - } - if ociMan.Config.MediaType == imgspecv1.MediaTypeImageConfig && len(ociMan.Layers) != 0 { - return imgspecv1.MediaTypeImageManifest - } - ociIndex := struct { - Manifests []imgspecv1.Descriptor `json:"manifests"` - }{} - if err := json.Unmarshal(manifest, &ociIndex); err != nil { - return "" - } - if len(ociIndex.Manifests) != 0 && ociIndex.Manifests[0].MediaType == imgspecv1.MediaTypeImageManifest { - return imgspecv1.MediaTypeImageIndex - } - return DockerV2Schema2MediaType - } - return "" -} - -// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures. -func Digest(manifest []byte) (digest.Digest, error) { - if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType { - sig, err := libtrust.ParsePrettySignature(manifest, "signatures") - if err != nil { - return "", err - } - manifest, err = sig.Payload() - if err != nil { - // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string - // that libtrust itself has josebase64UrlEncode()d - return "", err - } - } - - return digest.FromBytes(manifest), nil -} - -// MatchesDigest returns true iff the manifest matches expectedDigest.
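A caller-side sketch pairing the two helpers above with MIMETypeIsMultiImage (defined just below); blob is hypothetical, import path pre-v4 as vendored here:

package example

import (
    "fmt"

    "github.com/containers/image/manifest"
)

// identifyManifest reports the guessed MIME type and canonical digest of a
// manifest blob; Digest above strips v2s1 JWS signatures first, so signed
// and unsigned forms of the same schema1 manifest digest identically.
func identifyManifest(blob []byte) error {
    mt := manifest.GuessMIMEType(blob) // "" if unrecognized
    dgst, err := manifest.Digest(blob)
    if err != nil {
        return err
    }
    fmt.Printf("%s %s multi-image=%v\n", mt, dgst, manifest.MIMETypeIsMultiImage(mt))
    return nil
}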
-// Error may be set if this returns false. -// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, -// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob. -func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) { - // This should eventually support various digest types. - actualDigest, err := Digest(manifest) - if err != nil { - return false, err - } - return expectedDigest == actualDigest, nil -} - -// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest. -// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature). -func AddDummyV2S1Signature(manifest []byte) ([]byte, error) { - key, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, err // Coverage: This can fail only if rand.Reader fails. - } - - js, err := libtrust.NewJSONSignature(manifest) - if err != nil { - return nil, err - } - if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails. - return nil, err - } - return js.PrettySignature("signatures") -} - -// MIMETypeIsMultiImage returns true if mimeType is a list of images -func MIMETypeIsMultiImage(mimeType string) bool { - return mimeType == DockerV2ListMediaType -} - -// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, -// centralizing various workarounds. -func NormalizedMIMEType(input string) string { - switch input { - // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . - // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might - // need to happen within the ImageSource. - case "application/json": - return DockerV2Schema1SignedMediaType - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, - imgspecv1.MediaTypeImageManifest, - DockerV2Schema2MediaType, - DockerV2ListMediaType: - return input - default: - // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time - // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 - // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 - // - // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. - // This makes no real sense, but it happens - // because requests for manifests are - // redirected to a content distribution - // network which is configured that way.
See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 - return DockerV2Schema1SignedMediaType - } -} - -// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type -func FromBlob(manblob []byte, mt string) (Manifest, error) { - switch NormalizedMIMEType(mt) { - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType: - return Schema1FromManifest(manblob) - case imgspecv1.MediaTypeImageManifest: - return OCI1FromManifest(manblob) - case DockerV2Schema2MediaType: - return Schema2FromManifest(manblob) - case DockerV2ListMediaType: - return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") - default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) - } -} - -// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() -// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. -func layerInfosToStrings(infos []LayerInfo) []string { - layers := make([]string, len(infos)) - for i, info := range infos { - layers[i] = info.Digest.String() - } - return layers -} diff --git a/vendor/github.com/containers/image/manifest/oci.go b/vendor/github.com/containers/image/manifest/oci.go deleted file mode 100644 index dd65e0ba2..000000000 --- a/vendor/github.com/containers/image/manifest/oci.go +++ /dev/null @@ -1,130 +0,0 @@ -package manifest - -import ( - "encoding/json" - - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. -func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo { - return types.BlobInfo{ - Digest: desc.Digest, - Size: desc.Size, - URLs: desc.URLs, - Annotations: desc.Annotations, - MediaType: desc.MediaType, - } -} - -// OCI1 is a manifest.Manifest implementation for OCI images. -// The underlying data from imgspecv1.Manifest is also available. -type OCI1 struct { - imgspecv1.Manifest -} - -// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. -func OCI1FromManifest(manifest []byte) (*OCI1, error) { - oci1 := OCI1{} - if err := json.Unmarshal(manifest, &oci1); err != nil { - return nil, err - } - return &oci1, nil -} - -// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. -func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { - return &OCI1{ - imgspecv1.Manifest{ - Versioned: specs.Versioned{SchemaVersion: 2}, - Config: config, - Layers: layers, - }, - } -} - -// OCI1Clone creates a copy of the supplied OCI1 manifest. -func OCI1Clone(src *OCI1) *OCI1 { - return &OCI1{ - Manifest: src.Manifest, - } -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -func (m *OCI1) ConfigInfo() types.BlobInfo { - return BlobInfoFromOCI1Descriptor(m.Config) -} - -// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. 
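Finally, a sketch of the format-independent entry point FromBlob above, which is how callers normally obtain these Manifest implementations without switching on MIME types themselves (blob and mimeType hypothetical):

package example

import (
    "fmt"

    "github.com/containers/image/manifest"
)

// listLayers parses any single-image manifest blob via FromBlob above and
// prints its layers root-first; manifest lists are rejected by FromBlob.
func listLayers(blob []byte, mimeType string) error {
    m, err := manifest.FromBlob(blob, mimeType)
    if err != nil {
        return err
    }
    for _, layer := range m.LayerInfos() {
        fmt.Println(layer.Digest, layer.EmptyLayer)
    }
    return nil
}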
-func (m *OCI1) LayerInfos() []LayerInfo { - blobs := []LayerInfo{} - for _, layer := range m.Layers { - blobs = append(blobs, LayerInfo{ - BlobInfo: BlobInfoFromOCI1Descriptor(layer), - EmptyLayer: false, - }) - } - return blobs -} - -// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) -func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - if len(m.Layers) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) - } - original := m.Layers - m.Layers = make([]imgspecv1.Descriptor, len(layerInfos)) - for i, info := range layerInfos { - m.Layers[i].MediaType = original[i].MediaType - m.Layers[i].Digest = info.Digest - m.Layers[i].Size = info.Size - m.Layers[i].Annotations = info.Annotations - m.Layers[i].URLs = info.URLs - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *OCI1) Serialize() ([]byte, error) { - return json.Marshal(*m) -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - config, err := configGetter(m.ConfigInfo()) - if err != nil { - return nil, err - } - v1 := &imgspecv1.Image{} - if err := json.Unmarshal(config, v1); err != nil { - return nil, err - } - d1 := &Schema2V1Image{} - json.Unmarshal(config, d1) - i := &types.ImageInspectInfo{ - Tag: "", - Created: v1.Created, - DockerVersion: d1.DockerVersion, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - Layers: layerInfosToStrings(m.LayerInfos()), - Env: d1.Config.Env, - } - return i, nil -} - -// ImageID computes an ID which can uniquely identify this image by its contents. -func (m *OCI1) ImageID([]digest.Digest) (string, error) { - if err := m.Config.Digest.Validate(); err != nil { - return "", err - } - return m.Config.Digest.Hex(), nil -} diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go deleted file mode 100644 index 9571c37e2..000000000 --- a/vendor/github.com/containers/image/oci/archive/oci_dest.go +++ /dev/null @@ -1,151 +0,0 @@ -package archive - -import ( - "context" - "io" - "os" - - "github.com/containers/image/types" - "github.com/containers/storage/pkg/archive" - "github.com/pkg/errors" -) - -type ociArchiveImageDestination struct { - ref ociArchiveReference - unpackedDest types.ImageDestination - tempDirRef tempDirOCIRef -} - -// newImageDestination returns an ImageDestination for writing to an existing directory. 
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) { - tempDirRef, err := createOCIRef(ref.image) - if err != nil { - return nil, errors.Wrapf(err, "error creating oci reference") - } - unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys) - if err != nil { - if err := tempDirRef.deleteTempDir(); err != nil { - return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory) - } - return nil, err - } - return &ociArchiveImageDestination{ref: ref, - unpackedDest: unpackedDest, - tempDirRef: tempDirRef}, nil -} - -// Reference returns the reference used to set up this destination. -func (d *ociArchiveImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any -// Close deletes the temp directory of the oci-archive image -func (d *ociArchiveImageDestination) Close() error { - defer d.tempDirRef.deleteTempDir() - return d.unpackedDest.Close() -} - -func (d *ociArchiveImageDestination) SupportedManifestMIMETypes() []string { - return d.unpackedDest.SupportedManifestMIMETypes() -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures -func (d *ociArchiveImageDestination) SupportsSignatures(ctx context.Context) error { - return d.unpackedDest.SupportsSignatures(ctx) -} - -func (d *ociArchiveImageDestination) DesiredLayerCompression() types.LayerCompression { - return d.unpackedDest.DesiredLayerCompression() -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool { - return d.unpackedDest.AcceptsForeignLayerURLs() -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise -func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool { - return d.unpackedDest.MustMatchRuntimeOS() -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool { - return d.unpackedDest.IgnoresEmbeddedDockerReference() -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// inputInfo.MediaType describes the blob format, if known. -// May update cache. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
-func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig) -} - -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination -// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). -// info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. -// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. -func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute) -} - -// PutManifest writes manifest to the destination -func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte) error { - return d.unpackedDest.PutManifest(ctx, m) -} - -func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { - return d.unpackedDest.PutSignatures(ctx, signatures) -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted -// after the directory is made, it is tarred up into a file and the directory is deleted -func (d *ociArchiveImageDestination) Commit(ctx context.Context) error { - if err := d.unpackedDest.Commit(ctx); err != nil { - return errors.Wrapf(err, "error storing image %q", d.ref.image) - } - - // path of directory to tar up - src := d.tempDirRef.tempDirectory - // path to save tarred up file - dst := d.ref.resolvedFile - return tarDirectory(src, dst) -} - -// tar converts the directory at src and saves it to dst -func tarDirectory(src, dst string) error { - // input is a stream of bytes from the archive of the directory at path - input, err := archive.Tar(src, archive.Uncompressed) - if err != nil { - return errors.Wrapf(err, "error retrieving stream of bytes from %q", src) - } - - // creates the tar file - outFile, err := os.Create(dst) - if err != nil { - return errors.Wrapf(err, "error creating tar file %q", dst) - } - defer outFile.Close() - - // copies the contents of the directory to the tar file - // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. 
- _, err = io.Copy(outFile, input) - - return err -} diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go deleted file mode 100644 index ca74f950b..000000000 --- a/vendor/github.com/containers/image/oci/archive/oci_src.go +++ /dev/null @@ -1,102 +0,0 @@ -package archive - -import ( - "context" - "io" - - ocilayout "github.com/containers/image/oci/layout" - "github.com/containers/image/types" - digest "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type ociArchiveImageSource struct { - ref ociArchiveReference - unpackedSrc types.ImageSource - tempDirRef tempDirOCIRef -} - -// newImageSource returns an ImageSource for reading from an existing directory. -// newImageSource untars the file and saves it in a temp directory -func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) { - tempDirRef, err := createUntarTempDir(ref) - if err != nil { - return nil, errors.Wrap(err, "error creating temp directory") - } - - unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys) - if err != nil { - if err := tempDirRef.deleteTempDir(); err != nil { - return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory) - } - return nil, err - } - return &ociArchiveImageSource{ref: ref, - unpackedSrc: unpackedSrc, - tempDirRef: tempDirRef}, nil -} - -// LoadManifestDescriptor loads the manifest -func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { - ociArchRef, ok := imgRef.(ociArchiveReference) - if !ok { - return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference") - } - tempDirRef, err := createUntarTempDir(ociArchRef) - if err != nil { - return imgspecv1.Descriptor{}, errors.Wrap(err, "error creating temp directory") - } - defer tempDirRef.deleteTempDir() - - descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted) - if err != nil { - return imgspecv1.Descriptor{}, errors.Wrap(err, "error loading index") - } - return descriptor, nil -} - -// Reference returns the reference used to set up this source. -func (s *ociArchiveImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -// Close deletes the temporary directory at dst -func (s *ociArchiveImageSource) Close() error { - defer s.tempDirRef.deleteTempDir() - return s.unpackedSrc.Close() -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - return s.unpackedSrc.GetManifest(ctx, instanceDigest) -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool { - return false -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - return s.unpackedSrc.GetBlob(ctx, info, cache) -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - return s.unpackedSrc.GetSignatures(ctx, instanceDigest) -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *ociArchiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/oci/archive/oci_transport.go b/vendor/github.com/containers/image/oci/archive/oci_transport.go deleted file mode 100644 index 7c1d26ba8..000000000 --- a/vendor/github.com/containers/image/oci/archive/oci_transport.go +++ /dev/null @@ -1,192 +0,0 @@ -package archive - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "strings" - - "github.com/containers/image/directory/explicitfilepath" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/internal/tmpdir" - "github.com/containers/image/oci/internal" - ocilayout "github.com/containers/image/oci/layout" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/containers/storage/pkg/archive" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for OCI archive -// it creates an oci-archive tar file by calling into the OCI transport -// tarring the directory created by oci and deleting the directory -var Transport = ociArchiveTransport{} - -type ociArchiveTransport struct{} - -// ociArchiveReference is an ImageReference for OCI Archive paths -type ociArchiveReference struct { - file string - resolvedFile string - image string -} - -func (t ociArchiveTransport) Name() string { - return "oci-archive" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix -// into an ImageReference. -func (t ociArchiveTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error { - return internal.ValidateScope(scope) -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. -func ParseReference(reference string) (types.ImageReference, error) { - file, image := internal.SplitPathAndImage(reference) - return NewReference(file, image) -} - -// NewReference returns an OCI reference for a file and a image. 
-func NewReference(file, image string) (types.ImageReference, error) { - resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) - if err != nil { - return nil, err - } - - if err := internal.ValidateOCIPath(file); err != nil { - return nil, err - } - - if err := internal.ValidateImageName(image); err != nil { - return nil, err - } - - return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil -} - -func (ref ociArchiveReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -func (ref ociArchiveReference) StringWithinTransport() string { - return fmt.Sprintf("%s:%s", ref.file, ref.image) -} - -// DockerReference returns a Docker reference associated with this reference -func (ref ociArchiveReference) DockerReference() reference.Named { - return nil -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -func (ref ociArchiveReference) PolicyConfigurationIdentity() string { - // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the - // same image and the two can’t be statically disambiguated. Using at least the repository directory is - // less granular but hopefully still useful. - return fmt.Sprintf("%s", ref.resolvedFile) -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set -func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string { - res := []string{} - path := ref.resolvedFile - for { - lastSlash := strings.LastIndex(path, "/") - // Note that we do not include "/"; it is redundant with the default "" global default, - // and rejected by ociTransport.ValidatePolicyConfigurationScope above. - if lastSlash == -1 || path == "/" { - break - } - res = append(res, path) - path = path[:lastSlash] - } - return res -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, sys, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref ociArchiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, sys, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. 
-func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ctx, sys, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for oci: images") -} - -// struct to store the ociReference and temporary directory returned by createOCIRef -type tempDirOCIRef struct { - tempDirectory string - ociRefExtracted types.ImageReference -} - -// deletes the temporary directory created -func (t *tempDirOCIRef) deleteTempDir() error { - return os.RemoveAll(t.tempDirectory) -} - -// createOCIRef creates the oci reference of the image -func createOCIRef(image string) (tempDirOCIRef, error) { - dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci") - if err != nil { - return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory") - } - ociRef, err := ocilayout.NewReference(dir, image) - if err != nil { - return tempDirOCIRef{}, err - } - - tempDirRef := tempDirOCIRef{tempDirectory: dir, ociRefExtracted: ociRef} - return tempDirRef, nil -} - -// creates the temporary directory and copies the tarred content to it -func createUntarTempDir(ref ociArchiveReference) (tempDirOCIRef, error) { - tempDirRef, err := createOCIRef(ref.image) - if err != nil { - return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference") - } - src := ref.resolvedFile - dst := tempDirRef.tempDirectory - // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. - if err := archive.UntarPath(src, dst); err != nil { - if err := tempDirRef.deleteTempDir(); err != nil { - return tempDirOCIRef{}, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory) - } - return tempDirOCIRef{}, errors.Wrapf(err, "error untarring file %q", tempDirRef.tempDirectory) - } - return tempDirRef, nil -} diff --git a/vendor/github.com/containers/image/oci/internal/oci_util.go b/vendor/github.com/containers/image/oci/internal/oci_util.go deleted file mode 100644 index c2012e50e..000000000 --- a/vendor/github.com/containers/image/oci/internal/oci_util.go +++ /dev/null @@ -1,126 +0,0 @@ -package internal - -import ( - "github.com/pkg/errors" - "path/filepath" - "regexp" - "runtime" - "strings" -) - -// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys -const ( - separator = `(?:[-._:@+]|--)` - alphanum = `(?:[A-Za-z0-9]+)` - component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)` -) - -var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`) -var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`) - -// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs. -// In any other case an error is returned. -func ValidateImageName(image string) error { - if len(image) == 0 { - return nil - } - - var err error - if !refRegexp.MatchString(image) { - err = errors.Errorf("Invalid image %s", image) - } - return err -} - -// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image. -// Neither path nor image parts are validated at this stage. 
-func SplitPathAndImage(reference string) (string, string) { - if runtime.GOOS == "windows" { - return splitPathAndImageWindows(reference) - } - return splitPathAndImageNonWindows(reference) -} - -func splitPathAndImageWindows(reference string) (string, string) { - groups := windowsRefRegexp.FindStringSubmatch(reference) - // nil group means no match - if groups == nil { - return reference, "" - } - - // we expect three elements. First one full match, second the capture group for the path and - // the third the capture group for the image - if len(groups) != 3 { - return reference, "" - } - return groups[1], groups[2] -} - -func splitPathAndImageNonWindows(reference string) (string, string) { - sep := strings.SplitN(reference, ":", 2) - path := sep[0] - - var image string - if len(sep) == 2 { - image = sep[1] - } - return path, image -} - -// ValidateOCIPath takes the OCI path and validates it. -func ValidateOCIPath(path string) error { - if runtime.GOOS == "windows" { - // On Windows we must allow for a ':' as part of the path - if strings.Count(path, ":") > 1 { - return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path) - } - } else { - if strings.Contains(path, ":") { - return errors.Errorf("Invalid OCI reference: path %s contains a colon", path) - } - } - return nil -} - -// ValidateScope validates a policy configuration scope for an OCI transport. -func ValidateScope(scope string) error { - var err error - if runtime.GOOS == "windows" { - err = validateScopeWindows(scope) - } else { - err = validateScopeNonWindows(scope) - } - if err != nil { - return err - } - - cleaned := filepath.Clean(scope) - if cleaned != scope { - return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) - } - - return nil -} - -func validateScopeWindows(scope string) error { - matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) - if !matched { - return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope) - } - - return nil -} - -func validateScopeNonWindows(scope string) error { - if !strings.HasPrefix(scope, "/") { - return errors.Errorf("Invalid scope %s: must be an absolute path", scope) - } - - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry. - if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - - return nil -} diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go deleted file mode 100644 index db102184d..000000000 --- a/vendor/github.com/containers/image/oci/layout/oci_dest.go +++ /dev/null @@ -1,306 +0,0 @@ -package layout - -import ( - "context" - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - digest "github.com/opencontainers/go-digest" - imgspec "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type ociImageDestination struct { - ref ociReference - index imgspecv1.Index - sharedBlobDir string - acceptUncompressedLayers bool -} - -// newImageDestination returns an ImageDestination for writing to an existing directory. 
-func newImageDestination(sys *types.SystemContext, ref ociReference) (types.ImageDestination, error) { - var index *imgspecv1.Index - if indexExists(ref) { - var err error - index, err = ref.getIndex() - if err != nil { - return nil, err - } - } else { - index = &imgspecv1.Index{ - Versioned: imgspec.Versioned{ - SchemaVersion: 2, - }, - } - } - - d := &ociImageDestination{ref: ref, index: *index} - if sys != nil { - d.sharedBlobDir = sys.OCISharedBlobDirPath - d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers - } - - if err := ensureDirectoryExists(d.ref.dir); err != nil { - return nil, err - } - // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, - // but it MAY be empty (e.g. if we never end up calling PutBlob) - // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 - if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { - return nil, err - } - return d, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *ociImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *ociImageDestination) Close() error { - return nil -} - -func (d *ociImageDestination) SupportedManifestMIMETypes() []string { - return []string{ - imgspecv1.MediaTypeImageManifest, - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error { - return errors.Errorf("Pushing signatures for OCI images is not supported") -} - -func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression { - if d.acceptUncompressedLayers { - return types.PreserveOriginal - } - return types.Compress -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *ociImageDestination) AcceptsForeignLayerURLs() bool { - return true -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *ociImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { - return false // N/A, DockerReference() returns nil. -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *ociImageDestination) HasThreadSafePutBlob() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// inputInfo.MediaType describes the blob format, if known. -// May update cache. 
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob") - if err != nil { - return types.BlobInfo{}, err - } - succeeded := false - explicitClosed := false - defer func() { - if !explicitClosed { - blobFile.Close() - } - if !succeeded { - os.Remove(blobFile.Name()) - } - }() - - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - size, err := io.Copy(blobFile, tee) - if err != nil { - return types.BlobInfo{}, err - } - computedDigest := digester.Digest() - if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) - } - if err := blobFile.Sync(); err != nil { - return types.BlobInfo{}, err - } - - // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. - // On Windows, the “permissions of newly created files” argument to syscall.Open is - // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod, - // always fails on Windows. - if runtime.GOOS != "windows" { - if err := blobFile.Chmod(0644); err != nil { - return types.BlobInfo{}, err - } - } - - blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir) - if err != nil { - return types.BlobInfo{}, err - } - if err := ensureParentDirectoryExists(blobPath); err != nil { - return types.BlobInfo{}, err - } - - // need to explicitly close the file, since a rename won't otherwise not work on Windows - blobFile.Close() - explicitClosed = true - if err := os.Rename(blobFile.Name(), blobPath); err != nil { - return types.BlobInfo{}, err - } - succeeded = true - return types.BlobInfo{Digest: computedDigest, Size: size}, nil -} - -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination -// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). -// info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. -// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. 
-func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - if info.Digest == "" { - return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`) - } - blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir) - if err != nil { - return false, types.BlobInfo{}, err - } - finfo, err := os.Stat(blobPath) - if err != nil && os.IsNotExist(err) { - return false, types.BlobInfo{}, nil - } - if err != nil { - return false, types.BlobInfo{}, err - } - return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte) error { - digest, err := manifest.Digest(m) - if err != nil { - return err - } - desc := imgspecv1.Descriptor{} - desc.Digest = digest - // TODO(runcom): beaware and add support for OCI manifest list - desc.MediaType = imgspecv1.MediaTypeImageManifest - desc.Size = int64(len(m)) - - blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir) - if err != nil { - return err - } - if err := ensureParentDirectoryExists(blobPath); err != nil { - return err - } - if err := ioutil.WriteFile(blobPath, m, 0644); err != nil { - return err - } - - if d.ref.image != "" { - annotations := make(map[string]string) - annotations["org.opencontainers.image.ref.name"] = d.ref.image - desc.Annotations = annotations - } - desc.Platform = &imgspecv1.Platform{ - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - } - d.addManifest(&desc) - - return nil -} - -func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { - for i, manifest := range d.index.Manifests { - if manifest.Annotations["org.opencontainers.image.ref.name"] == desc.Annotations["org.opencontainers.image.ref.name"] { - // TODO Should there first be a cleanup based on the descriptor we are going to replace? - d.index.Manifests[i] = *desc - return - } - } - d.index.Manifests = append(d.index.Manifests, *desc) -} - -func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { - if len(signatures) != 0 { - return errors.Errorf("Pushing signatures for OCI images is not supported") - } - return nil -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) -func (d *ociImageDestination) Commit(ctx context.Context) error { - if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { - return err - } - indexJSON, err := json.Marshal(d.index) - if err != nil { - return err - } - return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644) -} - -func ensureDirectoryExists(path string) error { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - } - return nil -} - -// ensureParentDirectoryExists ensures the parent of the supplied path exists. -func ensureParentDirectoryExists(path string) error { - return ensureDirectoryExists(filepath.Dir(path)) -} - -// indexExists checks whether the index location specified in the OCI reference exists. -// The implementation is opinionated, since in case of unexpected errors false is returned -func indexExists(ref ociReference) bool { - _, err := os.Stat(ref.indexPath()) - if err == nil { - return true - } - if os.IsNotExist(err) { - return false - } - return true -} diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go deleted file mode 100644 index cc536f69e..000000000 --- a/vendor/github.com/containers/image/oci/layout/oci_src.go +++ /dev/null @@ -1,171 +0,0 @@ -package layout - -import ( - "context" - "io" - "io/ioutil" - "net/http" - "os" - "strconv" - - "github.com/containers/image/pkg/tlsclientconfig" - "github.com/containers/image/types" - "github.com/docker/go-connections/tlsconfig" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type ociImageSource struct { - ref ociReference - descriptor imgspecv1.Descriptor - client *http.Client - sharedBlobDir string -} - -// newImageSource returns an ImageSource for reading from an existing directory. -func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSource, error) { - tr := tlsclientconfig.NewTransport() - tr.TLSClientConfig = tlsconfig.ServerDefault() - - if sys != nil && sys.OCICertPath != "" { - if err := tlsclientconfig.SetupCertificates(sys.OCICertPath, tr.TLSClientConfig); err != nil { - return nil, err - } - tr.TLSClientConfig.InsecureSkipVerify = sys.OCIInsecureSkipTLSVerify - } - - client := &http.Client{} - client.Transport = tr - descriptor, err := ref.getManifestDescriptor() - if err != nil { - return nil, err - } - d := &ociImageSource{ref: ref, descriptor: descriptor, client: client} - if sys != nil { - // TODO(jonboulle): check dir existence? - d.sharedBlobDir = sys.OCISharedBlobDirPath - } - return d, nil -} - -// Reference returns the reference used to set up this source. -func (s *ociImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *ociImageSource) Close() error { - return nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
-func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - var dig digest.Digest - var mimeType string - if instanceDigest == nil { - dig = digest.Digest(s.descriptor.Digest) - mimeType = s.descriptor.MediaType - } else { - dig = *instanceDigest - // XXX: instanceDigest means that we don't immediately have the context of what - // mediaType the manifest has. In OCI this means that we don't know - // what reference it came from, so we just *assume* that its - // MediaTypeImageManifest. - // FIXME: We should actually be able to look up the manifest in the index, - // and see the MIME type there. - mimeType = imgspecv1.MediaTypeImageManifest - } - - manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) - if err != nil { - return nil, "", err - } - m, err := ioutil.ReadFile(manifestPath) - if err != nil { - return nil, "", err - } - - return m, mimeType, nil -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *ociImageSource) HasThreadSafeGetBlob() bool { - return false -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - if len(info.URLs) != 0 { - return s.getExternalBlob(ctx, info.URLs) - } - - path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir) - if err != nil { - return nil, 0, err - } - - r, err := os.Open(path) - if err != nil { - return nil, 0, err - } - fi, err := r.Stat() - if err != nil { - return nil, 0, err - } - return r, fi.Size(), nil -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - return [][]byte{}, nil -} - -func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { - errWrap := errors.New("failed fetching external blob from all urls") - for _, url := range urls { - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error()) - continue - } - - resp, err := s.client.Do(req.WithContext(ctx)) - if err != nil { - errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error()) - continue - } - - if resp.StatusCode != http.StatusOK { - resp.Body.Close() - errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", url) - continue - } - - return resp.Body, getBlobSize(resp), nil - } - - return nil, 0, errWrap -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
-func (s *ociImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} - -func getBlobSize(resp *http.Response) int64 { - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - size = -1 - } - return size -} diff --git a/vendor/github.com/containers/image/oci/layout/oci_transport.go b/vendor/github.com/containers/image/oci/layout/oci_transport.go deleted file mode 100644 index 4e5cecff2..000000000 --- a/vendor/github.com/containers/image/oci/layout/oci_transport.go +++ /dev/null @@ -1,264 +0,0 @@ -package layout - -import ( - "context" - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/containers/image/directory/explicitfilepath" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/oci/internal" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -var ( - // Transport is an ImageTransport for OCI directories. - Transport = ociTransport{} - - // ErrMoreThanOneImage is an error returned when the manifest includes - // more than one image and the user should choose which one to use. - ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image") -) - -type ociTransport struct{} - -func (t ociTransport) Name() string { - return "oci" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error { - return internal.ValidateScope(scope) -} - -// ociReference is an ImageReference for OCI directory paths. -type ociReference struct { - // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! - // Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on. - - // Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid - // being exposed to symlinks and renames in the parent directories to the working directory). - // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) - dir string // As specified by the user. May be relative, contain symlinks, etc. - resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. 
- // If image=="", it means the "only image" in the index.json is used in the case it is a source - // for destinations, the image name annotation "image.ref.name" is not added to the index.json - image string -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. -func ParseReference(reference string) (types.ImageReference, error) { - dir, image := internal.SplitPathAndImage(reference) - return NewReference(dir, image) -} - -// NewReference returns an OCI reference for a directory and a image. -// -// We do not expose an API supplying the resolvedDir; we could, but recomputing it -// is generally cheap enough that we prefer being confident about the properties of resolvedDir. -func NewReference(dir, image string) (types.ImageReference, error) { - resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir) - if err != nil { - return nil, err - } - - if err := internal.ValidateOCIPath(dir); err != nil { - return nil, err - } - - if err = internal.ValidateImageName(image); err != nil { - return nil, err - } - - return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil -} - -func (ref ociReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref ociReference) StringWithinTransport() string { - return fmt.Sprintf("%s:%s", ref.dir, ref.image) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref ociReference) DockerReference() reference.Named { - return nil -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref ociReference) PolicyConfigurationIdentity() string { - // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the - // same image and the two can’t be statically disambiguated. Using at least the repository directory is - // less granular but hopefully still useful. - return fmt.Sprintf("%s", ref.resolvedDir) -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. 
The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref ociReference) PolicyConfigurationNamespaces() []string { - res := []string{} - path := ref.resolvedDir - for { - lastSlash := strings.LastIndex(path, "/") - // Note that we do not include "/"; it is redundant with the default "" global default, - // and rejected by ociTransport.ValidatePolicyConfigurationScope above. - if lastSlash == -1 || path == "/" { - break - } - res = append(res, path) - path = path[:lastSlash] - } - return res -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(sys, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) -} - -// getIndex returns a pointer to the index references by this ociReference. If an error occurs opening an index nil is returned together -// with an error. -func (ref ociReference) getIndex() (*imgspecv1.Index, error) { - indexJSON, err := os.Open(ref.indexPath()) - if err != nil { - return nil, err - } - defer indexJSON.Close() - - index := &imgspecv1.Index{} - if err := json.NewDecoder(indexJSON).Decode(index); err != nil { - return nil, err - } - return index, nil -} - -func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { - index, err := ref.getIndex() - if err != nil { - return imgspecv1.Descriptor{}, err - } - - var d *imgspecv1.Descriptor - if ref.image == "" { - // return manifest if only one image is in the oci directory - if len(index.Manifests) == 1 { - d = &index.Manifests[0] - } else { - // ask user to choose image when more than one image in the oci directory - return imgspecv1.Descriptor{}, ErrMoreThanOneImage - } - } else { - // if image specified, look through all manifests for a match - for _, md := range index.Manifests { - if md.MediaType != imgspecv1.MediaTypeImageManifest { - continue - } - refName, ok := md.Annotations["org.opencontainers.image.ref.name"] - if !ok { - continue - } - if refName == ref.image { - d = &md - break - } - } - } - if d == nil { - return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image) - } - return *d, nil -} - -// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name -// when pulling an image -func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { - ociRef, ok := imgRef.(ociReference) - if !ok { - return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef") - } - return ociRef.getManifestDescriptor() -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. 
-func (ref ociReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(sys, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(sys, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for oci: images") -} - -// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. -func (ref ociReference) ociLayoutPath() string { - return filepath.Join(ref.dir, "oci-layout") -} - -// indexPath returns a path for the index.json within a directory using OCI conventions. -func (ref ociReference) indexPath() string { - return filepath.Join(ref.dir, "index.json") -} - -// blobPath returns a path for a blob within a directory using OCI image-layout conventions. -func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) { - if err := digest.Validate(); err != nil { - return "", errors.Wrapf(err, "unexpected digest reference %s", digest) - } - blobDir := filepath.Join(ref.dir, "blobs") - if sharedBlobDir != "" { - blobDir = sharedBlobDir - } - return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil -} diff --git a/vendor/github.com/containers/image/openshift/openshift-copies.go b/vendor/github.com/containers/image/openshift/openshift-copies.go deleted file mode 100644 index 01fe71a24..000000000 --- a/vendor/github.com/containers/image/openshift/openshift-copies.go +++ /dev/null @@ -1,1174 +0,0 @@ -package openshift - -import ( - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "reflect" - "strings" - "time" - - "github.com/ghodss/yaml" - "github.com/imdario/mergo" - "github.com/pkg/errors" - "golang.org/x/net/http2" - "k8s.io/client-go/util/homedir" -) - -// restTLSClientConfig is a modified copy of k8s.io/kubernets/pkg/client/restclient.TLSClientConfig. -// restTLSClientConfig contains settings to enable transport layer security -type restTLSClientConfig struct { - // Server requires TLS client certificate authentication - CertFile string - // Server requires TLS client certificate authentication - KeyFile string - // Trusted root certificates for server - CAFile string - - // CertData holds PEM-encoded bytes (typically read from a client certificate file). - // CertData takes precedence over CertFile - CertData []byte - // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). - // KeyData takes precedence over KeyFile - KeyData []byte - // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). - // CAData takes precedence over CAFile - CAData []byte -} - -// restConfig is a modified copy of k8s.io/kubernets/pkg/client/restclient.Config. -// Config holds the common attributes that can be passed to a Kubernetes client on -// initialization. -type restConfig struct { - // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. 
- // If a URL is given then the (optional) Path of that URL represents a prefix that must - // be appended to all request URIs used to access the apiserver. This allows a frontend - // proxy to easily relocate all of the apiserver endpoints. - Host string - - // Server requires Basic authentication - Username string - Password string - - // Server requires Bearer authentication. This client will not attempt to use - // refresh tokens for an OAuth2 flow. - // TODO: demonstrate an OAuth2 compatible client. - BearerToken string - - // TLSClientConfig contains settings to enable transport layer security - restTLSClientConfig - - // Server should be accessed without verifying the TLS - // certificate. For testing only. - Insecure bool -} - -// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig. -// ClientConfig is used to make it easy to get an api server client -type clientConfig interface { - // ClientConfig returns a complete client config - ClientConfig() (*restConfig, error) -} - -// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig. -func defaultClientConfig() clientConfig { - loadingRules := newOpenShiftClientConfigLoadingRules() - // REMOVED: Allowing command-line overriding of loadingRules - // REMOVED: clientcmd.ConfigOverrides - - clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules) - - return clientConfig -} - -var recommendedHomeFile = path.Join(homedir.HomeDir(), ".kube/config") - -// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules. -// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift. -// 1. --config value -// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file -func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules { - chain := []string{} - - envVarFile := os.Getenv("KUBECONFIG") - if len(envVarFile) != 0 { - chain = append(chain, filepath.SplitList(envVarFile)...) - } else { - chain = append(chain, recommendedHomeFile) - } - - return &clientConfigLoadingRules{ - Precedence: chain, - // REMOVED: Migration support; run (oc login) to trigger migration - } -} - -// deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig. -// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules -// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that -// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before -// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid -// passing extraneous information down a call stack -type deferredLoadingClientConfig struct { - loadingRules *clientConfigLoadingRules - - clientConfig clientConfig -} - -// NewNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig. 
-// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name -func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig { - return &deferredLoadingClientConfig{loadingRules: loadingRules} -} - -func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) { - if config.clientConfig == nil { - // REMOVED: Support for concurrent use in multiple threads. - mergedConfig, err := config.loadingRules.Load() - if err != nil { - return nil, err - } - - var mergedClientConfig clientConfig - // REMOVED: Interactive fallback support. - mergedClientConfig = newNonInteractiveClientConfig(*mergedConfig) - - config.clientConfig = mergedClientConfig - } - - return config.clientConfig, nil -} - -// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig. -// ClientConfig implements ClientConfig -func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) { - mergedClientConfig, err := config.createClientConfig() - if err != nil { - return nil, err - } - mergedConfig, err := mergedClientConfig.ClientConfig() - if err != nil { - return nil, err - } - // REMOVED: In-cluster service account configuration use. - - return mergedConfig, nil -} - -var ( - // DefaultCluster is the cluster config used when no other config is specified - // TODO: eventually apiserver should start on 443 and be secure by default - defaultCluster = clientcmdCluster{Server: "http://localhost:8080"} - - // EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name - envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")} -) - -// directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig. -// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information -type directClientConfig struct { - config clientcmdConfig -} - -// newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig. -// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information -func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig { - return &directClientConfig{config} -} - -// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig. -// ClientConfig implements ClientConfig -func (config *directClientConfig) ClientConfig() (*restConfig, error) { - if err := config.ConfirmUsable(); err != nil { - return nil, err - } - - configAuthInfo := config.getAuthInfo() - configClusterInfo := config.getCluster() - - clientConfig := &restConfig{} - clientConfig.Host = configClusterInfo.Server - if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 { - u.RawQuery = "" - u.Fragment = "" - clientConfig.Host = u.String() - } - - // only try to read the auth information if we are secure - if isConfigTransportTLS(*clientConfig) { - var err error - - // mergo is a first write wins for map value and a last writing wins for interface values - // NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a. - // Our mergo.Merge version is older than this change. 
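The merge direction used here is easy to get backwards, so a minimal standalone sketch (not from the vendored file; partialConfig is a hypothetical type) of the zero-field semantics this code relies on: mergo.Merge fills only fields that are still zero in the destination, so whatever was merged first wins.

package main

import (
    "fmt"

    "github.com/imdario/mergo"
)

type partialConfig struct {
    Host     string
    Username string
}

func main() {
    // Merged first (user identification): its non-zero fields are final.
    merged := partialConfig{Username: "alice"}
    // Merged second (server identification): only fills what is still empty.
    server := partialConfig{Host: "https://api.example.com:8443", Username: "ignored"}
    if err := mergo.Merge(&merged, server); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", merged) // {Host:https://api.example.com:8443 Username:alice}
}

This is why the user partial config is merged into clientConfig before the server partial config: earlier merges take precedence.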
- // REMOVED: Support for interactive fallback.
- userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
- if err != nil {
- return nil, err
- }
- mergo.Merge(clientConfig, userAuthPartialConfig)
-
- serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
- if err != nil {
- return nil, err
- }
- mergo.Merge(clientConfig, serverAuthPartialConfig)
- }
-
- return clientConfig, nil
-}
-
-// getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig.
-// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
-// both, so we have to split the objects and merge them separately.
-// We want this order of precedence for the server identification:
-// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
-// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-// 3. load the ~/.kubernetes_auth file as a default
-func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
- mergedConfig := &restConfig{}
-
- // configClusterInfo holds the information identifying the server, provided by .kubeconfig
- configClientConfig := &restConfig{}
- configClientConfig.CAFile = configClusterInfo.CertificateAuthority
- configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
- configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
- mergo.Merge(mergedConfig, configClientConfig)
-
- return mergedConfig, nil
-}
-
-// getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
-// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
-// both, so we have to split the objects and merge them separately.
-// We want this order of precedence for user identification:
-// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
-// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
-// 4. if there is not enough information to identify the user, prompt if possible
-func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
- mergedConfig := &restConfig{}
-
- // blindly overwrite existing values based on precedence
- if len(configAuthInfo.Token) > 0 {
- mergedConfig.BearerToken = configAuthInfo.Token
- }
- if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
- mergedConfig.CertFile = configAuthInfo.ClientCertificate
- mergedConfig.CertData = configAuthInfo.ClientCertificateData
- mergedConfig.KeyFile = configAuthInfo.ClientKey
- mergedConfig.KeyData = configAuthInfo.ClientKeyData
- }
- if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
- mergedConfig.Username = configAuthInfo.Username
- mergedConfig.Password = configAuthInfo.Password
- }
-
- // REMOVED: prompting for missing information.
- return mergedConfig, nil
-}
-
-// canIdentifyUser is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.canIdentifyUser
-func canIdentifyUser(config restConfig) bool {
- return len(config.Username) > 0 ||
- (len(config.CertFile) > 0 || len(config.CertData) > 0) ||
- len(config.BearerToken) > 0
-}
-
-// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
-// ConfirmUsable looks at a particular context and determines whether that part of the config is usable. There might still be errors in the config,
-// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
-func (config *directClientConfig) ConfirmUsable() error {
- var validationErrors []error
- validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
- validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...)
- // when direct client config is specified, and our only error is that no server is defined, we should
- // return a standard "no config" error
- if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster {
- return newErrConfigurationInvalid([]error{errEmptyConfig})
- }
- return newErrConfigurationInvalid(validationErrors)
-}
-
-// getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName.
-func (config *directClientConfig) getContextName() string {
- // REMOVED: overrides support
- return config.config.CurrentContext
-}
-
-// getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName.
-func (config *directClientConfig) getAuthInfoName() string {
- // REMOVED: overrides support
- return config.getContext().AuthInfo
-}
-
-// getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName.
-func (config *directClientConfig) getClusterName() string {
- // REMOVED: overrides support
- return config.getContext().Cluster
-}
-
-// getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext.
-func (config *directClientConfig) getContext() clientcmdContext {
- contexts := config.config.Contexts
- contextName := config.getContextName()
-
- var mergedContext clientcmdContext
- if configContext, exists := contexts[contextName]; exists {
- mergo.Merge(&mergedContext, configContext)
- }
- // REMOVED: overrides support
-
- return mergedContext
-}
-
-var (
- errEmptyConfig = errors.New("no configuration has been provided")
- // message is for consistency with old behavior
- errEmptyCluster = errors.New("cluster has no server defined")
-)
-
-// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo. 
-// validateClusterInfo looks for conflicts and errors in the cluster info -func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error { - var validationErrors []error - - if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) { - return []error{errEmptyCluster} - } - - if len(clusterInfo.Server) == 0 { - if len(clusterName) == 0 { - validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined")) - } else { - validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName)) - } - } - // Make sure CA data and CA file aren't both specified - if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { - validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName)) - } - if len(clusterInfo.CertificateAuthority) != 0 { - clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) - defer clientCertCA.Close() - if err != nil { - validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) - } - } - - return validationErrors -} - -// validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo. -// validateAuthInfo looks for conflicts and errors in the auth info -func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error { - var validationErrors []error - - usingAuthPath := false - methods := make([]string, 0, 3) - if len(authInfo.Token) != 0 { - methods = append(methods, "token") - } - if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { - methods = append(methods, "basicAuth") - } - - if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { - // Make sure cert data and file aren't both specified - if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { - validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. 
client-cert-data will override", authInfoName)) - } - // Make sure key data and file aren't both specified - if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { - validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) - } - // Make sure a key is specified - if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { - validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName)) - } - - if len(authInfo.ClientCertificate) != 0 { - clientCertFile, err := os.Open(authInfo.ClientCertificate) - defer clientCertFile.Close() - if err != nil { - validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) - } - } - if len(authInfo.ClientKey) != 0 { - clientKeyFile, err := os.Open(authInfo.ClientKey) - defer clientKeyFile.Close() - if err != nil { - validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) - } - } - } - - // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case - if (len(methods) > 1) && (!usingAuthPath) { - validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) - } - - return validationErrors -} - -// getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo. -func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo { - authInfos := config.config.AuthInfos - authInfoName := config.getAuthInfoName() - - var mergedAuthInfo clientcmdAuthInfo - if configAuthInfo, exists := authInfos[authInfoName]; exists { - mergo.Merge(&mergedAuthInfo, configAuthInfo) - } - // REMOVED: overrides support - - return mergedAuthInfo -} - -// getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster. -func (config *directClientConfig) getCluster() clientcmdCluster { - clusterInfos := config.config.Clusters - clusterInfoName := config.getClusterName() - - var mergedClusterInfo clientcmdCluster - mergo.Merge(&mergedClusterInfo, defaultCluster) - mergo.Merge(&mergedClusterInfo, envVarCluster) - if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { - mergo.Merge(&mergedClusterInfo, configClusterInfo) - } - // REMOVED: overrides support - - return mergedClusterInfo -} - -// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate. -// This helper implements the error and Errors interfaces. Keeping it private -// prevents people from making an aggregate of 0 errors, which is not -// an error, but does satisfy the error interface. -type aggregateErr []error - -// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate. -// NewAggregate converts a slice of errors into an Aggregate interface, which -// is itself an implementation of the error interface. If the slice is empty, -// this returns nil. -// It will check if any of the element of input error list is nil, to avoid -// nil pointer panic when call Error(). 
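newAggregate below, together with the aggregateErr type that backs it, collapses a list of errors into one. A minimal standalone sketch of the formatting contract (the aggregate type here is hypothetical, not the vendored one): a single error prints bare, several print as a bracketed, comma-separated list.

package main

import (
    "errors"
    "fmt"
    "strings"
)

// aggregate mimics aggregateErr.Error below: one error prints bare,
// several print as a bracketed list.
type aggregate []error

func (a aggregate) Error() string {
    if len(a) == 1 {
        return a[0].Error()
    }
    msgs := make([]string, len(a))
    for i, e := range a {
        msgs[i] = e.Error()
    }
    return "[" + strings.Join(msgs, ", ") + "]"
}

func main() {
    err := aggregate{errors.New("no server found"), errors.New("client-key missing")}
    fmt.Println(err) // [no server found, client-key missing]
}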
-func newAggregate(errlist []error) error { - if len(errlist) == 0 { - return nil - } - // In case of input error list contains nil - var errs []error - for _, e := range errlist { - if e != nil { - errs = append(errs, e) - } - } - if len(errs) == 0 { - return nil - } - return aggregateErr(errs) -} - -// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error. -// Error is part of the error interface. -func (agg aggregateErr) Error() string { - if len(agg) == 0 { - // This should never happen, really. - return "" - } - if len(agg) == 1 { - return agg[0].Error() - } - result := fmt.Sprintf("[%s", agg[0].Error()) - for i := 1; i < len(agg); i++ { - result += fmt.Sprintf(", %s", agg[i].Error()) - } - result += "]" - return result -} - -// REMOVED: aggregateErr.Errors - -// errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid. -// errConfigurationInvalid is a set of errors indicating the configuration is invalid. -type errConfigurationInvalid []error - -var _ error = errConfigurationInvalid{} - -// REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid. - -// newErrConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid. -func newErrConfigurationInvalid(errs []error) error { - switch len(errs) { - case 0: - return nil - default: - return errConfigurationInvalid(errs) - } -} - -// Error implements the error interface -func (e errConfigurationInvalid) Error() string { - return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error()) -} - -// clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules -// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config -// Callers can put the chain together however they want, but we'd recommend: -// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath -// ExplicitPath is special, because if a user specifically requests a certain file be used and error is reported if thie file is not present -type clientConfigLoadingRules struct { - Precedence []string -} - -// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load -// Load starts by running the MigrationRules and then -// takes the loading rules and returns a Config object based on following rules. -// if the ExplicitPath, return the unmerged explicit file -// Otherwise, return a merged config based on the Precedence slice -// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored. -// Read errors or files with non-deserializable content produce errors. -// The first file to set a particular map key wins and map key's value is never changed. -// BUT, if you set a struct value that is NOT contained inside of map, the value WILL be changed. -// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two. -// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even -// non-conflicting entries from the second file's "red-user" are discarded. -// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder -// and only absolute file paths are returned. 
-func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) { - errlist := []error{} - - kubeConfigFiles := []string{} - - // REMOVED: explicit path support - kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...) - - kubeconfigs := []*clientcmdConfig{} - // read and cache the config files so that we only look at them once - for _, filename := range kubeConfigFiles { - if len(filename) == 0 { - // no work to do - continue - } - - config, err := loadFromFile(filename) - if os.IsNotExist(err) { - // skip missing files - continue - } - if err != nil { - errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename)) - continue - } - - kubeconfigs = append(kubeconfigs, config) - } - - // first merge all of our maps - mapConfig := clientcmdNewConfig() - for _, kubeconfig := range kubeconfigs { - mergo.Merge(mapConfig, kubeconfig) - } - - // merge all of the struct values in the reverse order so that priority is given correctly - // errors are not added to the list the second time - nonMapConfig := clientcmdNewConfig() - for i := len(kubeconfigs) - 1; i >= 0; i-- { - kubeconfig := kubeconfigs[i] - mergo.Merge(nonMapConfig, kubeconfig) - } - - // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and - // get the values we expect. - config := clientcmdNewConfig() - mergo.Merge(config, mapConfig) - mergo.Merge(config, nonMapConfig) - - // REMOVED: Possibility to skip this. - if err := resolveLocalPaths(config); err != nil { - errlist = append(errlist, err) - } - - return config, newAggregate(errlist) -} - -// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile -// LoadFromFile takes a filename and deserializes the contents into Config object -func loadFromFile(filename string) (*clientcmdConfig, error) { - kubeconfigBytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - config, err := load(kubeconfigBytes) - if err != nil { - return nil, err - } - - // set LocationOfOrigin on every Cluster, User, and Context - for key, obj := range config.AuthInfos { - obj.LocationOfOrigin = filename - config.AuthInfos[key] = obj - } - for key, obj := range config.Clusters { - obj.LocationOfOrigin = filename - config.Clusters[key] = obj - } - for key, obj := range config.Contexts { - obj.LocationOfOrigin = filename - config.Contexts[key] = obj - } - - if config.AuthInfos == nil { - config.AuthInfos = map[string]*clientcmdAuthInfo{} - } - if config.Clusters == nil { - config.Clusters = map[string]*clientcmdCluster{} - } - if config.Contexts == nil { - config.Contexts = map[string]*clientcmdContext{} - } - - return config, nil -} - -// load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load -// Load takes a byte slice and deserializes the contents into Config object. -// Encapsulates deserialization without assuming the source is a file. -func load(data []byte) (*clientcmdConfig, error) { - config := clientcmdNewConfig() - // if there's no data in a file, return the default object instead of failing (DecodeInto reject empty input) - if len(data) == 0 { - return config, nil - } - // Note: This does absolutely no kind/version checking or conversions. 
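One detail worth spelling out before the decoding step below: converting YAML to JSON first means encoding/json drives the decode, so the custom UnmarshalJSON methods on clustersMap, authInfosMap, and contextsMap (defined later in this file) run and can turn kubeconfig's list-of-named-entries form into maps. A standalone sketch of the same trick, with a hypothetical namedMap type:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/ghodss/yaml"
)

// namedMap converts a list-of-named-entries document into a map, the same
// trick clustersMap/authInfosMap/contextsMap use in this file.
type namedMap map[string]string

func (m *namedMap) UnmarshalJSON(data []byte) error {
    var entries []struct {
        Name  string `json:"name"`
        Value string `json:"value"`
    }
    if err := json.Unmarshal(data, &entries); err != nil {
        return err
    }
    for _, e := range entries {
        (*m)[e.Name] = e.Value
    }
    return nil
}

func main() {
    doc := []byte("items:\n- name: a\n  value: x\n- name: b\n  value: y\n")
    j, err := yaml.YAMLToJSON(doc) // the JSON detour makes UnmarshalJSON run
    if err != nil {
        panic(err)
    }
    out := struct {
        Items namedMap `json:"items"`
    }{Items: namedMap{}} // map must be non-nil, as clientcmdNewConfig ensures
    if err := json.Unmarshal(j, &out); err != nil {
        panic(err)
    }
    fmt.Println(out.Items) // map[a:x b:y]
}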
- data, err := yaml.YAMLToJSON(data) - if err != nil { - return nil, err - } - if err := json.Unmarshal(data, config); err != nil { - return nil, err - } - return config, nil -} - -// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths. -// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin -// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without -// modification of its contents. -func resolveLocalPaths(config *clientcmdConfig) error { - for _, cluster := range config.Clusters { - if len(cluster.LocationOfOrigin) == 0 { - continue - } - base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) - if err != nil { - return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin) - } - - if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil { - return err - } - } - for _, authInfo := range config.AuthInfos { - if len(authInfo.LocationOfOrigin) == 0 { - continue - } - base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) - if err != nil { - return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin) - } - - if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil { - return err - } - } - - return nil -} - -// getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences. -func getClusterFileReferences(cluster *clientcmdCluster) []*string { - return []*string{&cluster.CertificateAuthority} -} - -// getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences. -func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string { - return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey} -} - -// resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths. -// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory -func resolvePaths(refs []*string, base string) error { - for _, ref := range refs { - // Don't resolve empty paths - if len(*ref) > 0 { - // Don't resolve absolute paths - if !filepath.IsAbs(*ref) { - *ref = filepath.Join(base, *ref) - } - } - } - return nil -} - -// restClientFor is a modified copy of k8s.io/kubernets/pkg/client/restclient.RESTClientFor. -// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config -// object. Note that a RESTClient may require fields that are optional when initializing a Client. -// A RESTClient created by this method is generic - it expects to operate on an API that follows -// the Kubernetes conventions, but may not be the Kubernetes API. 
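resolvePaths above is what makes merged kubeconfigs portable: relative certificate and key references are anchored at the directory of the file they came from. A standalone sketch under that assumption (resolveRelative is a hypothetical helper, not the vendored function):

package main

import (
    "fmt"
    "path/filepath"
)

// resolveRelative anchors relative references at the config file's directory,
// mirroring resolvePaths above; absolute and empty references are untouched.
func resolveRelative(configFile string, refs []*string) error {
    base, err := filepath.Abs(filepath.Dir(configFile))
    if err != nil {
        return err
    }
    for _, ref := range refs {
        if *ref != "" && !filepath.IsAbs(*ref) {
            *ref = filepath.Join(base, *ref)
        }
    }
    return nil
}

func main() {
    ca := "certs/ca.crt"
    if err := resolveRelative("/home/user/.kube/config", []*string{&ca}); err != nil {
        panic(err)
    }
    fmt.Println(ca) // /home/user/.kube/certs/ca.crt
}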
-func restClientFor(config *restConfig) (*url.URL, *http.Client, error) { - // REMOVED: Configurable GroupVersion, Codec - // REMOVED: Configurable versionedAPIPath - baseURL, err := defaultServerURLFor(config) - if err != nil { - return nil, nil, err - } - - transport, err := transportFor(config) - if err != nil { - return nil, nil, err - } - - var httpClient *http.Client - if transport != http.DefaultTransport { - httpClient = &http.Client{Transport: transport} - } - - // REMOVED: Configurable QPS, Burst, ContentConfig - // REMOVED: Actually returning a RESTClient object. - return baseURL, httpClient, nil -} - -// defaultServerURL is a modified copy of k8s.io/kubernets/pkg/client/restclient.DefaultServerURL. -// DefaultServerURL converts a host, host:port, or URL string to the default base server API path -// to use with a Client at a given API version following the standard conventions for a -// Kubernetes API. -func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) { - if host == "" { - return nil, errors.Errorf("host must be a URL or a host:port pair") - } - base := host - hostURL, err := url.Parse(base) - if err != nil { - return nil, err - } - if hostURL.Scheme == "" { - scheme := "http://" - if defaultTLS { - scheme = "https://" - } - hostURL, err = url.Parse(scheme + base) - if err != nil { - return nil, err - } - if hostURL.Path != "" && hostURL.Path != "/" { - return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base) - } - } - - // REMOVED: versionedAPIPath computation. - return hostURL, nil -} - -// defaultServerURLFor is a modified copy of k8s.io/kubernets/pkg/client/restclient.defaultServerURLFor. -// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It -// requires Host and Version to be set prior to being called. -func defaultServerURLFor(config *restConfig) (*url.URL, error) { - // TODO: move the default to secure when the apiserver supports TLS by default - // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA." - hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0 - hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0 - defaultTLS := hasCA || hasCert || config.Insecure - host := config.Host - if host == "" { - host = "localhost" - } - - // REMOVED: Configurable APIPath, GroupVersion - return defaultServerURL(host, defaultTLS) -} - -// transportFor is a modified copy of k8s.io/kubernets/pkg/client/restclient.transportFor. -// TransportFor returns an http.RoundTripper that will provide the authentication -// or transport level security defined by the provided Config. Will return the -// default http.DefaultTransport if no special case behavior is needed. -func transportFor(config *restConfig) (http.RoundTripper, error) { - // REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support - return transportNew(config) -} - -// isConfigTransportTLS is a modified copy of k8s.io/kubernets/pkg/client/restclient.IsConfigTransportTLS. -// IsConfigTransportTLS returns true if and only if the provided -// config will result in a protected connection to the server when it -// is passed to restclient.RESTClientFor(). Use to determine when to -// send credentials over the wire. -// -// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are -// still possible. 
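isConfigTransportTLS below reduces to: normalize Host the way defaultServerURL above does, then check the resulting scheme. A minimal standalone sketch of that normalization (serverURL is a hypothetical helper; the "://" check is used here to sidestep url.Parse treating "host:port" as scheme:opaque):

package main

import (
    "errors"
    "fmt"
    "net/url"
    "strings"
)

// serverURL sketches the defaulting done above: bare host[:port] strings get
// a scheme chosen by whether any TLS material was configured.
func serverURL(host string, defaultTLS bool) (*url.URL, error) {
    if host == "" {
        return nil, errors.New("host must be a URL or a host:port pair")
    }
    if !strings.Contains(host, "://") {
        scheme := "http://"
        if defaultTLS {
            scheme = "https://"
        }
        host = scheme + host
    }
    return url.Parse(host)
}

func main() {
    u, err := serverURL("api.example.com:8443", true)
    fmt.Println(u, err) // https://api.example.com:8443 <nil>
}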
-func isConfigTransportTLS(config restConfig) bool { - baseURL, err := defaultServerURLFor(&config) - if err != nil { - return false - } - return baseURL.Scheme == "https" -} - -// transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New. -// New returns an http.RoundTripper that will provide the authentication -// or transport level security defined by the provided Config. -func transportNew(config *restConfig) (http.RoundTripper, error) { - // REMOVED: custom config.Transport support. - // Set transport level security - - var ( - rt http.RoundTripper - err error - ) - - rt, err = tlsCacheGet(config) - if err != nil { - return nil, err - } - - // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains. - if len(config.Username) != 0 && len(config.BearerToken) != 0 { - return nil, errors.Errorf("username/password or bearer token may be set, but not both") - } - - return rt, nil -} - -// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR. -// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if -// no matching CIDRs are found -func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { - // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it - noProxyEnv := os.Getenv("NO_PROXY") - noProxyRules := strings.Split(noProxyEnv, ",") - - cidrs := []*net.IPNet{} - for _, noProxyRule := range noProxyRules { - _, cidr, _ := net.ParseCIDR(noProxyRule) - if cidr != nil { - cidrs = append(cidrs, cidr) - } - } - - if len(cidrs) == 0 { - return delegate - } - - return func(req *http.Request) (*url.URL, error) { - host := req.URL.Host - // for some urls, the Host is already the host, not the host:port - if net.ParseIP(host) == nil { - var err error - host, _, err = net.SplitHostPort(req.URL.Host) - if err != nil { - return delegate(req) - } - } - - ip := net.ParseIP(host) - if ip == nil { - return delegate(req) - } - - for _, cidr := range cidrs { - if cidr.Contains(ip) { - return nil, nil - } - } - - return delegate(req) - } -} - -// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get. -func tlsCacheGet(config *restConfig) (http.RoundTripper, error) { - // REMOVED: any actual caching - - // Get the TLS options for this client config - tlsConfig, err := tlsConfigFor(config) - if err != nil { - return nil, err - } - // The options didn't require a custom TLS config - if tlsConfig == nil { - return http.DefaultTransport, nil - } - - // REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here. - t := &http.Transport{ - // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings - // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY - Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment), - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - } - // Allow clients to disable http2 if needed. 
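The proxier wrapper above is the only NO_PROXY CIDR handling in this file; a standalone sketch of just the matching rule (bypassesProxy is a hypothetical name): an IP inside any CIDR-shaped NO_PROXY entry skips the proxy, everything else falls through to the delegate proxier.

package main

import (
    "fmt"
    "net"
)

// bypassesProxy mirrors the matching rule of newProxierWithNoProxyCIDR above:
// an IP inside any CIDR-shaped NO_PROXY entry skips the proxy; non-CIDR
// entries and non-IP hosts are left to the delegate.
func bypassesProxy(host string, noProxy []string) bool {
    ip := net.ParseIP(host)
    if ip == nil {
        return false // not a bare IP; the delegate decides
    }
    for _, rule := range noProxy {
        if _, cidr, _ := net.ParseCIDR(rule); cidr != nil && cidr.Contains(ip) {
            return true
        }
    }
    return false
}

func main() {
    rules := []string{"10.0.0.0/16", "example.com"} // only the CIDR entry matters here
    fmt.Println(bypassesProxy("10.0.3.7", rules)) // true
    fmt.Println(bypassesProxy("8.8.8.8", rules))  // false
}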
- if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 { - _ = http2.ConfigureTransport(t) - } - return t, nil -} - -// tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor. -// TLSConfigFor returns a tls.Config that will provide the transport level security defined -// by the provided Config. Will return nil if no transport level security is requested. -func tlsConfigFor(c *restConfig) (*tls.Config, error) { - if !(c.HasCA() || c.HasCertAuth() || c.Insecure) { - return nil, nil - } - if c.HasCA() && c.Insecure { - return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed") - } - if err := loadTLSFiles(c); err != nil { - return nil, err - } - - tlsConfig := &tls.Config{ - // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability) - MinVersion: tls.VersionTLS10, - InsecureSkipVerify: c.Insecure, - } - - if c.HasCA() { - tlsConfig.RootCAs = rootCertPool(c.CAData) - } - - if c.HasCertAuth() { - cert, err := tls.X509KeyPair(c.CertData, c.KeyData) - if err != nil { - return nil, err - } - tlsConfig.Certificates = []tls.Certificate{cert} - } - - return tlsConfig, nil -} - -// loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles. -// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, -// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are -// either populated or were empty to start. -func loadTLSFiles(c *restConfig) error { - var err error - c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile) - if err != nil { - return err - } - - c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile) - if err != nil { - return err - } - - c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) - if err != nil { - return err - } - return nil -} - -// dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile. -// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, -// or an error if an error occurred reading the file -func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { - if len(data) > 0 { - return data, nil - } - if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) - if err != nil { - return []byte{}, err - } - return fileData, nil - } - return nil, nil -} - -// rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool. -// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". -// When caData is not empty, it will be the ONLY information used in the CertPool. -func rootCertPool(caData []byte) *x509.CertPool { - // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go - // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values - // It doesn't allow trusting either/or, but hopefully that won't be an issue - if len(caData) == 0 { - return nil - } - - // if we have caData, use it - certPool := x509.NewCertPool() - certPool.AppendCertsFromPEM(caData) - return certPool -} - -// HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA. -// HasCA returns whether the configuration has a certificate authority or not. 
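tlsConfigFor and rootCertPool above encode a crypto/tls convention worth spelling out, gated by the HasCA/HasCertAuth accessors below: a nil RootCAs means "use the system roots", while a populated pool replaces them entirely. A minimal standalone sketch of the same decisions (tlsFor is a hypothetical helper; it keeps the vendored TLS 1.0 floor):

package main

import (
    "crypto/tls"
    "crypto/x509"
    "errors"
    "fmt"
)

// tlsFor mirrors the choices above: CA data and Insecure are mutually
// exclusive, CA data builds a dedicated root pool, and a nil pool
// deliberately falls back to the system roots.
func tlsFor(caPEM []byte, insecure bool) (*tls.Config, error) {
    if len(caPEM) > 0 && insecure {
        return nil, errors.New("specifying root certificates with the insecure flag is not allowed")
    }
    cfg := &tls.Config{
        MinVersion:         tls.VersionTLS10, // the vendored code's POODLE-era floor
        InsecureSkipVerify: insecure,
    }
    if len(caPEM) > 0 {
        pool := x509.NewCertPool()
        if !pool.AppendCertsFromPEM(caPEM) {
            return nil, errors.New("no certificates parsed from CA data")
        }
        cfg.RootCAs = pool
    }
    return cfg, nil
}

func main() {
    cfg, err := tlsFor(nil, false)
    fmt.Println(cfg.RootCAs == nil, err) // true <nil>: nil RootCAs selects system roots
}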
-func (c *restConfig) HasCA() bool { - return len(c.CAData) > 0 || len(c.CAFile) > 0 -} - -// HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth. -// HasCertAuth returns whether the configuration has certificate authentication or not. -func (c *restConfig) HasCertAuth() bool { - return len(c.CertData) != 0 || len(c.CertFile) != 0 -} - -// clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config. -// Config holds the information needed to build connect to remote kubernetes clusters as a given user -// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() -type clientcmdConfig struct { - // Clusters is a map of referencable names to cluster configs - Clusters clustersMap `json:"clusters"` - // AuthInfos is a map of referencable names to user configs - AuthInfos authInfosMap `json:"users"` - // Contexts is a map of referencable names to context configs - Contexts contextsMap `json:"contexts"` - // CurrentContext is the name of the context that you would like to use by default - CurrentContext string `json:"current-context"` -} - -type clustersMap map[string]*clientcmdCluster - -func (m *clustersMap) UnmarshalJSON(data []byte) error { - var a []v1NamedCluster - if err := json.Unmarshal(data, &a); err != nil { - return err - } - for _, e := range a { - cluster := e.Cluster // Allocates a new instance in each iteration - (*m)[e.Name] = &cluster - } - return nil -} - -type authInfosMap map[string]*clientcmdAuthInfo - -func (m *authInfosMap) UnmarshalJSON(data []byte) error { - var a []v1NamedAuthInfo - if err := json.Unmarshal(data, &a); err != nil { - return err - } - for _, e := range a { - authInfo := e.AuthInfo // Allocates a new instance in each iteration - (*m)[e.Name] = &authInfo - } - return nil -} - -type contextsMap map[string]*clientcmdContext - -func (m *contextsMap) UnmarshalJSON(data []byte) error { - var a []v1NamedContext - if err := json.Unmarshal(data, &a); err != nil { - return err - } - for _, e := range a { - context := e.Context // Allocates a new instance in each iteration - (*m)[e.Name] = &context - } - return nil -} - -// clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig. -// NewConfig is a convenience function that returns a new Config object with non-nil maps -func clientcmdNewConfig() *clientcmdConfig { - return &clientcmdConfig{ - Clusters: make(map[string]*clientcmdCluster), - AuthInfos: make(map[string]*clientcmdAuthInfo), - Contexts: make(map[string]*clientcmdContext), - } -} - -// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster. -// Cluster contains information about how to communicate with a kubernetes cluster -type clientcmdCluster struct { - // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. - LocationOfOrigin string - // Server is the address of the kubernetes cluster (https://hostname:port). - Server string `json:"server"` - // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` - // CertificateAuthority is the path to a cert file for the certificate authority. - CertificateAuthority string `json:"certificate-authority,omitempty"` - // CertificateAuthorityData contains PEM-encoded certificate authority certificates. 
Overrides CertificateAuthority - CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` -} - -// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo. -// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. -type clientcmdAuthInfo struct { - // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. - LocationOfOrigin string - // ClientCertificate is the path to a client cert file for TLS. - ClientCertificate string `json:"client-certificate,omitempty"` - // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate - ClientCertificateData []byte `json:"client-certificate-data,omitempty"` - // ClientKey is the path to a client key file for TLS. - ClientKey string `json:"client-key,omitempty"` - // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey - ClientKeyData []byte `json:"client-key-data,omitempty"` - // Token is the bearer token for authentication to the kubernetes cluster. - Token string `json:"token,omitempty"` - // Username is the username for basic authentication to the kubernetes cluster. - Username string `json:"username,omitempty"` - // Password is the password for basic authentication to the kubernetes cluster. - Password string `json:"password,omitempty"` -} - -// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context. -// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) -type clientcmdContext struct { - // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. - LocationOfOrigin string - // Cluster is the name of the cluster for this context - Cluster string `json:"cluster"` - // AuthInfo is the name of the authInfo for this context - AuthInfo string `json:"user"` - // Namespace is the default namespace to use on unspecified requests - Namespace string `json:"namespace,omitempty"` -} - -// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster. -// NamedCluster relates nicknames to cluster information -type v1NamedCluster struct { - // Name is the nickname for this Cluster - Name string `json:"name"` - // Cluster holds the cluster information - Cluster clientcmdCluster `json:"cluster"` -} - -// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext. -// NamedContext relates nicknames to context information -type v1NamedContext struct { - // Name is the nickname for this Context - Name string `json:"name"` - // Context holds the context information - Context clientcmdContext `json:"context"` -} - -// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo. 
-// NamedAuthInfo relates nicknames to auth information -type v1NamedAuthInfo struct { - // Name is the nickname for this AuthInfo - Name string `json:"name"` - // AuthInfo holds the auth information - AuthInfo clientcmdAuthInfo `json:"user"` -} diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go deleted file mode 100644 index 814c3eea1..000000000 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ /dev/null @@ -1,562 +0,0 @@ -package openshift - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/containers/image/docker" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/containers/image/version" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// openshiftClient is configuration for dealing with a single image stream, for reading or writing. -type openshiftClient struct { - ref openshiftReference - baseURL *url.URL - // Values from Kubernetes configuration - httpClient *http.Client - bearerToken string // "" if not used - username string // "" if not used - password string // if username != "" -} - -// newOpenshiftClient creates a new openshiftClient for the specified reference. -func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { - // We have already done this parsing in ParseReference, but thrown away - // httpClient. So, parse again. - // (We could also rework/split restClientFor to "get base URL" to be done - // in ParseReference, and "get httpClient" to be done here. But until/unless - // we support non-default clusters, this is good enough.) - - // Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client. - cmdConfig := defaultClientConfig() - logrus.Debugf("cmdConfig: %#v", cmdConfig) - restConfig, err := cmdConfig.ClientConfig() - if err != nil { - return nil, err - } - // REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.) - logrus.Debugf("restConfig: %#v", restConfig) - baseURL, httpClient, err := restClientFor(restConfig) - if err != nil { - return nil, err - } - logrus.Debugf("URL: %#v", *baseURL) - - if httpClient == nil { - httpClient = http.DefaultClient - } - - return &openshiftClient{ - ref: ref, - baseURL: baseURL, - httpClient: httpClient, - bearerToken: restConfig.BearerToken, - username: restConfig.Username, - password: restConfig.Password, - }, nil -} - -// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. 
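Before the full request helper below, its credential handling in isolation, as a standalone sketch (setAuth is a hypothetical name): the bearer token wins, otherwise basic auth is used when a username is configured, matching the order used below.

package main

import (
    "fmt"
    "net/http"
)

// setAuth mirrors doRequest's header logic: a bearer token takes precedence,
// otherwise basic auth applies when a username is set.
func setAuth(req *http.Request, bearer, user, pass string) {
    switch {
    case bearer != "":
        req.Header.Set("Authorization", "Bearer "+bearer)
    case user != "":
        req.SetBasicAuth(user, pass)
    }
    req.Header.Set("Accept", "application/json, */*")
}

func main() {
    req, err := http.NewRequest("GET", "https://openshift.example.com/oapi/v1", nil)
    if err != nil {
        panic(err)
    }
    setAuth(req, "my-token", "", "")
    fmt.Println(req.Header.Get("Authorization")) // Bearer my-token
}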
-func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { - url := *c.baseURL - url.Path = path - var requestBodyReader io.Reader - if requestBody != nil { - logrus.Debugf("Will send body: %s", requestBody) - requestBodyReader = bytes.NewReader(requestBody) - } - req, err := http.NewRequest(method, url.String(), requestBodyReader) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - - if len(c.bearerToken) != 0 { - req.Header.Set("Authorization", "Bearer "+c.bearerToken) - } else if len(c.username) != 0 { - req.SetBasicAuth(c.username, c.password) - } - req.Header.Set("Accept", "application/json, */*") - req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version)) - if requestBody != nil { - req.Header.Set("Content-Type", "application/json") - } - - logrus.Debugf("%s %s", method, url.String()) - res, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - body, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - logrus.Debugf("Got body: %s", body) - // FIXME: Just throwing this useful information away only to try to guess later... - logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type")) - - var status status - statusValid := false - if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 { - statusValid = true - } - - switch { - case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient. - if statusValid && status.Status != "Success" { - return nil, errors.New(status.Message) - } - case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent: - // OK. - default: - if statusValid { - return nil, errors.New(status.Message) - } - return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body)) - } - - return body, nil -} - -// getImage loads the specified image object. -func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) { - // FIXME: validate components per validation.IsValidPathSegmentName? - path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName) - body, err := c.doRequest(ctx, "GET", path, nil) - if err != nil { - return nil, err - } - // Note: This does absolutely no kind/version checking or conversions. - var isi imageStreamImage - if err := json.Unmarshal(body, &isi); err != nil { - return nil, err - } - return &isi.Image, nil -} - -// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use; -// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside. 
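The conversion below keeps the repository path reported by the cluster but swaps in the externally reachable registry domain from the user's reference. As a standalone sketch (rewriteDomain is a hypothetical helper):

package main

import (
    "fmt"
    "strings"
)

// rewriteDomain keeps the repository path but replaces the cluster-internal
// domain with one the caller can actually reach, as the method below does.
func rewriteDomain(externalDomain, clusterRef string) (string, error) {
    parts := strings.SplitN(clusterRef, "/", 2)
    if len(parts) != 2 {
        return "", fmt.Errorf("invalid docker reference %s: missing '/'", clusterRef)
    }
    return externalDomain + "/" + parts[1], nil
}

func main() {
    ref, err := rewriteDomain("registry.example.com:5000", "172.30.1.1:5000/myproject/app@sha256:abc")
    fmt.Println(ref, err) // registry.example.com:5000/myproject/app@sha256:abc <nil>
}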
-func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { - parts := strings.SplitN(ref, "/", 2) - if len(parts) != 2 { - return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref) - } - return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil -} - -type openshiftImageSource struct { - client *openshiftClient - // Values specific to this image - sys *types.SystemContext - // State - docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet - imageStreamImageName string // Resolved image identifier, or "" if not known yet -} - -// newImageSource creates a new ImageSource for the specified reference. -// The caller must call .Close() on the returned ImageSource. -func newImageSource(sys *types.SystemContext, ref openshiftReference) (types.ImageSource, error) { - client, err := newOpenshiftClient(ref) - if err != nil { - return nil, err - } - - return &openshiftImageSource{ - client: client, - sys: sys, - }, nil -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *openshiftImageSource) Reference() types.ImageReference { - return s.client.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *openshiftImageSource) Close() error { - if s.docker != nil { - err := s.docker.Close() - s.docker = nil - - return err - } - - return nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, "", err - } - return s.docker.GetManifest(ctx, instanceDigest) -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *openshiftImageSource) HasThreadSafeGetBlob() bool { - return false -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, 0, err - } - return s.docker.GetBlob(ctx, info, cache) -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). 
-func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - var imageName string - if instanceDigest == nil { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, err - } - imageName = s.imageStreamImageName - } else { - imageName = instanceDigest.String() - } - image, err := s.client.getImage(ctx, imageName) - if err != nil { - return nil, err - } - var sigs [][]byte - for _, sig := range image.Signatures { - if sig.Type == imageSignatureTypeAtomic { - sigs = append(sigs, sig.Content) - } - } - return sigs, nil -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} - -// ensureImageIsResolved sets up s.docker and s.imageStreamImageName -func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { - if s.docker != nil { - return nil - } - - // FIXME: validate components per validation.IsValidPathSegmentName? - path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream) - body, err := s.client.doRequest(ctx, "GET", path, nil) - if err != nil { - return err - } - // Note: This does absolutely no kind/version checking or conversions. - var is imageStream - if err := json.Unmarshal(body, &is); err != nil { - return err - } - var te *tagEvent - for _, tag := range is.Status.Tags { - if tag.Tag != s.client.ref.dockerReference.Tag() { - continue - } - if len(tag.Items) > 0 { - te = &tag.Items[0] - break - } - } - if te == nil { - return errors.Errorf("No matching tag found") - } - logrus.Debugf("tag event %#v", te) - dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference) - if err != nil { - return err - } - logrus.Debugf("Resolved reference %#v", dockerRefString) - dockerRef, err := docker.ParseReference("//" + dockerRefString) - if err != nil { - return err - } - d, err := dockerRef.NewImageSource(ctx, s.sys) - if err != nil { - return err - } - s.docker = d - s.imageStreamImageName = te.Image - return nil -} - -type openshiftImageDestination struct { - client *openshiftClient - docker types.ImageDestination // The Docker Registry endpoint - // State - imageStreamImageName string // "" if not yet known -} - -// newImageDestination creates a new ImageDestination for the specified reference. -func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) { - client, err := newOpenshiftClient(ref) - if err != nil { - return nil, err - } - - // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match, - // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know - // the manifest digest at this point. - dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag()) - dockerRef, err := docker.ParseReference(dockerRefString) - if err != nil { - return nil, err - } - docker, err := dockerRef.NewImageDestination(ctx, sys) - if err != nil { - return nil, err - } - - return &openshiftImageDestination{ - client: client, - docker: docker, - }, nil -} - -// Reference returns the reference used to set up this destination. 
Note that this should directly correspond to the user's intent,
-// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (d *openshiftImageDestination) Reference() types.ImageReference {
- return d.client.ref
-}
-
-// Close removes resources associated with an initialized ImageDestination, if any.
-func (d *openshiftImageDestination) Close() error {
- return d.docker.Close()
-}
-
-func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
- return d.docker.SupportedManifestMIMETypes()
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *openshiftImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.Compress
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
-// uploaded to the image destination, true otherwise.
-func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
- return true
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
-func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
- return d.docker.IgnoresEmbeddedDockerReference()
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
-// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. 
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
-}
-
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
-// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte) error {
- manifestDigest, err := manifest.Digest(m)
- if err != nil {
- return err
- }
- d.imageStreamImageName = manifestDigest.String()
-
- return d.docker.PutManifest(ctx, m)
-}
-
-func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
- if d.imageStreamImageName == "" {
- return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
- }
- // Because image signatures are a shared resource in Atomic Registry, the default upload
- // always adds signatures. Eventually we should also allow removing signatures.
-
- if len(signatures) == 0 {
- return nil // No need to even read the old state.
- }
-
- image, err := d.client.getImage(ctx, d.imageStreamImageName)
- if err != nil {
- return err
- }
- existingSigNames := map[string]struct{}{}
- for _, sig := range image.Signatures {
- existingSigNames[sig.objectMeta.Name] = struct{}{}
- }
-
-sigExists:
- for _, newSig := range signatures {
- for _, existingSig := range image.Signatures {
- if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
- continue sigExists
- }
- }
-
- // The API expects us to invent a new unique name. This is racy, but hopefully good enough.
- var signatureName string
- for {
- randBytes := make([]byte, 16)
- n, err := rand.Read(randBytes)
- if err != nil || n != 16 {
- return errors.Wrapf(err, "Error generating random signature len %d", n)
- }
- signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes)
- if _, ok := existingSigNames[signatureName]; !ok {
- break
- }
- }
- // Note: This does absolutely no kind/version checking or conversions.
- sig := imageSignature{
- typeMeta: typeMeta{
- Kind: "ImageSignature",
- APIVersion: "v1",
- },
- objectMeta: objectMeta{Name: signatureName},
- Type: imageSignatureTypeAtomic,
- Content: newSig,
- }
- body, err := json.Marshal(sig)
- if err != nil {
- return err
- }
- _, err = d.client.doRequest(ctx, "POST", "/oapi/v1/imagesignatures", body)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *openshiftImageDestination) Commit(ctx context.Context) error {
- return d.docker.Commit(ctx)
-}
-
-// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.
-type imageStream struct {
- Status imageStreamStatus `json:"status,omitempty"`
-}
-type imageStreamStatus struct {
- DockerImageRepository string `json:"dockerImageRepository"`
- Tags []namedTagEventList `json:"tags,omitempty"`
-}
-type namedTagEventList struct {
- Tag string `json:"tag"`
- Items []tagEvent `json:"items"`
-}
-type tagEvent struct {
- DockerImageReference string `json:"dockerImageReference"`
- Image string `json:"image"`
-}
-type imageStreamImage struct {
- Image image `json:"image"`
-}
-type image struct {
- objectMeta `json:"metadata,omitempty"`
- DockerImageReference string `json:"dockerImageReference,omitempty"`
- // DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"`
- DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"`
- DockerImageManifest string `json:"dockerImageManifest,omitempty"`
- // DockerImageLayers []ImageLayer `json:"dockerImageLayers"`
- Signatures []imageSignature `json:"signatures,omitempty"`
-}
-
-const imageSignatureTypeAtomic string = "atomic"
-
-type imageSignature struct {
- typeMeta `json:",inline"`
- objectMeta `json:"metadata,omitempty"`
- Type string `json:"type"`
- Content []byte `json:"content"`
- // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
- // ImageIdentity string `json:"imageIdentity,omitempty"`
- // SignedClaims map[string]string `json:"signedClaims,omitempty"`
- // Created *unversioned.Time `json:"created,omitempty"`
- // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"`
- // IssuedTo SignatureSubject `json:"issuedTo,omitempty"`
-}
-type typeMeta struct {
- Kind string `json:"kind,omitempty"`
- APIVersion string `json:"apiVersion,omitempty"`
-}
-type objectMeta struct {
- Name string `json:"name,omitempty"`
- GenerateName string `json:"generateName,omitempty"`
- Namespace string `json:"namespace,omitempty"`
- SelfLink string `json:"selfLink,omitempty"`
- ResourceVersion string `json:"resourceVersion,omitempty"`
- Generation int64 `json:"generation,omitempty"`
- DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"`
- Labels map[string]string `json:"labels,omitempty"`
- Annotations map[string]string `json:"annotations,omitempty"`
-}
-
-// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status
-type status struct {
- Status string `json:"status,omitempty"`
- Message string `json:"message,omitempty"`
- // Reason StatusReason `json:"reason,omitempty"`
- // Details *StatusDetails `json:"details,omitempty"`
- Code int32 `json:"code,omitempty"`
-}
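To make the wire format concrete, here is a small self-contained sketch (with local, simplified copies of the structs above and a made-up digest and payload) of what one ImageSignature object POSTed by PutSignatures looks like after json.Marshal; note that encoding/json inlines the untagged-name typeMeta embed, nests objectMeta under "metadata", and base64-encodes Content:

package main

import (
	"encoding/json"
	"fmt"
)

// Local illustration-only copies of the structs above.
type typeMeta struct {
	Kind       string `json:"kind,omitempty"`
	APIVersion string `json:"apiVersion,omitempty"`
}
type objectMeta struct {
	Name string `json:"name,omitempty"`
}
type imageSignature struct {
	typeMeta   `json:",inline"`
	objectMeta `json:"metadata,omitempty"`
	Type       string `json:"type"`
	Content    []byte `json:"content"`
}

func main() {
	sig := imageSignature{
		typeMeta:   typeMeta{Kind: "ImageSignature", APIVersion: "v1"},
		objectMeta: objectMeta{Name: "sha256:1234@0123456789abcdef0123456789abcdef"},
		Type:       "atomic",
		Content:    []byte("binary signature payload"),
	}
	out, err := json.Marshal(sig)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// Output:
	// {"kind":"ImageSignature","apiVersion":"v1","metadata":{"name":"sha256:1234@0123456789abcdef0123456789abcdef"},"type":"atomic","content":"YmluYXJ5IHNpZ25hdHVyZSBwYXlsb2Fk"}
}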
diff --git a/vendor/github.com/containers/image/openshift/openshift_transport.go b/vendor/github.com/containers/image/openshift/openshift_transport.go
deleted file mode 100644
index b27867a0d..000000000
--- a/vendor/github.com/containers/image/openshift/openshift_transport.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package openshift
-
-import (
- "context"
- "fmt"
- "regexp"
- "strings"
-
- "github.com/containers/image/docker/policyconfiguration"
- "github.com/containers/image/docker/reference"
- genericImage "github.com/containers/image/image"
- "github.com/containers/image/transports"
- "github.com/containers/image/types"
- "github.com/pkg/errors"
-)
-
-func init() {
- transports.Register(Transport)
-}
-
-// Transport is an ImageTransport for OpenShift registry-hosted images.
-var Transport = openshiftTransport{}
-
-type openshiftTransport struct{}
-
-func (t openshiftTransport) Name() string {
- return "atomic"
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
-func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) {
- return ParseReference(reference)
-}
-
-// Note that imageNameRegexp is namespace/stream:tag, this
-// is HOSTNAME/namespace/stream:tag or parent prefixes.
-// Keep this in sync with imageNameRegexp!
-var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
-
-// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
-// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
-// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
-// scope passed to this function will not be "", that value is always allowed.
-func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error {
- if scopeRegexp.FindStringIndex(scope) == nil {
- return errors.Errorf("Invalid scope name %s", scope)
- }
- return nil
-}
-
-// openshiftReference is an ImageReference for OpenShift images.
-type openshiftReference struct {
- dockerReference reference.NamedTagged
- namespace string // Computed from dockerReference in advance.
- stream string // Computed from dockerReference in advance.
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference.
-func ParseReference(ref string) (types.ImageReference, error) {
- r, err := reference.ParseNormalizedNamed(ref)
- if err != nil {
- return nil, errors.Wrapf(err, "failed to parse image reference %q", ref)
- }
- tagged, ok := r.(reference.NamedTagged)
- if !ok {
- return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref)
- }
- return NewReference(tagged)
-}
-
-// NewReference returns an OpenShift reference for a reference.NamedTagged
-func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) {
- r := strings.SplitN(reference.Path(dockerRef), "/", 3)
- if len(r) != 2 {
- return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'",
- reference.FamiliarString(dockerRef))
- }
- return openshiftReference{
- namespace: r[0],
- stream: r[1],
- dockerReference: dockerRef,
- }, nil
-}
-
-func (ref openshiftReference) Transport() types.ImageTransport {
- return Transport
-}
-
-// StringWithinTransport returns a string representation of the reference, which MUST be such that
-// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
-// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
-// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
-func (ref openshiftReference) StringWithinTransport() string {
- return reference.FamiliarString(ref.dockerReference)
-}
-
-// DockerReference returns a Docker reference associated with this reference
-// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
-// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
-func (ref openshiftReference) DockerReference() reference.Named {
- return ref.dockerReference
-}
-
-// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
-// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
-// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
-// (i.e. various references with exactly the same semantics should return the same configuration identity)
-// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
-// not required/guaranteed that it will be a valid input to Transport().ParseReference().
-// Returns "" if configuration identities for these references are not supported.
-func (ref openshiftReference) PolicyConfigurationIdentity() string {
- res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference)
- if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference.
- panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
- }
- return res
-}
-
-// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
-// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
-// in order, terminating on first match, and an implicit "" is always checked at the end.
-// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
-// and each following element to be a prefix of the element preceding it.
-func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
- return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference)
-}
-
-// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned ImageCloser.
-// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
-// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
-func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(sys, ref)
- if err != nil {
- return nil, err
- }
- return genericImage.FromSource(ctx, sys, src)
-}
-
-// NewImageSource returns a types.ImageSource for this reference.
-// The caller must call .Close() on the returned ImageSource.
-func (ref openshiftReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
- return newImageSource(sys, ref)
-}
-
-// NewImageDestination returns a types.ImageDestination for this reference.
-// The caller must call .Close() on the returned ImageDestination.
-func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
- return newImageDestination(ctx, sys, ref)
-}
-
-// DeleteImage deletes the named image from the registry, if supported.
-func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for atomic: images")
-}
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go
deleted file mode 100644
index 06a905aed..000000000
--- a/vendor/github.com/containers/image/ostree/ostree_dest.go
+++ /dev/null
@@ -1,504 +0,0 @@
-// +build containers_image_ostree
-
-package ostree
-
-import (
- "bytes"
- "context"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
- "strconv"
- "strings"
- "syscall"
- "time"
- "unsafe"
-
- "github.com/containers/image/manifest"
- "github.com/containers/image/types"
- "github.com/containers/storage/pkg/archive"
- "github.com/klauspost/pgzip"
- "github.com/opencontainers/go-digest"
- selinux "github.com/opencontainers/selinux/go-selinux"
- "github.com/ostreedev/ostree-go/pkg/otbuiltin"
- "github.com/pkg/errors"
- "github.com/vbatts/tar-split/tar/asm"
- "github.com/vbatts/tar-split/tar/storage"
-)
-
-// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux
-// #include <glib.h>
-// #include <glib-object.h>
-// #include <gio/gio.h>
-// #include <stdlib.h>
-// #include <ostree.h>
-// #include <gio/ginputstream.h>
-// #include <selinux/selinux.h>
-// #include <selinux/label.h>
-import "C"
-
-type blobToImport struct {
- Size int64
- Digest digest.Digest
- BlobPath string
-}
-
-type descriptor struct {
- Size int64 `json:"size"`
- Digest digest.Digest `json:"digest"`
-}
-
-type fsLayersSchema1 struct {
- BlobSum digest.Digest `json:"blobSum"`
-}
-
-type manifestSchema struct {
- LayersDescriptors []descriptor `json:"layers"`
- FSLayers []fsLayersSchema1 `json:"fsLayers"`
-}
-
-type ostreeImageDestination struct {
- ref ostreeReference
- manifest string
- schema manifestSchema
- tmpDirPath string
- blobs map[string]*blobToImport
- digest digest.Digest
- signaturesLen int
- repo *C.struct_OstreeRepo
-}
-
-// newImageDestination returns an ImageDestination for writing to an existing ostree.
-func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) {
- tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
- if err := ensureDirectoryExists(tmpDirPath); err != nil {
- return nil, err
- }
- return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil
-}
-
-// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
-// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (d *ostreeImageDestination) Reference() types.ImageReference {
- return d.ref
-}
-
-// Close removes resources associated with an initialized ImageDestination, if any.
-func (d *ostreeImageDestination) Close() error {
- if d.repo != nil {
- C.g_object_unref(C.gpointer(d.repo))
- }
- return os.RemoveAll(d.tmpDirPath)
-}
-
-func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string {
- return []string{
- manifest.DockerV2Schema2MediaType,
- }
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *ostreeImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
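The manifestSchema type above deliberately captures only the layer lists of both manifest formats: schema2 manifests carry layers[].digest, while legacy schema1 manifests carry fsLayers[].blobSum. As a hedged, self-contained sketch of the same trick (the sample manifests are abbreviated and made up):

package main

import (
	"encoding/json"
	"fmt"
)

// layerList mirrors the manifestSchema idea: one struct that picks up
// layer digests from either a schema2 or a schema1 manifest.
type layerList struct {
	Layers []struct {
		Digest string `json:"digest"`
	} `json:"layers"`
	FSLayers []struct {
		BlobSum string `json:"blobSum"`
	} `json:"fsLayers"`
}

func digests(manifest []byte) ([]string, error) {
	var m layerList
	if err := json.Unmarshal(manifest, &m); err != nil {
		return nil, err
	}
	var out []string
	for _, l := range m.Layers { // schema2
		out = append(out, l.Digest)
	}
	for _, l := range m.FSLayers { // schema1
		out = append(out, l.BlobSum)
	}
	return out, nil
}

func main() {
	schema2 := []byte(`{"layers":[{"digest":"sha256:aaa"},{"digest":"sha256:bbb"}]}`)
	schema1 := []byte(`{"fsLayers":[{"blobSum":"sha256:ccc"}]}`)
	for _, m := range [][]byte{schema2, schema1} {
		d, err := digests(m)
		if err != nil {
			panic(err)
		}
		fmt.Println(d)
	}
}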
-
-// DesiredLayerCompression indicates the kind of compression to apply to layer blobs written to this destination.
-func (d *ostreeImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.PreserveOriginal
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool {
- return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
-func (d *ostreeImageDestination) MustMatchRuntimeOS() bool {
- return true
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, DockerReference() returns nil.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
-// inputInfo.Size is the expected length of stream, if known.
-// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
- if err != nil {
- return types.BlobInfo{}, err
- }
-
- blobPath := filepath.Join(tmpDir, "content")
- blobFile, err := os.Create(blobPath)
- if err != nil {
- return types.BlobInfo{}, err
- }
- defer blobFile.Close()
-
- digester := digest.Canonical.Digester()
- tee := io.TeeReader(stream, digester.Hash())
-
- // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- size, err := io.Copy(blobFile, tee)
- if err != nil {
- return types.BlobInfo{}, err
- }
- computedDigest := digester.Digest()
- if inputInfo.Size != -1 && size != inputInfo.Size {
- return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
- }
- if err := blobFile.Sync(); err != nil {
- return types.BlobInfo{}, err
- }
-
- hash := computedDigest.Hex()
- d.blobs[hash] = &blobToImport{Size: size, Digest: computedDigest, BlobPath: blobPath}
- return types.BlobInfo{Digest: computedDigest, Size: size}, nil
-}
-
-func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
- entries, err := ioutil.ReadDir(dir)
- if err != nil {
- return err
- }
-
- for _, info := range entries {
- fullpath := filepath.Join(dir, info.Name())
- if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
- if err := os.Remove(fullpath); err != nil {
- return err
- }
- continue
- }
-
- if selinuxHnd != nil {
- relPath, err := filepath.Rel(root, fullpath)
- if err != nil {
- return err
- }
- // Handle /exports/hostfs as a special case. Files under this directory are copied to the host,
- // thus we benefit from maintaining the same SELinux label they would have on the host as we could
- // use hard links instead of copying the files.
- relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/"))
-
- relPathC := C.CString(relPath)
- defer C.free(unsafe.Pointer(relPathC))
- var context *C.char
-
- res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm))
- if int(res) < 0 && err != syscall.ENOENT {
- return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath)
- }
- if int(res) == 0 {
- defer C.freecon(context)
- fullpathC := C.CString(fullpath)
- defer C.free(unsafe.Pointer(fullpathC))
- res, err = C.lsetfilecon_raw(fullpathC, context)
- if int(res) < 0 {
- return errors.Wrapf(err, "cannot setfilecon_raw %s to %s", fullpath, C.GoString(context))
- }
- }
- }
-
- if info.IsDir() {
- if usermode {
- if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
- return err
- }
- }
- err = fixFiles(selinuxHnd, root, fullpath, usermode)
- if err != nil {
- return err
- }
- } else if usermode && (info.Mode().IsRegular()) {
- if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error {
- opts := otbuiltin.NewCommitOptions()
- opts.AddMetadataString = metadata
- opts.Timestamp = time.Now()
- // OCI layers have no parent OSTree commit
- opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000"
- _, err := repo.Commit(root, branch, opts)
- return err
-}
-
-func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) {
- mfz := pgzip.NewWriter(output)
- defer mfz.Close()
- metaPacker := storage.NewJSONPacker(mfz)
-
- stream, err := os.OpenFile(file, os.O_RDONLY, 0)
- if err != nil {
- return "", -1, err
- }
- defer stream.Close()
-
- gzReader, err := archive.DecompressStream(stream)
- if err != nil {
- return "", -1, err
- }
- defer gzReader.Close()
-
- its, err := asm.NewInputTarStream(gzReader, metaPacker, nil)
- if err != nil {
- return "", -1, err
- }
-
- digester := digest.Canonical.Digester()
-
- written, err := io.Copy(digester.Hash(), its)
- if err != nil {
- return "", -1, err
- }
-
- return digester.Digest(), written, nil
-}
-
-func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
- // TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
-
- ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
- destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
- if err := ensureDirectoryExists(destinationPath); err != nil {
- return err
- }
- defer func() {
- os.Remove(blob.BlobPath)
- os.RemoveAll(destinationPath)
- }()
-
- var tarSplitOutput bytes.Buffer
- uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath)
- if err != nil {
- return err
- }
-
- if os.Getuid() == 0 {
- if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
- return err
- }
- if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil {
- return err
- }
- } else {
- if err := os.MkdirAll(destinationPath, 0755); err != nil {
- return err
- }
- if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil {
- return err
- }
-
- if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil {
- return err
- }
- }
- return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size),
- fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize),
- fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()),
- fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))})
-
-}
-
-func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
- ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
- destinationPath := filepath.Dir(blob.BlobPath)
-
- return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
-}
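Both PutBlob and importBlob above carry a TODO about making long-running copies cancellable. One conventional way to honor ctx.Done() with a plain io.Copy is a small reader wrapper that checks the context on every Read; a sketch under that assumption (ctxReader is a made-up name, not an API of this library):

package main

import (
	"context"
	"fmt"
	"io"
	"strings"
	"time"
)

// ctxReader aborts an io.Copy as soon as ctx is cancelled.
type ctxReader struct {
	ctx context.Context
	r   io.Reader
}

func (c ctxReader) Read(p []byte) (int, error) {
	if err := c.ctx.Err(); err != nil {
		return 0, err // context cancelled or deadline exceeded
	}
	return c.r.Read(p)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	time.Sleep(2 * time.Millisecond) // let the deadline pass
	_, err := io.Copy(io.Discard, ctxReader{ctx, strings.NewReader("some blob data")})
	fmt.Println(err) // context deadline exceeded
}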
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- if d.repo == nil {
- repo, err := openRepo(d.ref.repo)
- if err != nil {
- return false, types.BlobInfo{}, err
- }
- d.repo = repo
- }
- branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
-
- found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
- if err != nil || !found {
- return found, types.BlobInfo{}, err
- }
-
- found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
- if err != nil || !found {
- return found, types.BlobInfo{}, err
- }
-
- found, data, err = readMetadata(d.repo, branch, "docker.size")
- if err != nil || !found {
- return found, types.BlobInfo{}, err
- }
-
- size, err := strconv.ParseInt(data, 10, 64)
- if err != nil {
- return false, types.BlobInfo{}, err
- }
-
- return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
-}
-
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
-// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error {
- d.manifest = string(manifestBlob)
-
- if err := json.Unmarshal(manifestBlob, &d.schema); err != nil {
- return err
- }
-
- manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath())
- if err := ensureParentDirectoryExists(manifestPath); err != nil {
- return err
- }
-
- digest, err := manifest.Digest(manifestBlob)
- if err != nil {
- return err
- }
- d.digest = digest
-
- return ioutil.WriteFile(manifestPath, manifestBlob, 0644)
-}
-
-func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
- path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0))
- if err := ensureParentDirectoryExists(path); err != nil {
- return err
- }
-
- for i, sig := range signatures {
- signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
- if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil {
- return err
- }
- }
- d.signaturesLen = len(signatures)
- return nil
-}
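PutSignatures above stores each signature as its own file (signature-1, signature-2, and so on, via signaturePath further down), and Commit then records the count as "signatures=N" in the commit metadata, so the source side can later fetch exactly N files back. A small self-contained round trip of that naming convention (directory layout only, no OSTree involved):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "sigs")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	sigs := [][]byte{[]byte("sig-a"), []byte("sig-b")}

	// Write: one file per signature, 1-based, mirroring signaturePath().
	for i, sig := range sigs {
		name := filepath.Join(dir, fmt.Sprintf("signature-%d", i+1))
		if err := os.WriteFile(name, sig, 0644); err != nil {
			panic(err)
		}
	}

	// Read back: the stored count tells the reader how many files to expect.
	count := len(sigs)
	for i := 1; i <= count; i++ {
		data, err := os.ReadFile(filepath.Join(dir, fmt.Sprintf("signature-%d", i)))
		if err != nil {
			panic(err)
		}
		fmt.Printf("signature-%d: %s\n", i, data)
	}
}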
-
-func (d *ostreeImageDestination) Commit(ctx context.Context) error {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
-
- repo, err := otbuiltin.OpenRepo(d.ref.repo)
- if err != nil {
- return err
- }
-
- _, err = repo.PrepareTransaction()
- if err != nil {
- return err
- }
-
- var selinuxHnd *C.struct_selabel_handle
-
- if os.Getuid() == 0 && selinux.GetEnabled() {
- selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0)
- if selinuxHnd == nil {
- return errors.Wrapf(err, "cannot open the SELinux DB")
- }
-
- defer C.selabel_close(selinuxHnd)
- }
-
- checkLayer := func(hash string) error {
- blob := d.blobs[hash]
- // if the blob is not present in d.blobs then it is already stored in OSTree,
- // and we don't need to import it.
- if blob == nil {
- return nil
- }
- err := d.importBlob(selinuxHnd, repo, blob)
- if err != nil {
- return err
- }
-
- delete(d.blobs, hash)
- return nil
- }
- for _, layer := range d.schema.LayersDescriptors {
- hash := layer.Digest.Hex()
- if err = checkLayer(hash); err != nil {
- return err
- }
- }
- for _, layer := range d.schema.FSLayers {
- hash := layer.BlobSum.Hex()
- if err = checkLayer(hash); err != nil {
- return err
- }
- }
-
- // Import the other blobs that are not layers
- for _, blob := range d.blobs {
- err := d.importConfig(repo, blob)
- if err != nil {
- return err
- }
- }
-
- manifestPath := filepath.Join(d.tmpDirPath, "manifest")
-
- metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
- fmt.Sprintf("signatures=%d", d.signaturesLen),
- fmt.Sprintf("docker.digest=%s", string(d.digest))}
- if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil {
- return err
- }
-
- _, err = repo.CommitTransaction()
- return err
-}
-
-func ensureDirectoryExists(path string) error {
- if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
- if err := os.MkdirAll(path, 0755); err != nil {
- return err
- }
- }
- return nil
-}
-
-func ensureParentDirectoryExists(path string) error {
- return ensureDirectoryExists(filepath.Dir(path))
-}
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go
deleted file mode 100644
index 43d8f6837..000000000
--- a/vendor/github.com/containers/image/ostree/ostree_src.go
+++ /dev/null
@@ -1,416 +0,0 @@
-// +build containers_image_ostree
-
-package ostree
-
-import (
- "bytes"
- "context"
- "encoding/base64"
- "fmt"
- "io"
- "io/ioutil"
- "strconv"
- "strings"
- "unsafe"
-
- "github.com/containers/image/manifest"
- "github.com/containers/image/types"
- "github.com/containers/storage/pkg/ioutils"
- "github.com/klauspost/pgzip"
- digest "github.com/opencontainers/go-digest"
- glib "github.com/ostreedev/ostree-go/pkg/glibobject"
- "github.com/pkg/errors"
- "github.com/vbatts/tar-split/tar/asm"
- "github.com/vbatts/tar-split/tar/storage"
-)
-
-// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
-// #include <glib.h>
-// #include <glib-object.h>
-// #include <gio/gio.h>
-// #include <stdlib.h>
-// #include <ostree.h>
-// #include <gio/ginputstream.h>
-import "C"
-
-type ostreeImageSource struct {
- ref ostreeReference
- tmpDir string
- repo *C.struct_OstreeRepo
- // get the compressed layer by its uncompressed checksum
- compressed map[digest.Digest]digest.Digest
-}
-
-// newImageSource returns an ImageSource for reading from an existing ostree repository.
-func newImageSource(tmpDir string, ref ostreeReference) (types.ImageSource, error) {
- return &ostreeImageSource{ref: ref, tmpDir: tmpDir, compressed: nil}, nil
-}
-
-// Reference returns the reference used to set up this source.
-func (s *ostreeImageSource) Reference() types.ImageReference {
- return s.ref
-}
-
-// Close removes resources associated with an initialized ImageSource, if any.
-func (s *ostreeImageSource) Close() error {
- if s.repo != nil {
- C.g_object_unref(C.gpointer(s.repo))
- }
- return nil
-}
-
-func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) {
- var metadataKey string
- if isCompressed {
- metadataKey = "docker.uncompressed_size"
- } else {
- metadataKey = "docker.size"
- }
- b := fmt.Sprintf("ociimage/%s", blob)
- found, data, err := readMetadata(s.repo, b, metadataKey)
- if err != nil || !found {
- return 0, err
- }
- return strconv.ParseInt(data, 10, 64)
-}
-
-func (s *ostreeImageSource) getLenSignatures() (int64, error) {
- b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
- found, data, err := readMetadata(s.repo, b, "signatures")
- if err != nil {
- return -1, err
- }
- if !found {
- // if 'signatures' is not present, just return 0 signatures.
- return 0, nil
- }
- return strconv.ParseInt(data, 10, 64)
-}
-
-func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) {
- b := fmt.Sprintf("ociimage/%s", blob)
- found, out, err := readMetadata(s.repo, b, "tarsplit.output")
- if err != nil || !found {
- return nil, err
- }
- return base64.StdEncoding.DecodeString(out)
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
- if instanceDigest != nil {
- return nil, "", errors.Errorf(`Manifest lists are not supported by "ostree:"`)
- }
- if s.repo == nil {
- repo, err := openRepo(s.ref.repo)
- if err != nil {
- return nil, "", err
- }
- s.repo = repo
- }
-
- b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
- found, out, err := readMetadata(s.repo, b, "docker.manifest")
- if err != nil {
- return nil, "", err
- }
- if !found {
- return nil, "", errors.New("manifest not found")
- }
- m := []byte(out)
- return m, manifest.GuessMIMEType(m), nil
-}
-
-func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
- return nil, "", errors.New("manifest lists are not supported by this transport")
-}
-
-func openRepo(path string) (*C.struct_OstreeRepo, error) {
- var cerr *C.GError
- cpath := C.CString(path)
- defer C.free(unsafe.Pointer(cpath))
- pathc := C.g_file_new_for_path(cpath)
- defer C.g_object_unref(C.gpointer(pathc))
- repo := C.ostree_repo_new(pathc)
- r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
- if !r {
- C.g_object_unref(C.gpointer(repo))
- return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
- }
- return repo, nil
-}
-
-type ostreePathFileGetter struct {
- repo *C.struct_OstreeRepo
- parentRoot *C.GFile
-}
-
-type ostreeReader struct {
- stream *C.GFileInputStream
-}
-
-func (o ostreeReader) Close() error {
- C.g_object_unref(C.gpointer(o.stream))
- return nil
-}
-func (o ostreeReader) Read(p []byte) (int, error) {
- var cerr *C.GError
- instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type())
- stream := (*C.GInputStream)(unsafe.Pointer(instanceCast))
-
- b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr)
- if b == nil {
- return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
- }
- defer C.g_bytes_unref(b)
-
- count := int(C.g_bytes_get_size(b))
- if count == 0 {
- return 0, io.EOF
- }
- data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count]
- copy(p, data)
- return count, nil
-}
-
-func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) {
- var cerr *C.GError
- var ref *C.char
- defer C.free(unsafe.Pointer(ref))
-
- cCommit := C.CString(commit)
- defer C.free(unsafe.Pointer(cCommit))
-
- if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) {
- return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
- }
-
- if ref == nil {
- return false, "", nil
- }
-
- var variant *C.GVariant
- if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) {
- return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
- }
- defer C.g_variant_unref(variant)
- if variant != nil {
- cKey := C.CString(key)
- defer C.free(unsafe.Pointer(cKey))
-
- metadata := C.g_variant_get_child_value(variant, 0)
- defer C.g_variant_unref(metadata)
-
- data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil)
- if data != nil {
- defer C.g_variant_unref(data)
- ptr := (*C.char)(C.g_variant_get_string(data, nil))
- val := C.GoString(ptr)
- return true, val, nil
- }
- }
- return false, "", nil
-}
-
-func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) {
- var cerr *C.GError
- var parentRoot *C.GFile
- cCommit := C.CString(commit)
- defer C.free(unsafe.Pointer(cCommit))
- if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) {
- return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
- }
-
- C.g_object_ref(C.gpointer(repo))
-
- return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil
-}
-
-func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
- var file *C.GFile
- if strings.HasPrefix(filename, "./") {
- filename = filename[2:]
- }
- cfilename := C.CString(filename)
- defer C.free(unsafe.Pointer(cfilename))
-
- file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename))
-
- var cerr *C.GError
- stream := C.g_file_read(file, nil, &cerr)
- if stream == nil {
- return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
- }
-
- return &ostreeReader{stream: stream}, nil
-}
-
-func (o ostreePathFileGetter) Close() {
- C.g_object_unref(C.gpointer(o.repo))
- C.g_object_unref(C.gpointer(o.parentRoot))
-}
-
-func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) {
- getter, err := newOSTreePathFileGetter(s.repo, commit)
- if err != nil {
- return nil, err
- }
- defer getter.Close()
-
- return getter.Get(path)
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *ostreeImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
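GetBlob below reassembles a layer from two pieces: the tar-split metadata stored at commit time (the tarsplit.output value) and the file contents checked into the OSTree branch. The metadata records the exact tar framing, so the original byte stream can be rebuilt losslessly. A self-contained round trip with the same tar-split APIs, using an in-memory file store instead of an OSTree commit:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

// makeTar builds a tiny tar archive to feed the round trip.
func makeTar() []byte {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := []byte("hello world")
	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(body))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(body); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}
	return buf.Bytes()
}

func main() {
	tarStream := makeTar()

	// Disassemble: file payloads go into the store, tar framing into metadata.
	store := storage.NewBufferFileGetPutter()
	var metadata bytes.Buffer
	rdr, err := asm.NewInputTarStream(bytes.NewReader(tarStream), storage.NewJSONPacker(&metadata), store)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(io.Discard, rdr); err != nil {
		panic(err)
	}

	// Reassemble the byte-identical tar from metadata plus payloads.
	out, err := io.ReadAll(asm.NewOutputTarStream(store, storage.NewJSONUnpacker(&metadata)))
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip identical:", bytes.Equal(tarStream, out))
}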
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
-
- blob := info.Digest.Hex()
-
- // Ensure s.compressed is initialized. It is built by LayerInfosForCopy.
- if s.compressed == nil {
- _, err := s.LayerInfosForCopy(ctx)
- if err != nil {
- return nil, -1, err
- }
-
- }
- compressedBlob, isCompressed := s.compressed[info.Digest]
- if isCompressed {
- blob = compressedBlob.Hex()
- }
- branch := fmt.Sprintf("ociimage/%s", blob)
-
- if s.repo == nil {
- repo, err := openRepo(s.ref.repo)
- if err != nil {
- return nil, 0, err
- }
- s.repo = repo
- }
-
- layerSize, err := s.getBlobUncompressedSize(blob, isCompressed)
- if err != nil {
- return nil, 0, err
- }
-
- tarsplit, err := s.getTarSplitData(blob)
- if err != nil {
- return nil, 0, err
- }
-
- // if tarsplit is nil we are looking at the manifest. Return the file in /content directly.
- if tarsplit == nil {
- file, err := s.readSingleFile(branch, "/content")
- if err != nil {
- return nil, 0, err
- }
- return file, layerSize, nil
- }
-
- mf := bytes.NewReader(tarsplit)
- mfz, err := pgzip.NewReader(mf)
- if err != nil {
- return nil, 0, err
- }
- metaUnpacker := storage.NewJSONUnpacker(mfz)
-
- getter, err := newOSTreePathFileGetter(s.repo, branch)
- if err != nil {
- mfz.Close()
- return nil, 0, err
- }
-
- ots := asm.NewOutputTarStream(getter, metaUnpacker)
-
- rc := ioutils.NewReadCloserWrapper(ots, func() error {
- getter.Close()
- mfz.Close()
- return ots.Close()
- })
- return rc, layerSize, nil
-}
-
-func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- if instanceDigest != nil {
- return nil, errors.New("manifest lists are not supported by this transport")
- }
- lenSignatures, err := s.getLenSignatures()
- if err != nil {
- return nil, err
- }
- branch := fmt.Sprintf("ociimage/%s", s.ref.branchName)
-
- if s.repo == nil {
- repo, err := openRepo(s.ref.repo)
- if err != nil {
- return nil, err
- }
- s.repo = repo
- }
-
- signatures := [][]byte{}
- for i := int64(1); i <= lenSignatures; i++ {
- sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i))
- if err != nil {
- return nil, err
- }
- defer sigReader.Close()
-
- sig, err := ioutil.ReadAll(sigReader)
- if err != nil {
- return nil, err
- }
- signatures = append(signatures, sig)
- }
- return signatures, nil
-}
-
-// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
-// the image, after they've been decompressed.
-func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
- updatedBlobInfos := []types.BlobInfo{}
- manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
- if err != nil {
- return nil, err
- }
-
- man, err := manifest.FromBlob(manifestBlob, manifestType)
- if err != nil {
- return nil, err
- }
-
- s.compressed = make(map[digest.Digest]digest.Digest)
-
- layerBlobs := man.LayerInfos()
-
- for _, layerBlob := range layerBlobs {
- branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
- found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
- if err != nil || !found {
- return nil, err
- }
-
- found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size")
- if err != nil || !found {
- return nil, err
- }
-
- uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64)
- if err != nil {
- return nil, err
- }
- uncompressedDigest := digest.Digest(uncompressedDigestStr)
- blobInfo := types.BlobInfo{
- Digest: uncompressedDigest,
- Size: uncompressedSize,
- MediaType: layerBlob.MediaType,
- }
- s.compressed[uncompressedDigest] = layerBlob.Digest
- updatedBlobInfos = append(updatedBlobInfos, blobInfo)
- }
- return updatedBlobInfos, nil
-}
diff --git a/vendor/github.com/containers/image/ostree/ostree_transport.go b/vendor/github.com/containers/image/ostree/ostree_transport.go
deleted file mode 100644
index 2e86623ac..000000000
--- a/vendor/github.com/containers/image/ostree/ostree_transport.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// +build containers_image_ostree
-
-package ostree
-
-import (
- "bytes"
- "context"
- "fmt"
- "os"
- "path/filepath"
- "regexp"
- "strings"
-
- "github.com/containers/image/directory/explicitfilepath"
- "github.com/containers/image/docker/reference"
- "github.com/containers/image/image"
- "github.com/containers/image/transports"
- "github.com/containers/image/types"
- "github.com/pkg/errors"
-)
-
-const defaultOSTreeRepo = "/ostree/repo"
-
-// Transport is an ImageTransport for ostree paths.
-var Transport = ostreeTransport{}
-
-type ostreeTransport struct{}
-
-func (t ostreeTransport) Name() string {
- return "ostree"
-}
-
-func init() {
- transports.Register(Transport)
-}
-
-// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
-// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
-// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
-// scope passed to this function will not be "", that value is always allowed.
-func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
- sep := strings.Index(scope, ":")
- if sep < 0 {
- return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
- }
- repo := scope[:sep]
-
- if !strings.HasPrefix(repo, "/") {
- return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
- }
- cleaned := filepath.Clean(repo)
- if cleaned != repo {
- return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
- }
-
- // FIXME? In the namespaces within a repo,
- // we could be verifying the various character set and length restrictions
- // from docker/distribution/reference.regexp.go, but other than that there
- // are few semantically invalid strings.
- return nil
-}
-
-// ostreeReference is an ImageReference for ostree paths.
-type ostreeReference struct {
- image string
- branchName string
- repo string
-}
-
-type ostreeImageCloser struct {
- types.ImageCloser
- size int64
-}
-
-func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
- var repo = ""
- var image = ""
- s := strings.SplitN(ref, "@/", 2)
- if len(s) == 1 {
- image, repo = s[0], defaultOSTreeRepo
- } else {
- image, repo = s[0], "/"+s[1]
- }
-
- return NewReference(image, repo)
-}
-
-// NewReference returns an OSTree reference for a specified repo and image.
-func NewReference(image string, repo string) (types.ImageReference, error) {
- // image is not _really_ in a containers/image/docker/reference format;
- // as far as the libOSTree ociimage/* namespace is concerned, it is more or
- // less an arbitrary string with an implied tag.
- // Parse the image using reference.ParseNormalizedNamed so that we can
- // check whether the image has a tag specified and we can add ":latest" if needed
- ostreeImage, err := reference.ParseNormalizedNamed(image)
- if err != nil {
- return nil, err
- }
-
- if reference.IsNameOnly(ostreeImage) {
- image = image + ":latest"
- }
-
- resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
- if err != nil {
- // With os.IsNotExist(err), the parent directory of repo does not exist either;
- // that should ordinarily not happen, but it would be a bit weird to reject
- // references which do not specify a repo just because the implicit defaultOSTreeRepo
- // does not exist.
- if os.IsNotExist(err) && repo == defaultOSTreeRepo {
- resolved = repo
- } else {
- return nil, err
- }
- }
- // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
- // from being ambiguous with values of PolicyConfigurationIdentity.
- if strings.Contains(resolved, ":") {
- return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
- }
-
- return ostreeReference{
- image: image,
- branchName: encodeOStreeRef(image),
- repo: resolved,
- }, nil
-}
-
-func (ref ostreeReference) Transport() types.ImageTransport {
- return Transport
-}
-
-// StringWithinTransport returns a string representation of the reference, which MUST be such that
-// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
-// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
-// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
-func (ref ostreeReference) StringWithinTransport() string {
- return fmt.Sprintf("%s@%s", ref.image, ref.repo)
-}
-
-// DockerReference returns a Docker reference associated with this reference
-// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
-// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
-func (ref ostreeReference) DockerReference() reference.Named {
- return nil
-}
-
-func (ref ostreeReference) PolicyConfigurationIdentity() string {
- return fmt.Sprintf("%s:%s", ref.repo, ref.image)
-}
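To make the identity/namespace scheme concrete: for an image "example.com/foo/bar:latest" stored in "/ostree/repo", PolicyConfigurationIdentity above yields "/ostree/repo:example.com/foo/bar:latest", and PolicyConfigurationNamespaces below walks the name back one path component at a time. A tiny standalone sketch of that prefix walk (namespaces is a hypothetical stand-in for the method):

package main

import (
	"fmt"
	"strings"
)

// namespaces mirrors the prefix walk in PolicyConfigurationNamespaces below:
// strip the :tag, then drop one path component per step.
func namespaces(repo, image string) []string {
	name := strings.SplitN(image, ":", 2)[0]
	var res []string
	for {
		res = append(res, fmt.Sprintf("%s:%s", repo, name))
		i := strings.LastIndex(name, "/")
		if i == -1 {
			break
		}
		name = name[:i]
	}
	return res
}

func main() {
	for _, ns := range namespaces("/ostree/repo", "example.com/foo/bar:latest") {
		fmt.Println(ns)
	}
	// Output:
	// /ostree/repo:example.com/foo/bar
	// /ostree/repo:example.com/foo
	// /ostree/repo:example.com
}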
-
-// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
-// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
-// in order, terminating on first match, and an implicit "" is always checked at the end.
-// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
-// and each following element to be a prefix of the element preceding it.
-func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
- s := strings.SplitN(ref.image, ":", 2)
- if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
- panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
- }
- name := s[0]
- res := []string{}
- for {
- res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))
-
- lastSlash := strings.LastIndex(name, "/")
- if lastSlash == -1 {
- break
- }
- name = name[:lastSlash]
- }
- return res
-}
-
-func (s *ostreeImageCloser) Size() (int64, error) {
- return s.size, nil
-}
-
-// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned ImageCloser.
-// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
-// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- var tmpDir string
- if sys == nil || sys.OSTreeTmpDirPath == "" {
- tmpDir = os.TempDir()
- } else {
- tmpDir = sys.OSTreeTmpDirPath
- }
- src, err := newImageSource(tmpDir, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
-}
-
-// NewImageSource returns a types.ImageSource for this reference.
-// The caller must call .Close() on the returned ImageSource.
-func (ref ostreeReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
- var tmpDir string
- if sys == nil || sys.OSTreeTmpDirPath == "" {
- tmpDir = os.TempDir()
- } else {
- tmpDir = sys.OSTreeTmpDirPath
- }
- return newImageSource(tmpDir, ref)
-}
-
-// NewImageDestination returns a types.ImageDestination for this reference.
-// The caller must call .Close() on the returned ImageDestination.
-func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
- var tmpDir string
- if sys == nil || sys.OSTreeTmpDirPath == "" {
- tmpDir = os.TempDir()
- } else {
- tmpDir = sys.OSTreeTmpDirPath
- }
- return newImageDestination(ref, tmpDir)
-}
-
-// DeleteImage deletes the named image from the registry, if supported.
-func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for ostree: images")
-}
-
-var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`)
-
-func encodeOStreeRef(in string) string {
- var buffer bytes.Buffer
- for i := range in {
- sub := in[i : i+1]
- if ostreeRefRegexp.MatchString(sub) {
- buffer.WriteString(sub)
- } else {
- buffer.WriteString(fmt.Sprintf("_%02X", sub[0]))
- }
-
- }
- return buffer.String()
-}
-
-// manifestPath returns a path for the manifest within an ostree using our conventions.
-func (ref ostreeReference) manifestPath() string {
- return filepath.Join("manifest", "manifest.json")
-}
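encodeOStreeRef above keeps bytes matching [A-Za-z0-9.-] and hex-escapes everything else as _XX, because OSTree branch names cannot contain characters like '/' or ':'. A standalone mirror of the same loop, showing the escaping on a concrete input:

package main

import (
	"fmt"
	"regexp"
)

var safeByte = regexp.MustCompile(`^[A-Za-z0-9.-]$`)

// encode passes safe bytes through and escapes everything else
// as _XX (two uppercase hex digits), like encodeOStreeRef above.
func encode(in string) string {
	out := ""
	for i := range in {
		sub := in[i : i+1]
		if safeByte.MatchString(sub) {
			out += sub
		} else {
			out += fmt.Sprintf("_%02X", sub[0])
		}
	}
	return out
}

func main() {
	fmt.Println(encode("example.com/foo/bar:latest"))
	// Output: example.com_2Ffoo_2Fbar_3Alatest
}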
-
-// signaturePath returns a path for a signature within an ostree using our conventions.
-func (ref ostreeReference) signaturePath(index int) string {
- return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1))
-}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go
deleted file mode 100644
index 19d0a6c80..000000000
--- a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go
+++ /dev/null
@@ -1,332 +0,0 @@
-// Package boltdb implements a BlobInfoCache backed by BoltDB.
-package boltdb
-
-import (
- "fmt"
- "os"
- "sync"
- "time"
-
- "github.com/containers/image/pkg/blobinfocache/internal/prioritize"
- "github.com/containers/image/types"
- bolt "github.com/etcd-io/bbolt"
- "github.com/opencontainers/go-digest"
- "github.com/sirupsen/logrus"
-)
-
-var (
- // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
- // we can simply start over with a different filename; update blobInfoCacheFilename.
-
- // FIXME: For CRI-O, does this need to hide information between different users?
-
- // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
- uncompressedDigestBucket = []byte("uncompressedDigest")
- // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
- // (as a set of key=digest, value="" pairs)
- digestByUncompressedBucket = []byte("digestByUncompressed")
- // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
- // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
- knownLocationsBucket = []byte("knownLocations")
-)
-
-// Concurrency:
-// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
-// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
-
-// pathLock contains a lock for a specific BoltDB database path.
-type pathLock struct {
- refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
- mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
-}
-
-var (
- // pathLocks contains a lock for each currently open file.
- // This must be global so that independently created instances of boltDBCache exclude each other.
- // The map is protected by pathLocksMutex.
- // FIXME? Should this be based on device:inode numbers instead of paths?
- pathLocks = map[string]*pathLock{}
- pathLocksMutex = sync.Mutex{}
-)
-
-// lockPath obtains the pathLock for path.
-// The caller must call unlockPath eventually.
-func lockPath(path string) {
- pl := func() *pathLock { // A scope for defer
- pathLocksMutex.Lock()
- defer pathLocksMutex.Unlock()
- pl, ok := pathLocks[path]
- if ok {
- pl.refCount++
- } else {
- pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
- pathLocks[path] = pl
- }
- return pl
- }()
- pl.mutex.Lock()
-}
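The intended call pattern for this pair is strictly scoped: one goroutine per database path at a time, with every open/close of the BoltDB file bracketed by the lock. A sketch of that pattern, assuming the package-internal lockPath/unlockPath above (withPathLocked is a made-up helper, not part of this file):

// withPathLocked runs fn while holding the per-path lock, so only one
// goroutine per database path can open, use, and close the file at a time.
func withPathLocked(path string, fn func() error) error {
	lockPath(path)
	defer unlockPath(path)
	return fn()
}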
-
-// unlockPath releases the pathLock for path.
-func unlockPath(path string) {
- pathLocksMutex.Lock()
- defer pathLocksMutex.Unlock()
- pl, ok := pathLocks[path]
- if !ok {
- // Should this return an error instead? BlobInfoCache ultimately ignores errors…
- panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
- }
- pl.mutex.Unlock()
- pl.refCount--
- if pl.refCount == 0 {
- delete(pathLocks, path)
- }
-}
-
-// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
-//
-// Note that we don’t keep the database open across operations, because that would lock the file and block any other
-// users; instead, we need to open/close it for every single write or lookup.
-type cache struct {
- path string
-}
-
-// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
-//
-// Most users should call blobinfocache.DefaultCache instead.
-func New(path string) types.BlobInfoCache {
- return &cache{path: path}
-}
-
-// view runs the specified fn within a read-only transaction on the database.
-func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
- // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
- // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail,
- // all while holding a read lock, blocking any future writes.
- // Hence this preliminary check, which is RACY: Another process could remove the file
- // between the Lstat call and opening the database.
- if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
- return err
- }
-
- lockPath(bdc.path)
- defer unlockPath(bdc.path)
- db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
- if err != nil {
- return err
- }
- defer func() {
- if err := db.Close(); retErr == nil && err != nil {
- retErr = err
- }
- }()
-
- return db.View(fn)
-}
-
-// update runs the specified fn within a read-write transaction on the database.
-func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
- lockPath(bdc.path)
- defer unlockPath(bdc.path)
- db, err := bolt.Open(bdc.path, 0600, nil)
- if err != nil {
- return err
- }
- defer func() {
- if err := db.Close(); retErr == nil && err != nil {
- retErr = err
- }
- }()
-
- return db.Update(fn)
-}
-
-// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
-func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
- if b := tx.Bucket(uncompressedDigestBucket); b != nil {
- if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
- d, err := digest.Parse(string(uncompressedBytes))
- if err == nil {
- return d
- }
- // FIXME? Log err (but throttle the log volume on repeated accesses)?
- }
- }
- // Presence in digestByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
- // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
- // when we already record a (compressed, uncompressed) pair.
- if b := tx.Bucket(digestByUncompressedBucket); b != nil {
- if b = b.Bucket([]byte(anyDigest.String())); b != nil {
- c := b.Cursor()
- if k, _ := c.First(); k != nil { // The bucket is non-empty
- return anyDigest
- }
- }
- }
- return ""
-}
-
-// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
-// May return anyDigest if it is known to be uncompressed.
-// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
-func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { - var res digest.Digest - if err := bdc.view(func(tx *bolt.Tx) error { - res = bdc.uncompressedDigest(tx, anyDigest) - return nil - }); err != nil { // Including os.IsNotExist(err) - return "" // FIXME? Log err (but throttle the log volume on repeated accesses)? - } - return res -} - -// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. -// It’s allowed for anyDigest == uncompressed. -// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. -// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. -// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) -func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { - _ = bdc.update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket) - if err != nil { - return err - } - key := []byte(anyDigest.String()) - if previousBytes := b.Get(key); previousBytes != nil { - previous, err := digest.Parse(string(previousBytes)) - if err != nil { - return err - } - if previous != uncompressed { - logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) - } - } - if err := b.Put(key, []byte(uncompressed.String())); err != nil { - return err - } - - b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String())) - if err != nil { - return err - } - if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again. - return err - } - return nil - }) // FIXME? Log error (but throttle the log volume on repeated accesses)? -} - -// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, -// and can be reused given the opaque location data. -func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { - _ = bdc.update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists(knownLocationsBucket) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(transport.Name())) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque)) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String())) - if err != nil { - return err - } - value, err := time.Now().MarshalBinary() - if err != nil { - return err - } - if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry. - return err - } - return nil - }) // FIXME? Log error (but throttle the log volume on repeated accesses)? -} - -// appendReplacementCandiates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates. 
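RecordKnownLocation above stores each record under buckets nested by transport name, scope, and blob digest. A standalone sketch of that layout against the same github.com/etcd-io/bbolt API (the file path, digest, and location strings are made up for the demo):

package main

import (
	"fmt"
	"time"

	bolt "github.com/etcd-io/bbolt"
)

// Persist one (transport, scope, digest) -> (location, timestamp) record
// using the same nested-bucket shape as knownLocationsBucket.
func main() {
	db, err := bolt.Open("/tmp/demo-cache.boltdb", 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("knownLocations"))
		if err != nil {
			return err
		}
		// Descend transport name -> scope -> blob digest, creating as needed.
		for _, key := range []string{"docker", "docker.io/library", "sha256:0123456789abcdef"} {
			if b, err = b.CreateBucketIfNotExists([]byte(key)); err != nil {
				return err
			}
		}
		value, err := time.Now().MarshalBinary()
		if err != nil {
			return err
		}
		// Possibly overwriting an older entry, exactly as in the code above.
		return b.Put([]byte("some-opaque-location"), value)
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("record written")
}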
-func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime { - b := scopeBucket.Bucket([]byte(digest.String())) - if b == nil { - return candidates - } - _ = b.ForEach(func(k, v []byte) error { - t := time.Time{} - if err := t.UnmarshalBinary(v); err != nil { - return err - } - candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: types.BICReplacementCandidate{ - Digest: digest, - Location: types.BICLocationReference{Opaque: string(k)}, - }, - LastSeen: t, - }) - return nil - }) // FIXME? Log error (but throttle the log volume on repeated accesses)? - return candidates -} - -// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused -// within the specified (transport scope) (if they still exist, which is not guaranteed). -// -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, -// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same -// uncompressed digest. -func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { - res := []prioritize.CandidateWithTime{} - var uncompressedDigestValue digest.Digest // = "" - if err := bdc.view(func(tx *bolt.Tx) error { - scopeBucket := tx.Bucket(knownLocationsBucket) - if scopeBucket == nil { - return nil - } - scopeBucket = scopeBucket.Bucket([]byte(transport.Name())) - if scopeBucket == nil { - return nil - } - scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque)) - if scopeBucket == nil { - return nil - } - - res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest) - if canSubstitute { - if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" { - b := tx.Bucket(digestByUncompressedBucket) - if b != nil { - b = b.Bucket([]byte(uncompressedDigestValue.String())) - if b != nil { - if err := b.ForEach(func(k, _ []byte) error { - d, err := digest.Parse(string(k)) - if err != nil { - return err - } - if d != primaryDigest && d != uncompressedDigestValue { - res = bdc.appendReplacementCandidates(res, scopeBucket, d) - } - return nil - }); err != nil { - return err - } - } - } - if uncompressedDigestValue != primaryDigest { - res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue) - } - } - } - return nil - }); err != nil { // Including os.IsNotExist(err) - return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)? - } - - return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue) -} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/pkg/blobinfocache/default.go deleted file mode 100644 index 357333215..000000000 --- a/vendor/github.com/containers/image/pkg/blobinfocache/default.go +++ /dev/null @@ -1,75 +0,0 @@ -package blobinfocache - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - - "github.com/containers/image/pkg/blobinfocache/boltdb" - "github.com/containers/image/pkg/blobinfocache/memory" - "github.com/containers/image/types" - "github.com/sirupsen/logrus" -) - -const ( - // blobInfoCacheFilename is the file name used for blob info caches. 
- // If the format changes in an incompatible way, increase the version number. - blobInfoCacheFilename = "blob-info-cache-v1.boltdb" - // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes. - systemBlobInfoCacheDir = "/var/lib/containers/cache" -) - -// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid. -// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory. -func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) { - if sys != nil && sys.BlobInfoCacheDir != "" { - return sys.BlobInfoCacheDir, nil - } - - // FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged - // and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe. - if euid == 0 { - if sys != nil && sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil - } - return systemBlobInfoCacheDir, nil - } - - // This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts. - dataDir := os.Getenv("XDG_DATA_HOME") - if dataDir == "" { - home := os.Getenv("HOME") - if home == "" { - return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty") - } - dataDir = filepath.Join(home, ".local", "share") - } - return filepath.Join(dataDir, "containers", "cache"), nil -} - -func getRootlessUID() int { - uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") - if uidEnv != "" { - u, _ := strconv.Atoi(uidEnv) - return u - } - return os.Geteuid() -} - -// DefaultCache returns the default BlobInfoCache implementation appropriate for sys. -func DefaultCache(sys *types.SystemContext) types.BlobInfoCache { - dir, err := blobInfoCacheDir(sys, getRootlessUID()) - if err != nil { - logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename) - return memory.New() - } - path := filepath.Join(dir, blobInfoCacheFilename) - if err := os.MkdirAll(dir, 0700); err != nil { - logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err) - return memory.New() - } - - logrus.Debugf("Using blob info cache at %s", path) - return boltdb.New(path) -} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go deleted file mode 100644 index 5479319de..000000000 --- a/vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go +++ /dev/null @@ -1,110 +0,0 @@ -// Package prioritize provides utilities for prioritizing locations in -// types.BlobInfoCache.CandidateLocations. -package prioritize - -import ( - "sort" - "time" - - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" -) - -// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates, -// and therefore ultimately by types.BlobInfoCache.CandidateLocations. -// This is a heuristic/guess, and could well use a different value. -const replacementAttempts = 5 - -// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
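For quick reading, the path-selection policy of blobInfoCacheDir condenses to the sketch below; a simplified standalone version in which the SystemContext overrides and the Windows caveat are omitted, and the function name is mine:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cacheDir condenses the selection logic above: root gets the system-wide
// location, everyone else an XDG data directory derived from the environment.
func cacheDir(euid int) (string, error) {
	if euid == 0 {
		return "/var/lib/containers/cache", nil
	}
	dataDir := os.Getenv("XDG_DATA_HOME")
	if dataDir == "" {
		home := os.Getenv("HOME")
		if home == "" {
			return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
		}
		dataDir = filepath.Join(home, ".local", "share")
	}
	return filepath.Join(dataDir, "containers", "cache"), nil
}

func main() {
	dir, err := cacheDir(os.Geteuid())
	if err != nil {
		fmt.Println("falling back to a memory-only cache:", err)
		return
	}
	fmt.Println("blob info cache would live under", dir)
}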
-type CandidateWithTime struct { - Candidate types.BICReplacementCandidate // The replacement candidate - LastSeen time.Time // Time the candidate was last known to exist (either read or written) -} - -// candidateSortState is a local state implementing sort.Interface on candidates to prioritize, -// along with the specially-treated digest values for the implementation of sort.Interface.Less -type candidateSortState struct { - cs []CandidateWithTime // The entries to sort - primaryDigest digest.Digest // The digest the user actually asked for - uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest -} - -func (css *candidateSortState) Len() int { - return len(css.cs) -} - -func (css *candidateSortState) Less(i, j int) bool { - xi := css.cs[i] - xj := css.cs[j] - - // primaryDigest entries come first, more recent first. - // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first. - // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) - - // First, deal with the primaryDigest/uncompressedDigest cases: - if xi.Candidate.Digest != xj.Candidate.Digest { - // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter - if xi.Candidate.Digest == css.primaryDigest { - return true - } - if xj.Candidate.Digest == css.primaryDigest { - return false - } - if css.uncompressedDigest != "" { - if xi.Candidate.Digest == css.uncompressedDigest { - return false - } - if xj.Candidate.Digest == css.uncompressedDigest { - return true - } - } - } else { // xi.Candidate.Digest == xj.Candidate.Digest - // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time - if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) { - return xi.LastSeen.After(xj.LastSeen) - } - } - - // Neither of the digests are primaryDigest/uncompressedDigest: - if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time - return xi.LastSeen.After(xj.LastSeen) - } - // Fall back to digest, if timestamps end up _exactly_ the same (how?!) - return xi.Candidate.Digest < xj.Candidate.Digest -} - -func (css *candidateSortState) Swap(i, j int) { - css.cs[i], css.cs[j] = css.cs[j], css.cs[i] -} - -// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the -// number of entries to limit, only to make testing simpler. -func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate { - // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should - // compare equal. 
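The comparator above encodes a three-tier priority. A trimmed-down model using sort.Slice, which keeps only the primary-digest rule, the recency rule, and the deterministic tie-break (the uncompressed-digest special case is omitted, and the types are invented):

package main

import (
	"fmt"
	"sort"
	"time"
)

type candidate struct {
	digest   string
	lastSeen time.Time
}

// prioritize sorts primary-digest entries first regardless of time, then
// everything else by recency, with the digest string as a stable tie-break.
func prioritize(cs []candidate, primary string) {
	sort.Slice(cs, func(i, j int) bool {
		if (cs[i].digest == primary) != (cs[j].digest == primary) {
			return cs[i].digest == primary // the primary digest always wins
		}
		if !cs[i].lastSeen.Equal(cs[j].lastSeen) {
			return cs[i].lastSeen.After(cs[j].lastSeen) // more recent first
		}
		return cs[i].digest < cs[j].digest // deterministic order on exact ties
	})
}

func main() {
	now := time.Now()
	cs := []candidate{
		{"sha256:bbb", now},
		{"sha256:aaa", now.Add(-time.Hour)}, // old, but it is the primary digest
		{"sha256:ccc", now.Add(-time.Minute)},
	}
	prioritize(cs, "sha256:aaa")
	for _, c := range cs {
		fmt.Println(c.digest) // aaa, then bbb, then ccc
	}
}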
- sort.Sort(&candidateSortState{ - cs: cs, - primaryDigest: primaryDigest, - uncompressedDigest: uncompressedDigest, - }) - - resLength := len(cs) - if resLength > maxCandidates { - resLength = maxCandidates - } - res := make([]types.BICReplacementCandidate, resLength) - for i := range res { - res[i] = cs[i].Candidate - } - return res -} - -// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times, -// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), -// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations. -// -// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course -// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.) -func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate { - return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts) -} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go deleted file mode 100644 index dfb338634..000000000 --- a/vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go +++ /dev/null @@ -1,145 +0,0 @@ -// Package memory implements an in-memory BlobInfoCache. -package memory - -import ( - "sync" - "time" - - "github.com/containers/image/pkg/blobinfocache/internal/prioritize" - "github.com/containers/image/types" - digest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -// locationKey only exists to make lookup in knownLocations easier. -type locationKey struct { - transport string - scope types.BICTransportScope - blobDigest digest.Digest -} - -// cache implements an in-memory-only BlobInfoCache -type cache struct { - mutex sync.Mutex - // The following fields can only be accessed with mutex held. - uncompressedDigests map[digest.Digest]digest.Digest - digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest - knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference -} - -// New returns a BlobInfoCache implementation which is in-memory only. -// -// This is primarily intended for tests, but also used as a fallback -// if blobinfocache.DefaultCache can’t determine, or set up, the -// location for a persistent cache. Most users should use -// blobinfocache.DefaultCache. instead of calling this directly. -// Manual users of types.{ImageSource,ImageDestination} might also use -// this instead of a persistent cache. -func New() types.BlobInfoCache { - return &cache{ - uncompressedDigests: map[digest.Digest]digest.Digest{}, - digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{}, - knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, - } -} - -// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. -// May return anyDigest if it is known to be uncompressed. 
-// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). -func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { - mem.mutex.Lock() - defer mem.mutex.Unlock() - return mem.uncompressedDigestLocked(anyDigest) -} - -// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held. -func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest { - if d, ok := mem.uncompressedDigests[anyDigest]; ok { - return d - } - // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest. - // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings - // when we already record a (compressed, uncompressed) pair. - if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 { - return anyDigest - } - return "" -} - -// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. -// It’s allowed for anyDigest == uncompressed. -// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. -// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. -// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) -func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { - mem.mutex.Lock() - defer mem.mutex.Unlock() - if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed { - logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) - } - mem.uncompressedDigests[anyDigest] = uncompressed - - anyDigestSet, ok := mem.digestsByUncompressed[uncompressed] - if !ok { - anyDigestSet = map[digest.Digest]struct{}{} - mem.digestsByUncompressed[uncompressed] = anyDigestSet - } - anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again. -} - -// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, -// and can be reused given the opaque location data. -func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { - mem.mutex.Lock() - defer mem.mutex.Unlock() - key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest} - locationScope, ok := mem.knownLocations[key] - if !ok { - locationScope = map[types.BICLocationReference]time.Time{} - mem.knownLocations[key] = locationScope - } - locationScope[location] = time.Now() // Possibly overwriting an older entry. -} - -// appendReplacementCandiates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. 
-func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime { - locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present - for l, t := range locations { - candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: types.BICReplacementCandidate{ - Digest: digest, - Location: l, - }, - LastSeen: t, - }) - } - return candidates -} - -// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused -// within the specified (transport scope) (if they still exist, which is not guaranteed). -// -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, -// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same -// uncompressed digest. -func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { - mem.mutex.Lock() - defer mem.mutex.Unlock() - res := []prioritize.CandidateWithTime{} - res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest) - var uncompressedDigest digest.Digest // = "" - if canSubstitute { - if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { - otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map - for d := range otherDigests { - if d != primaryDigest && d != uncompressedDigest { - res = mem.appendReplacementCandidates(res, transport, scope, d) - } - } - if uncompressedDigest != primaryDigest { - res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest) - } - } - } - return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest) -} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/pkg/blobinfocache/none/none.go deleted file mode 100644 index e5dca25ce..000000000 --- a/vendor/github.com/containers/image/pkg/blobinfocache/none/none.go +++ /dev/null @@ -1,49 +0,0 @@ -// Package none implements a dummy BlobInfoCache which records no data. -package none - -import ( - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" -) - -// noCache implements a dummy BlobInfoCache which records no data. -type noCache struct { -} - -// NoCache implements BlobInfoCache by not recording any data. -// -// This exists primarily for implementations of configGetter for -// Manifest.Inspect, because configs only have one representation. -// Any use of BlobInfoCache with blobs should usually use at least a -// short-lived cache, ideally blobinfocache.DefaultCache. -var NoCache types.BlobInfoCache = noCache{} - -// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. -// May return anyDigest if it is known to be uncompressed. -// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). -func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { - return "" -} - -// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. -// It’s allowed for anyDigest == uncompressed. 
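The none package being removed here is a textbook null object: a stateless value that satisfies the whole interface with no-op methods, so callers can always hold a non-nil cache and never need nil checks. A generic sketch of the pattern (the interface and names are invented for illustration):

package main

import "fmt"

type Cache interface {
	Get(key string) (string, bool)
	Put(key, value string)
}

// noCache records nothing and returns nothing; it exists only so that a
// valid Cache value is always available.
type noCache struct{}

func (noCache) Get(key string) (string, bool) { return "", false }
func (noCache) Put(key, value string)         {}

// NoCache can be passed anywhere a Cache is required.
var NoCache Cache = noCache{}

func main() {
	var c Cache = NoCache
	c.Put("k", "v") // silently ignored
	if _, ok := c.Get("k"); !ok {
		fmt.Println("nothing is ever stored")
	}
}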
-// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. -// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. -// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) -func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { -} - -// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, -// and can be reused given the opaque location data. -func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { -} - -// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused -// within the specified (transport scope) (if they still exist, which is not guaranteed). -// -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, -// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same -// uncompressed digest. -func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { - return nil -} diff --git a/vendor/github.com/containers/image/pkg/compression/compression.go b/vendor/github.com/containers/image/pkg/compression/compression.go deleted file mode 100644 index aad2bfcf2..000000000 --- a/vendor/github.com/containers/image/pkg/compression/compression.go +++ /dev/null @@ -1,94 +0,0 @@ -package compression - -import ( - "bytes" - "compress/bzip2" - "io" - "io/ioutil" - - "github.com/klauspost/pgzip" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/ulikunitz/xz" -) - -// DecompressorFunc returns the decompressed stream, given a compressed stream. -// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). -type DecompressorFunc func(io.Reader) (io.ReadCloser, error) - -// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm. -func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { - return pgzip.NewReader(r) -} - -// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. -func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) { - return ioutil.NopCloser(bzip2.NewReader(r)), nil -} - -// XzDecompressor is a DecompressorFunc for the xz compression algorithm. -func XzDecompressor(r io.Reader) (io.ReadCloser, error) { - r, err := xz.NewReader(r) - if err != nil { - return nil, err - } - return ioutil.NopCloser(r), nil -} - -// compressionAlgos is an internal implementation detail of DetectCompression -var compressionAlgos = map[string]struct { - prefix []byte - decompressor DecompressorFunc -}{ - "gzip": {[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor}, // gzip (RFC 1952) - "bzip2": {[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor}, // bzip2 (decompress.c:BZ2_decompress) - "xz": {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt) -} - -// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. 
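DetectCompression, whose body follows, works by sniffing magic bytes and then stitching the consumed prefix back onto the stream. A self-contained sketch of that mechanism (the prefix table mirrors the one above; the function name is illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// detectMagic reports which known format, if any, the stream starts with,
// and returns a reader that still yields the full stream: the sniffed
// prefix is stitched back on with io.MultiReader.
func detectMagic(input io.Reader) (string, io.Reader, error) {
	prefixes := map[string][]byte{
		"gzip":  {0x1F, 0x8B, 0x08},
		"bzip2": {0x42, 0x5A, 0x68},
		"xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	}
	buffer := make([]byte, 8)
	n, err := io.ReadAtLeast(input, buffer, len(buffer))
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return "", nil, err // a "real" read error, not just a short stream
	}
	name := ""
	for candidate, prefix := range prefixes {
		if bytes.HasPrefix(buffer[:n], prefix) {
			name = candidate
			break
		}
	}
	return name, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
}

func main() {
	format, r, err := detectMagic(strings.NewReader("\x1f\x8b\x08hello"))
	if err != nil {
		panic(err)
	}
	rest, _ := ioutil.ReadAll(r)
	fmt.Printf("format=%q, %d bytes still readable\n", format, len(rest))
}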
-// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. -func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { - buffer := [8]byte{} - - n, err := io.ReadAtLeast(input, buffer[:], len(buffer)) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again. - // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later. - return nil, nil, err - } - - var decompressor DecompressorFunc - for name, algo := range compressionAlgos { - if bytes.HasPrefix(buffer[:n], algo.prefix) { - logrus.Debugf("Detected compression format %s", name) - decompressor = algo.decompressor - break - } - } - if decompressor == nil { - logrus.Debugf("No compression detected") - } - - return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil -} - -// AutoDecompress takes a stream and returns an uncompressed version of the -// same stream. -// The caller must call Close() on the returned stream (even if the input does not need, -// or does not even support, closing!). -func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) { - decompressor, stream, err := DetectCompression(stream) - if err != nil { - return nil, false, errors.Wrapf(err, "Error detecting compression") - } - var res io.ReadCloser - if decompressor != nil { - res, err = decompressor(stream) - if err != nil { - return nil, false, errors.Wrapf(err, "Error initializing decompression") - } - } else { - res = ioutil.NopCloser(stream) - } - return res, decompressor != nil, nil -} diff --git a/vendor/github.com/containers/image/pkg/docker/config/config.go b/vendor/github.com/containers/image/pkg/docker/config/config.go deleted file mode 100644 index eef629d5c..000000000 --- a/vendor/github.com/containers/image/pkg/docker/config/config.go +++ /dev/null @@ -1,344 +0,0 @@ -package config - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/containers/image/types" - helperclient "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" - "github.com/docker/docker/pkg/homedir" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type dockerAuthConfig struct { - Auth string `json:"auth,omitempty"` -} - -type dockerConfigFile struct { - AuthConfigs map[string]dockerAuthConfig `json:"auths"` - CredHelpers map[string]string `json:"credHelpers,omitempty"` -} - -var ( - defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json") - xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json") - dockerHomePath = filepath.FromSlash(".docker/config.json") - dockerLegacyHomePath = ".dockercfg" - - enableKeyring = false - - // ErrNotLoggedIn is returned for users not logged into a registry - // that they are trying to logout of - ErrNotLoggedIn = errors.New("not logged in") - // ErrNotSupported is returned for unsupported methods - ErrNotSupported = errors.New("not supported") -) - -// SetAuthentication stores the username and password in the auth.json file -func SetAuthentication(sys *types.SystemContext, registry, username, password string) error { - return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { - if ch, exists := auths.CredHelpers[registry]; exists { - return 
false, setAuthToCredHelper(ch, registry, username, password) - } - - // Set the credentials to kernel keyring if enableKeyring is true. - // The keyring might not work in all environments (e.g., missing capability) and isn't supported on all platforms. - // Hence, we want to fall-back to using the authfile in case the keyring failed. - // However, if the enableKeyring is false, we want adhere to the user specification and not use the keyring. - if enableKeyring { - err := setAuthToKernelKeyring(registry, username, password) - if err == nil { - logrus.Debugf("credentials for (%s, %s) were stored in the kernel keyring\n", registry, username) - return false, nil - } - logrus.Debugf("failed to authenticate with the kernel keyring, falling back to authfiles. %v", err) - } - creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) - newCreds := dockerAuthConfig{Auth: creds} - auths.AuthConfigs[registry] = newCreds - return true, nil - }) -} - -// GetAuthentication returns the registry credentials stored in -// either auth.json file or .docker/config.json -// If an entry is not found empty strings are returned for the username and password -func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) { - if sys != nil && sys.DockerAuthConfig != nil { - logrus.Debug("Returning credentials from DockerAuthConfig") - return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil - } - - if enableKeyring { - username, password, err := getAuthFromKernelKeyring(registry) - if err == nil { - logrus.Debug("returning credentials from kernel keyring") - return username, password, nil - } - } - - dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath) - var paths []string - pathToAuth, err := getPathToAuth(sys) - if err == nil { - paths = append(paths, pathToAuth) - } else { - // Error means that the path set for XDG_RUNTIME_DIR does not exist - // but we don't want to completely fail in the case that the user is pulling a public image - // Logging the error as a warning instead and moving on to pulling the image - logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err) - } - paths = append(paths, filepath.Join(homedir.Get(), dockerHomePath), dockerLegacyPath) - - for _, path := range paths { - legacyFormat := path == dockerLegacyPath - username, password, err := findAuthentication(registry, path, legacyFormat) - if err != nil { - logrus.Debugf("Credentials not found") - return "", "", err - } - if username != "" && password != "" { - logrus.Debugf("Returning credentials from %s", path) - return username, password, nil - } - } - logrus.Debugf("Credentials not found") - return "", "", nil -} - -// RemoveAuthentication deletes the credentials stored in auth.json -func RemoveAuthentication(sys *types.SystemContext, registry string) error { - return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { - // First try cred helpers. 
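The auths entries written above are just base64("username:password"). A round-trip sketch; note that the decode side splits on the first colon only, so passwords containing colons survive:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Encode the way SetAuthentication does (credentials invented).
	creds := base64.StdEncoding.EncodeToString([]byte("alice" + ":" + "s3cr:et"))
	fmt.Println("stored:", creds)

	// Decode the way decodeDockerAuth does.
	decoded, err := base64.StdEncoding.DecodeString(creds)
	if err != nil {
		panic(err)
	}
	parts := strings.SplitN(string(decoded), ":", 2)
	if len(parts) != 2 {
		panic("malformed auth entry")
	}
	fmt.Printf("user=%q password=%q\n", parts[0], parts[1])
}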
- if ch, exists := auths.CredHelpers[registry]; exists { - return false, deleteAuthFromCredHelper(ch, registry) - } - - // Next if keyring is enabled try kernel keyring - if enableKeyring { - err := deleteAuthFromKernelKeyring(registry) - if err == nil { - logrus.Debugf("credentials for %s were deleted from the kernel keyring", registry) - return false, nil - } - logrus.Debugf("failed to delete credentials from the kernel keyring, falling back to authfiles") - } - - if _, ok := auths.AuthConfigs[registry]; ok { - delete(auths.AuthConfigs, registry) - } else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok { - delete(auths.AuthConfigs, normalizeRegistry(registry)) - } else { - return false, ErrNotLoggedIn - } - return true, nil - }) -} - -// RemoveAllAuthentication deletes all the credentials stored in auth.json -func RemoveAllAuthentication(sys *types.SystemContext) error { - return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { - auths.CredHelpers = make(map[string]string) - auths.AuthConfigs = make(map[string]dockerAuthConfig) - return true, nil - }) -} - -// getPath gets the path of the auth.json file -// The path can be overriden by the user if the overwrite-path flag is set -// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers -// Otherwise, the auth.json file is stored in /run/containers/UID -func getPathToAuth(sys *types.SystemContext) (string, error) { - if sys != nil { - if sys.AuthFilePath != "" { - return sys.AuthFilePath, nil - } - if sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), nil - } - } - - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - if runtimeDir != "" { - // This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway. - // We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case. - _, err := os.Stat(runtimeDir) - if os.IsNotExist(err) { - // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory - // or made a typo while setting the environment variable, - // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside. - return "", errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir) - } // else ignore err and let the caller fail accessing xdgRuntimeDirPath. 
- return filepath.Join(runtimeDir, xdgRuntimeDirPath), nil - } - return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), nil -} - -// readJSONFile unmarshals the authentications stored in the auth.json file and returns it -// or returns an empty dockerConfigFile data structure if auth.json does not exist -// if the file exists and is empty, readJSONFile returns an error -func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) { - var auths dockerConfigFile - - raw, err := ioutil.ReadFile(path) - if err != nil { - if os.IsNotExist(err) { - auths.AuthConfigs = map[string]dockerAuthConfig{} - return auths, nil - } - return dockerConfigFile{}, err - } - - if legacyFormat { - if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil { - return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path) - } - return auths, nil - } - - if err = json.Unmarshal(raw, &auths); err != nil { - return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path) - } - - return auths, nil -} - -// modifyJSON writes to auth.json if the dockerConfigFile has been updated -func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error { - path, err := getPathToAuth(sys) - if err != nil { - return err - } - - dir := filepath.Dir(path) - if _, err := os.Stat(dir); os.IsNotExist(err) { - if err = os.MkdirAll(dir, 0700); err != nil { - return errors.Wrapf(err, "error creating directory %q", dir) - } - } - - auths, err := readJSONFile(path, false) - if err != nil { - return errors.Wrapf(err, "error reading JSON file %q", path) - } - - updated, err := editor(&auths) - if err != nil { - return errors.Wrapf(err, "error updating %q", path) - } - if updated { - newData, err := json.MarshalIndent(auths, "", "\t") - if err != nil { - return errors.Wrapf(err, "error marshaling JSON %q", path) - } - - if err = ioutil.WriteFile(path, newData, 0755); err != nil { - return errors.Wrapf(err, "error writing to file %q", path) - } - } - - return nil -} - -func getAuthFromCredHelper(credHelper, registry string) (string, string, error) { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - creds, err := helperclient.Get(p, registry) - if err != nil { - return "", "", err - } - return creds.Username, creds.Secret, nil -} - -func setAuthToCredHelper(credHelper, registry, username, password string) error { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - creds := &credentials.Credentials{ - ServerURL: registry, - Username: username, - Secret: password, - } - return helperclient.Store(p, creds) -} - -func deleteAuthFromCredHelper(credHelper, registry string) error { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - return helperclient.Erase(p, registry) -} - -// findAuthentication looks for auth of registry in path -func findAuthentication(registry, path string, legacyFormat bool) (string, string, error) { - auths, err := readJSONFile(path, legacyFormat) - if err != nil { - return "", "", errors.Wrapf(err, "error reading JSON file %q", path) - } - - // First try cred helpers. They should always be normalized. 
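The helper functions above shell out to an external docker-credential-<name> binary via the docker-credential-helpers client package. A minimal standalone sketch of the same call, assuming some helper (here docker-credential-pass) is installed on $PATH; the registry name is invented:

package main

import (
	"fmt"

	helperclient "github.com/docker/docker-credential-helpers/client"
)

func main() {
	// The helper binary name encodes which backend stores the secret.
	p := helperclient.NewShellProgramFunc("docker-credential-pass")
	creds, err := helperclient.Get(p, "registry.example.com")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Printf("user=%s (secret elided, %d bytes)\n", creds.Username, len(creds.Secret))
}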
- if ch, exists := auths.CredHelpers[registry]; exists { - return getAuthFromCredHelper(ch, registry) - } - - // I'm feeling lucky - if val, exists := auths.AuthConfigs[registry]; exists { - return decodeDockerAuth(val.Auth) - } - - // bad luck; let's normalize the entries first - registry = normalizeRegistry(registry) - normalizedAuths := map[string]dockerAuthConfig{} - for k, v := range auths.AuthConfigs { - normalizedAuths[normalizeRegistry(k)] = v - } - if val, exists := normalizedAuths[registry]; exists { - return decodeDockerAuth(val.Auth) - } - return "", "", nil -} - -func decodeDockerAuth(s string) (string, string, error) { - decoded, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return "", "", err - } - parts := strings.SplitN(string(decoded), ":", 2) - if len(parts) != 2 { - // if it's invalid just skip, as docker does - return "", "", nil - } - user := parts[0] - password := strings.Trim(parts[1], "\x00") - return user, password, nil -} - -// convertToHostname converts a registry url which has http|https prepended -// to just an hostname. -// Copied from github.com/docker/docker/registry/auth.go -func convertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} - -func normalizeRegistry(registry string) string { - normalized := convertToHostname(registry) - switch normalized { - case "registry-1.docker.io", "docker.io": - return "index.docker.io" - } - return normalized -} diff --git a/vendor/github.com/containers/image/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/pkg/docker/config/config_linux.go deleted file mode 100644 index 4d66a50df..000000000 --- a/vendor/github.com/containers/image/pkg/docker/config/config_linux.go +++ /dev/null @@ -1,79 +0,0 @@ -package config - -import ( - "fmt" - "strings" - - "github.com/containers/image/pkg/keyctl" - "github.com/pkg/errors" -) - -func getAuthFromKernelKeyring(registry string) (string, string, error) { - userkeyring, err := keyctl.UserKeyring() - if err != nil { - return "", "", err - } - key, err := userkeyring.Search(genDescription(registry)) - if err != nil { - return "", "", err - } - authData, err := key.Get() - if err != nil { - return "", "", err - } - parts := strings.SplitN(string(authData), "\x00", 2) - if len(parts) != 2 { - return "", "", nil - } - return parts[0], parts[1], nil -} - -func deleteAuthFromKernelKeyring(registry string) error { - userkeyring, err := keyctl.UserKeyring() - - if err != nil { - return err - } - key, err := userkeyring.Search(genDescription(registry)) - if err != nil { - return err - } - return key.Unlink() -} - -func setAuthToKernelKeyring(registry, username, password string) error { - keyring, err := keyctl.SessionKeyring() - if err != nil { - return err - } - id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password))) - if err != nil { - return err - } - - // sets all permission(view,read,write,search,link,set attribute) for current user - // it enables the user to search the key after it linked to user keyring and unlinked from session keyring - err = keyctl.SetPerm(id, keyctl.PermUserAll) - if err != nil { - return err - } - // link the key to userKeyring - userKeyring, err := keyctl.UserKeyring() - if err != nil { - return errors.Wrapf(err, "error 
getting user keyring") - } - err = keyctl.Link(userKeyring, id) - if err != nil { - return errors.Wrapf(err, "error linking the key to user keyring") - } - // unlink the key from session keyring - err = keyctl.Unlink(keyring, id) - if err != nil { - return errors.Wrapf(err, "error unlinking the key from session keyring") - } - return nil -} - -func genDescription(registry string) string { - return fmt.Sprintf("container-registry-login:%s", registry) -} diff --git a/vendor/github.com/containers/image/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/pkg/docker/config/config_unsupported.go deleted file mode 100644 index 1c1a02511..000000000 --- a/vendor/github.com/containers/image/pkg/docker/config/config_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux -// +build !386 !amd64 - -package config - -func getAuthFromKernelKeyring(registry string) (string, string, error) { - return "", "", ErrNotSupported -} - -func deleteAuthFromKernelKeyring(registry string) error { - return ErrNotSupported -} - -func setAuthToKernelKeyring(registry, username, password string) error { - return ErrNotSupported -} diff --git a/vendor/github.com/containers/image/pkg/keyctl/key.go b/vendor/github.com/containers/image/pkg/keyctl/key.go deleted file mode 100644 index e4396a9df..000000000 --- a/vendor/github.com/containers/image/pkg/keyctl/key.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -// Key represents a single key linked to one or more kernel keyrings. -type Key struct { - Name string - - id, ring keyID - size int -} - -// ID returns the 32-bit kernel identifier for a specific key -func (k *Key) ID() int32 { - return int32(k.id) -} - -// Get the key's value as a byte slice -func (k *Key) Get() ([]byte, error) { - var ( - b []byte - err error - sizeRead int - ) - - if k.size == 0 { - k.size = 512 - } - - size := k.size - - b = make([]byte, int(size)) - sizeRead = size + 1 - for sizeRead > size { - r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size) - if err != nil { - return nil, err - } - - if sizeRead = int(r1); sizeRead > size { - b = make([]byte, sizeRead) - size = sizeRead - sizeRead = size + 1 - } else { - k.size = sizeRead - } - } - return b[:k.size], err -} - -// Unlink a key from the keyring it was loaded from (or added to). If the key -// is not linked to any other keyrings, it is destroyed. -func (k *Key) Unlink() error { - _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0) - return err -} diff --git a/vendor/github.com/containers/image/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/pkg/keyctl/keyring.go deleted file mode 100644 index 6e029c923..000000000 --- a/vendor/github.com/containers/image/pkg/keyctl/keyring.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface) -// -// Deprecated: Most callers should use either golang.org/x/sys/unix directly, -// or the original (and more extensive) github.com/jsipprell/keyctl . -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -// Keyring is the basic interface to a linux keyctl keyring. 
-type Keyring interface { - ID - Add(string, []byte) (*Key, error) - Search(string) (*Key, error) -} - -type keyring struct { - id keyID -} - -// ID is unique 32-bit serial number identifiers for all Keys and Keyrings have. -type ID interface { - ID() int32 -} - -// Add a new key to a keyring. The key can be searched for later by name. -func (kr *keyring) Add(name string, key []byte) (*Key, error) { - r, err := unix.AddKey("user", name, key, int(kr.id)) - if err == nil { - key := &Key{Name: name, id: keyID(r), ring: kr.id} - return key, nil - } - return nil, err -} - -// Search for a key by name, this also searches child keyrings linked to this -// one. The key, if found, is linked to the top keyring that Search() was called -// from. -func (kr *keyring) Search(name string) (*Key, error) { - id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0) - if err == nil { - return &Key{Name: name, id: keyID(id), ring: kr.id}, nil - } - return nil, err -} - -// ID returns the 32-bit kernel identifier of a keyring -func (kr *keyring) ID() int32 { - return int32(kr.id) -} - -// SessionKeyring returns the current login session keyring -func SessionKeyring() (Keyring, error) { - return newKeyring(unix.KEY_SPEC_SESSION_KEYRING) -} - -// UserKeyring returns the keyring specific to the current user. -func UserKeyring() (Keyring, error) { - return newKeyring(unix.KEY_SPEC_USER_KEYRING) -} - -// Unlink an object from a keyring -func Unlink(parent Keyring, child ID) error { - _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0) - return err -} - -// Link a key into a keyring -func Link(parent Keyring, child ID) error { - _, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0) - return err -} diff --git a/vendor/github.com/containers/image/pkg/keyctl/perm.go b/vendor/github.com/containers/image/pkg/keyctl/perm.go deleted file mode 100644 index ae9697149..000000000 --- a/vendor/github.com/containers/image/pkg/keyctl/perm.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -// KeyPerm represents in-kernel access control permission to keys and keyrings -// as a 32-bit integer broken up into four permission sets, one per byte. -// In MSB order, the perms are: Processor, User, Group, Other. -type KeyPerm uint32 - -const ( - // PermOtherAll sets all permission for Other - PermOtherAll KeyPerm = 0x3f << (8 * iota) - // PermGroupAll sets all permission for Group - PermGroupAll - // PermUserAll sets all permission for User - PermUserAll - // PermProcessAll sets all permission for Processor - PermProcessAll -) - -// SetPerm sets the permissions on a key or keyring. -func SetPerm(k ID, p KeyPerm) error { - err := unix.KeyctlSetperm(int(k.ID()), uint32(p)) - return err -} diff --git a/vendor/github.com/containers/image/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/pkg/keyctl/sys_linux.go deleted file mode 100644 index 196c82760..000000000 --- a/vendor/github.com/containers/image/pkg/keyctl/sys_linux.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
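The KeyPerm constants above pack one 6-bit permission set per byte by shifting the same 0x3f mask with iota; a tiny program makes the resulting masks visible (constant names lowercased to mark them as local stand-ins):

package main

import "fmt"

// The kernel packs four permission sets into one 32-bit word, one byte per
// subject; shifting the same 0x3f mask by 8*iota selects each byte.
const (
	permOtherAll uint32 = 0x3f << (8 * iota)
	permGroupAll
	permUserAll
	permProcessAll
)

func main() {
	fmt.Printf("other=%#08x group=%#08x user=%#08x process=%#08x\n",
		permOtherAll, permGroupAll, permUserAll, permProcessAll)
}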
- -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -type keyID int32 - -func newKeyring(id keyID) (*keyring, error) { - r1, err := unix.KeyctlGetKeyringID(int(id), true) - if err != nil { - return nil, err - } - - if id < 0 { - r1 = int(id) - } - return &keyring{id: keyID(r1)}, nil -} diff --git a/vendor/github.com/containers/image/pkg/strslice/README.md b/vendor/github.com/containers/image/pkg/strslice/README.md deleted file mode 100644 index ae6097e82..000000000 --- a/vendor/github.com/containers/image/pkg/strslice/README.md +++ /dev/null @@ -1 +0,0 @@ -This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice). diff --git a/vendor/github.com/containers/image/pkg/strslice/strslice.go b/vendor/github.com/containers/image/pkg/strslice/strslice.go deleted file mode 100644 index bad493fb8..000000000 --- a/vendor/github.com/containers/image/pkg/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go deleted file mode 100644 index bed92cb90..000000000 --- a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go +++ /dev/null @@ -1,483 +0,0 @@ -package sysregistriesv2 - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strings" - "sync" - - "github.com/BurntSushi/toml" - "github.com/containers/image/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/containers/image/docker/reference" -) - -// systemRegistriesConfPath is the path to the system-wide registry -// configuration file and is used to add/subtract potential registries for -// obtaining images. You can override this at build time with -// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path' -var systemRegistriesConfPath = builtinRegistriesConfPath - -// builtinRegistriesConfPath is the path to the registry configuration file. -// DO NOT change this, instead see systemRegistriesConfPath above. -const builtinRegistriesConfPath = "/etc/containers/registries.conf" - -// Endpoint describes a remote location of a registry. -type Endpoint struct { - // The endpoint's remote location. - Location string `toml:"location,omitempty"` - // If true, certs verification will be skipped and HTTP (non-TLS) - // connections will be allowed. - Insecure bool `toml:"insecure,omitempty"` -} - -// rewriteReference will substitute the provided reference `prefix` to the -// endpoints `location` from the `ref` and creates a new named reference from it. 
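StrSlice's custom unmarshaller, shown above, accepts either a JSON string or an array of strings by trying the array form first and falling back to a single string. The same trick in a self-contained program (type name invented):

package main

import (
	"encoding/json"
	"fmt"
)

type strOrSlice []string

// UnmarshalJSON tries the array form first and, on failure, wraps a lone
// string into a one-element slice, mirroring StrSlice above.
func (e *strOrSlice) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		return nil // preserve the existing value, allowing defaults
	}
	p := make([]string, 0, 1)
	if err := json.Unmarshal(b, &p); err != nil {
		var s string
		if err := json.Unmarshal(b, &s); err != nil {
			return err
		}
		p = append(p, s)
	}
	*e = p
	return nil
}

func main() {
	var a, b strOrSlice
	_ = json.Unmarshal([]byte(`"sh"`), &a)
	_ = json.Unmarshal([]byte(`["sh","-c","true"]`), &b)
	fmt.Println(a, b) // [sh] [sh -c true]
}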
-// The function errors if the newly created reference is not parsable. -func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) { - refString := ref.String() - if !refMatchesPrefix(refString, prefix) { - return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString) - } - - newNamedRef := strings.Replace(refString, prefix, e.Location, 1) - newParsedRef, err := reference.ParseNamed(newNamedRef) - if err != nil { - return nil, errors.Wrapf(err, "error rewriting reference") - } - logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String()) - return newParsedRef, nil -} - -// Registry represents a registry. -type Registry struct { - // Prefix is used for matching images, and to translate one namespace to - // another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"` - // and we pull from "example.com/bar/myimage:latest", the image will - // effectively be pulled from "example.com/foo/bar/myimage:latest". - // If no Prefix is specified, it defaults to the specified location. - Prefix string `toml:"prefix"` - // A registry is an Endpoint too - Endpoint - // The registry's mirrors. - Mirrors []Endpoint `toml:"mirror,omitempty"` - // If true, pulling from the registry will be blocked. - Blocked bool `toml:"blocked,omitempty"` - // If true, mirrors will only be used for digest pulls. Pulling images by - // tag can potentially yield different images, depending on which endpoint - // we pull from. Forcing digest-pulls for mirrors avoids that issue. - MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"` -} - -// PullSource consists of an Endpoint and a Reference. Note that the reference is -// rewritten according to the registries prefix and the Endpoint's location. -type PullSource struct { - Endpoint Endpoint - Reference reference.Named -} - -// PullSourcesFromReference returns a slice of PullSource's based on the passed -// reference. -func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) { - var endpoints []Endpoint - - if r.MirrorByDigestOnly { - // Only use mirrors when the reference is a digest one. - if _, isDigested := ref.(reference.Canonical); isDigested { - endpoints = append(r.Mirrors, r.Endpoint) - } else { - endpoints = []Endpoint{r.Endpoint} - } - } else { - endpoints = append(r.Mirrors, r.Endpoint) - } - - sources := []PullSource{} - for _, ep := range endpoints { - rewritten, err := ep.rewriteReference(ref, r.Prefix) - if err != nil { - return nil, err - } - sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten}) - } - - return sources, nil -} - -// V1TOMLregistries is for backwards compatibility to sysregistries v1 -type V1TOMLregistries struct { - Registries []string `toml:"registries"` -} - -// V1TOMLConfig is for backwards compatibility to sysregistries v1 -type V1TOMLConfig struct { - Search V1TOMLregistries `toml:"search"` - Insecure V1TOMLregistries `toml:"insecure"` - Block V1TOMLregistries `toml:"block"` -} - -// V1RegistriesConf is the sysregistries v1 configuration format. -type V1RegistriesConf struct { - V1TOMLConfig `toml:"registries"` -} - -// Nonempty returns true if config contains at least one configuration entry. -func (config *V1RegistriesConf) Nonempty() bool { - return (len(config.V1TOMLConfig.Search.Registries) != 0 || - len(config.V1TOMLConfig.Insecure.Registries) != 0 || - len(config.V1TOMLConfig.Block.Registries) != 0) -} - -// V2RegistriesConf is the sysregistries v2 configuration format. 
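At its core, the mirror rewrite above is one bounded substitution: swap the matched prefix for the endpoint location, exactly once, then re-parse. A stripped-down model without the reference types; note that the real matcher, refMatchesPrefix, is boundary-aware, unlike the plain HasPrefix used here:

package main

import (
	"fmt"
	"strings"
)

// rewrite substitutes prefix with location exactly once, the same bounded
// strings.Replace call the deleted rewriteReference performs.
func rewrite(ref, prefix, location string) (string, error) {
	if !strings.HasPrefix(ref, prefix) {
		return "", fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, ref)
	}
	return strings.Replace(ref, prefix, location, 1), nil
}

func main() {
	out, err := rewrite("example.com/bar/myimage:latest", "example.com/bar", "mirror.local/foo/bar")
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // mirror.local/foo/bar/myimage:latest
}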
-type V2RegistriesConf struct { - Registries []Registry `toml:"registry"` - // An array of host[:port] (not prefix!) entries to use for resolving unqualified image references - UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"` -} - -// Nonempty returns true if config contains at least one configuration entry. -func (config *V2RegistriesConf) Nonempty() bool { - return (len(config.Registries) != 0 || - len(config.UnqualifiedSearchRegistries) != 0) -} - -// tomlConfig is the data type used to unmarshal the toml config. -type tomlConfig struct { - V2RegistriesConf - V1RegistriesConf // for backwards compatibility with sysregistries v1 -} - -// InvalidRegistries represents an invalid registry configurations. An example -// is when "registry.com" is defined multiple times in the configuration but -// with conflicting security settings. -type InvalidRegistries struct { - s string -} - -// Error returns the error string. -func (e *InvalidRegistries) Error() string { - return e.s -} - -// parseLocation parses the input string, performs some sanity checks and returns -// the sanitized input string. An error is returned if the input string is -// empty or if contains an "http{s,}://" prefix. -func parseLocation(input string) (string, error) { - trimmed := strings.TrimRight(input, "/") - - if trimmed == "" { - return "", &InvalidRegistries{s: "invalid location: cannot be empty"} - } - - if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") { - msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input) - return "", &InvalidRegistries{s: msg} - } - - return trimmed, nil -} - -// ConvertToV2 returns a v2 config corresponding to a v1 one. -func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) { - regMap := make(map[string]*Registry) - // The order of the registries is not really important, but make it deterministic (the same for the same config file) - // to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations. - registryOrder := []string{} - - getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object - var err error - location, err = parseLocation(location) - if err != nil { - return nil, err - } - reg, exists := regMap[location] - if !exists { - reg = &Registry{ - Endpoint: Endpoint{Location: location}, - Mirrors: []Endpoint{}, - Prefix: location, - } - regMap[location] = reg - registryOrder = append(registryOrder, location) - } - return reg, nil - } - - for _, blocked := range config.V1TOMLConfig.Block.Registries { - reg, err := getRegistry(blocked) - if err != nil { - return nil, err - } - reg.Blocked = true - } - for _, insecure := range config.V1TOMLConfig.Insecure.Registries { - reg, err := getRegistry(insecure) - if err != nil { - return nil, err - } - reg.Insecure = true - } - - res := &V2RegistriesConf{ - UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries, - } - for _, location := range registryOrder { - reg := regMap[location] - res.Registries = append(res.Registries, *reg) - } - return res, nil -} - -// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries. -var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$") - -// postProcess checks the consistency of all the configuration, looks for conflicts, -// and normalizes the configuration (e.g., sets the Prefix to Location if not set). 
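For reference, this is roughly how a v2 registries.conf maps onto the structs above: a sketch using the same BurntSushi/toml decoder, with local stand-in types carrying the same TOML tags and an invented sample config:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type Endpoint struct {
	Location string `toml:"location,omitempty"`
	Insecure bool   `toml:"insecure,omitempty"`
}

type Registry struct {
	Prefix string `toml:"prefix"`
	Endpoint
	Mirrors []Endpoint `toml:"mirror,omitempty"`
}

type Conf struct {
	Registries                  []Registry `toml:"registry"`
	UnqualifiedSearchRegistries []string   `toml:"unqualified-search-registries"`
}

const sample = `
unqualified-search-registries = ["docker.io"]

[[registry]]
prefix = "example.com/foo"
location = "internal-registry-for-example.com/foo"

[[registry.mirror]]
location = "example-mirror-0.local/mirror-for-foo"
`

func main() {
	var c Conf
	if _, err := toml.Decode(sample, &c); err != nil {
		panic(err)
	}
	r := c.Registries[0]
	fmt.Println("search list:", c.UnqualifiedSearchRegistries)
	fmt.Println("prefix:", r.Prefix, "-> location:", r.Location, "mirrors:", len(r.Mirrors))
}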
-func (config *V2RegistriesConf) postProcess() error {
-	regMap := make(map[string][]*Registry)
-
-	for i := range config.Registries {
-		reg := &config.Registries[i]
-		// make sure Location and Prefix are valid
-		var err error
-		reg.Location, err = parseLocation(reg.Location)
-		if err != nil {
-			return err
-		}
-
-		if reg.Prefix == "" {
-			reg.Prefix = reg.Location
-		} else {
-			reg.Prefix, err = parseLocation(reg.Prefix)
-			if err != nil {
-				return err
-			}
-		}
-
-		// make sure mirrors are valid
-		for _, mir := range reg.Mirrors {
-			mir.Location, err = parseLocation(mir.Location)
-			if err != nil {
-				return err
-			}
-		}
-		regMap[reg.Location] = append(regMap[reg.Location], reg)
-	}
-
-	// Given a registry can be mentioned multiple times (e.g., to have
-	// multiple prefixes backed by different mirrors), we need to make sure
-	// there are no conflicts among them.
-	//
-	// Note: we need to iterate over the registries array to ensure a
-	// deterministic behavior which is not guaranteed by maps.
-	for _, reg := range config.Registries {
-		others, _ := regMap[reg.Location]
-		for _, other := range others {
-			if reg.Insecure != other.Insecure {
-				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
-				return &InvalidRegistries{s: msg}
-			}
-			if reg.Blocked != other.Blocked {
-				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
-				return &InvalidRegistries{s: msg}
-			}
-		}
-	}
-
-	for i := range config.UnqualifiedSearchRegistries {
-		registry, err := parseLocation(config.UnqualifiedSearchRegistries[i])
-		if err != nil {
-			return err
-		}
-		if !anchoredDomainRegexp.MatchString(registry) {
-			return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)}
-		}
-		config.UnqualifiedSearchRegistries[i] = registry
-	}
-
-	return nil
-}
-
-// ConfigPath returns the path to the system-wide registry configuration file.
-func ConfigPath(ctx *types.SystemContext) string {
-	confPath := systemRegistriesConfPath
-	if ctx != nil {
-		if ctx.SystemRegistriesConfPath != "" {
-			confPath = ctx.SystemRegistriesConfPath
-		} else if ctx.RootForImplicitAbsolutePaths != "" {
-			confPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
-		}
-	}
-	return confPath
-}
-
-// configMutex is used to synchronize concurrent accesses to configCache.
-var configMutex = sync.Mutex{}
-
-// configCache caches already loaded configs with config paths as keys and is
-// used to avoid redundantly parsing configs. Concurrent accesses to the cache
-// are synchronized via configMutex.
-var configCache = make(map[string]*V2RegistriesConf)
-
-// InvalidateCache invalidates the registry cache. This function is meant to be
-// used for long-running processes that need to reload potential changes made to
-// the cached registry config files.
-func InvalidateCache() {
-	configMutex.Lock()
-	defer configMutex.Unlock()
-	configCache = make(map[string]*V2RegistriesConf)
-}
-
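As a usage sketch of this caching API, assuming the package's import path at the time (github.com/containers/image/pkg/sysregistriesv2) and an example config path:

	package main

	import (
		"fmt"

		"github.com/containers/image/pkg/sysregistriesv2"
		"github.com/containers/image/types"
	)

	func main() {
		// The path is an example; nil ctx would use the system default.
		ctx := &types.SystemContext{SystemRegistriesConfPath: "/etc/containers/registries.conf"}
		registries, err := sysregistriesv2.GetRegistries(ctx)
		if err != nil {
			panic(err)
		}
		for _, reg := range registries {
			fmt.Printf("%s -> %s (blocked=%v)\n", reg.Prefix, reg.Location, reg.Blocked)
		}
		// A long-running process would call InvalidateCache() before the next
		// GetRegistries call to pick up edits made to the file on disk.
		sysregistriesv2.InvalidateCache()
	}

-// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached.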
-func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) { - configPath := ConfigPath(ctx) - - configMutex.Lock() - // if the config has already been loaded, return the cached registries - if config, inCache := configCache[configPath]; inCache { - configMutex.Unlock() - return config, nil - } - configMutex.Unlock() - - return TryUpdatingCache(ctx) -} - -// TryUpdatingCache loads the configuration from the provided `SystemContext` -// without using the internal cache. On success, the loaded configuration will -// be added into the internal registry cache. -func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) { - configPath := ConfigPath(ctx) - - configMutex.Lock() - defer configMutex.Unlock() - - // load the config - config, err := loadRegistryConf(configPath) - if err != nil { - // Return an empty []Registry if we use the default config, - // which implies that the config path of the SystemContext - // isn't set. Note: if ctx.SystemRegistriesConfPath points to - // the default config, we will still return an error. - if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") { - return &V2RegistriesConf{Registries: []Registry{}}, nil - } - return nil, err - } - - v2Config := &config.V2RegistriesConf - - // backwards compatibility for v1 configs - if config.V1RegistriesConf.Nonempty() { - if config.V2RegistriesConf.Nonempty() { - return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"} - } - v2, err := config.V1RegistriesConf.ConvertToV2() - if err != nil { - return nil, err - } - v2Config = v2 - } - - if err := v2Config.postProcess(); err != nil { - return nil, err - } - - // populate the cache - configCache[configPath] = v2Config - return v2Config, nil -} - -// GetRegistries loads and returns the registries specified in the config. -// Note the parsed content of registry config files is cached. For reloading, -// use `InvalidateCache` and re-call `GetRegistries`. -func GetRegistries(ctx *types.SystemContext) ([]Registry, error) { - config, err := getConfig(ctx) - if err != nil { - return nil, err - } - return config.Registries, nil -} - -// UnqualifiedSearchRegistries returns a list of host[:port] entries to try -// for unqualified image search, in the returned order) -func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) { - config, err := getConfig(ctx) - if err != nil { - return nil, err - } - return config.UnqualifiedSearchRegistries, nil -} - -// refMatchesPrefix returns true iff ref, -// which is a registry, repository namespace, repository or image reference (as formatted by -// reference.Domain(), reference.Named.Name() or reference.Reference.String() -// — note that this requires the name to start with an explicit hostname!), -// matches a Registry.Prefix value. -// (This is split from the caller primarily to make testing easier.) -func refMatchesPrefix(ref, prefix string) bool { - switch { - case len(ref) < len(prefix): - return false - case len(ref) == len(prefix): - return ref == prefix - case len(ref) > len(prefix): - if !strings.HasPrefix(ref, prefix) { - return false - } - c := ref[len(prefix)] - // This allows "example.com:5000" to match "example.com", - // which is unintended; that will get fixed eventually, DON'T RELY - // ON THE CURRENT BEHAVIOR. 
- return c == ':' || c == '/' || c == '@' - default: - panic("Internal error: impossible comparison outcome") - } -} - -// FindRegistry returns the Registry with the longest prefix for ref, -// which is a registry, repository namespace repository or image reference (as formatted by -// reference.Domain(), reference.Named.Name() or reference.Reference.String() -// — note that this requires the name to start with an explicit hostname!). -// If no Registry prefixes the image, nil is returned. -func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) { - config, err := getConfig(ctx) - if err != nil { - return nil, err - } - - reg := Registry{} - prefixLen := 0 - for _, r := range config.Registries { - if refMatchesPrefix(ref, r.Prefix) { - length := len(r.Prefix) - if length > prefixLen { - reg = r - prefixLen = length - } - } - } - if prefixLen != 0 { - return ®, nil - } - return nil, nil -} - -// Loads the registry configuration file from the filesystem and then unmarshals -// it. Returns the unmarshalled object. -func loadRegistryConf(configPath string) (*tomlConfig, error) { - config := &tomlConfig{} - - configBytes, err := ioutil.ReadFile(configPath) - if err != nil { - return nil, err - } - - err = toml.Unmarshal(configBytes, &config) - return config, err -} diff --git a/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go deleted file mode 100644 index 6785564e8..000000000 --- a/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go +++ /dev/null @@ -1,112 +0,0 @@ -package tlsclientconfig - -import ( - "crypto/tls" - "io/ioutil" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "time" - - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc -func SetupCertificates(dir string, tlsc *tls.Config) error { - logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := ioutil.ReadDir(dir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - if os.IsPermission(err) { - logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err) - return nil - } - return err - } - - for _, f := range fs { - fullPath := filepath.Join(dir, f.Name()) - if strings.HasSuffix(f.Name(), ".crt") { - logrus.Debugf(" crt: %s", fullPath) - data, err := ioutil.ReadFile(fullPath) - if err != nil { - if os.IsNotExist(err) { - // Dangling symbolic link? - // Race with someone who deleted the - // file after we read the directory's - // list of contents? - logrus.Warnf("error reading certificate %q: %v", fullPath, err) - continue - } - return err - } - if tlsc.RootCAs == nil { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return errors.Wrap(err, "unable to get system cert pool") - } - tlsc.RootCAs = systemPool - } - tlsc.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf(" cert: %s", fullPath) - if !hasFile(fs, keyName) { - return errors.Errorf("missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) - if err != nil { - return err - } - tlsc.Certificates = append(tlsc.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf(" key: %s", fullPath) - if !hasFile(fs, certName) { - return errors.Errorf("missing client certificate %s for key %s", certName, keyName) - } - } - } - return nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - -// NewTransport Creates a default transport -func NewTransport() *http.Transport { - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - tr := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: direct.Dial, - TLSHandshakeTimeout: 10 * time.Second, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - proxyDialer, err := sockets.DialerFromEnvironment(direct) - if err == nil { - tr.Dial = proxyDialer.Dial - } - return tr -} diff --git a/vendor/github.com/containers/image/signature/docker.go b/vendor/github.com/containers/image/signature/docker.go deleted file mode 100644 index 16eb3f799..000000000 --- a/vendor/github.com/containers/image/signature/docker.go +++ /dev/null @@ -1,65 +0,0 @@ -// Note: Consider the API unstable until the code supports at least three different image formats or transports. - -package signature - -import ( - "fmt" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/opencontainers/go-digest" -) - -// SignDockerManifest returns a signature for manifest as the specified dockerReference, -// using mech and keyIdentity. -func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { - manifestDigest, err := manifest.Digest(m) - if err != nil { - return nil, err - } - sig := newUntrustedSignature(manifestDigest, dockerReference) - return sig.sign(mech, keyIdentity) -} - -// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference, -// using mech. 
-func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte, - expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) { - expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference) - if err != nil { - return nil, err - } - sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{ - validateKeyIdentity: func(keyIdentity string) error { - if keyIdentity != expectedKeyIdentity { - return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)} - } - return nil - }, - validateSignedDockerReference: func(signedDockerReference string) error { - signedRef, err := reference.ParseNormalizedNamed(signedDockerReference) - if err != nil { - return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)} - } - if signedRef.String() != expectedRef.String() { - return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s", - signedDockerReference, expectedDockerReference)} - } - return nil - }, - validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error { - matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest) - if err != nil { - return err - } - if !matches { - return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)} - } - return nil - }, - }) - if err != nil { - return nil, err - } - return sig, nil -} diff --git a/vendor/github.com/containers/image/signature/json.go b/vendor/github.com/containers/image/signature/json.go deleted file mode 100644 index 9e592863d..000000000 --- a/vendor/github.com/containers/image/signature/json.go +++ /dev/null @@ -1,88 +0,0 @@ -package signature - -import ( - "bytes" - "encoding/json" - "fmt" - "io" -) - -// jsonFormatError is returned when JSON does not match expected format. -type jsonFormatError string - -func (err jsonFormatError) Error() string { - return string(err) -} - -// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect -// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to -// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected. -// -// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy, -// we could use reflection to automate this. Later? -func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error { - seenKeys := map[string]struct{}{} - - dec := json.NewDecoder(bytes.NewReader(data)) - t, err := dec.Token() - if err != nil { - return jsonFormatError(err.Error()) - } - if t != json.Delim('{') { - return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t)) - } - for { - t, err := dec.Token() - if err != nil { - return jsonFormatError(err.Error()) - } - if t == json.Delim('}') { - break - } - - key, ok := t.(string) - if !ok { - // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state. 
-			return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
-		}
-		if _, ok := seenKeys[key]; ok {
-			return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
-		}
-		seenKeys[key] = struct{}{}
-
-		valuePtr := fieldResolver(key)
-		if valuePtr == nil {
-			return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
-		}
-		// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
-		if err := dec.Decode(valuePtr); err != nil {
-			return jsonFormatError(err.Error())
-		}
-	}
-	if _, err := dec.Token(); err != io.EOF {
-		return jsonFormatError("Unexpected data after JSON object")
-	}
-	return nil
-}
-
-// paranoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but failing on the slightest unexpected aspect
-// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
-// must be present exactly once, and no other fields are accepted.
-func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
-	seenKeys := map[string]struct{}{}
-	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
-		if valuePtr, ok := exactFields[key]; ok {
-			seenKeys[key] = struct{}{}
-			return valuePtr
-		}
-		return nil
-	}); err != nil {
-		return err
-	}
-	for key := range exactFields {
-		if _, ok := seenKeys[key]; !ok {
-			return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
-		}
-	}
-	return nil
-}
diff --git a/vendor/github.com/containers/image/signature/mechanism.go b/vendor/github.com/containers/image/signature/mechanism.go
deleted file mode 100644
index bdf26c531..000000000
--- a/vendor/github.com/containers/image/signature/mechanism.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Note: Consider the API unstable until the code supports at least three different image formats or transports.
-
-package signature
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"strings"
-
-	"golang.org/x/crypto/openpgp"
-)
-
-// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
-// Each mechanism should eventually be closed by calling Close().
-// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
-// eliminate ambiguities, support CA signatures and perhaps other key properties)
-type SigningMechanism interface {
-	// Close removes resources associated with the mechanism, if any.
-	Close() error
-	// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
-	SupportsSigning() error
-	// Sign creates a (non-detached) signature of input using keyIdentity.
-	// Fails with a SigningNotSupportedError if the mechanism does not support signing.
-	Sign(input []byte, keyIdentity string) ([]byte, error)
-	// Verify parses unverifiedSignature and returns the content and the signer's identity
-	Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
-	// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
-	// along with a short identifier of the key used for signing.
-	// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
-	// is NOT the same as a "key identity" used in other calls to this interface, and
-	// the values may have no recognizable relationship if the public key is not available.
-	UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
-}
-
-// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
-type SigningNotSupportedError string
-
-func (err SigningNotSupportedError) Error() string {
-	return string(err)
-}
-
-// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default
-// GPG configuration ($GNUPGHOME / ~/.gnupg)
-// The caller must call .Close() on the returned SigningMechanism.
-func NewGPGSigningMechanism() (SigningMechanism, error) {
-	return newGPGSigningMechanismInDirectory("")
-}
-
-// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
-// recognizes _only_ public keys from the supplied blob, and returns the identities
-// of these keys.
-// The caller must call .Close() on the returned SigningMechanism.
-func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
-	return newEphemeralGPGSigningMechanism(blob)
-}
-
-// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
-// along with a short identifier of the key used for signing.
-// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
-// is NOT the same as a "key identity" used in other calls to this interface, and
-// the values may have no recognizable relationship if the public key is not available.
-func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
-	// This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography.
-	md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil)
-	if err != nil {
-		return nil, "", err
-	}
-	if !md.IsSigned {
-		return nil, "", errors.New("The input is not a signature")
-	}
-	content, err := ioutil.ReadAll(md.UnverifiedBody)
-	if err != nil {
-		// Coverage: An error during reading the body can happen only if
-		// 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
-		// to decrypt the contents anyway), or
-		// 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t.
-		return nil, "", err
-	}
-
-	// Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints
-	// (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)!
-	return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil
-}
diff --git a/vendor/github.com/containers/image/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/signature/mechanism_gpgme.go
deleted file mode 100644
index 4825ab27c..000000000
--- a/vendor/github.com/containers/image/signature/mechanism_gpgme.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// +build !containers_image_openpgp
-
-package signature
-
-import (
-	"bytes"
-	"fmt"
-	"io/ioutil"
-	"os"
-
-	"github.com/mtrmac/gpgme"
-)
-
-// A GPG/OpenPGP signing mechanism, implemented using gpgme.
-type gpgmeSigningMechanism struct {
-	ctx          *gpgme.Context
-	ephemeralDir string // If not "", a directory to be removed on Close()
-}
-
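As a usage sketch of the ephemeral mechanism above, assuming this package's import path at the time (github.com/containers/image/signature); pubKeyBlob and sigBlob are placeholders for caller-supplied data, and the fmt import is implied:

	// Verify a signature blob against exactly the keys contained in pubKeyBlob.
	func verifyBlob(pubKeyBlob, sigBlob []byte) ([]byte, error) {
		mech, keyIdentities, err := signature.NewEphemeralGPGSigningMechanism(pubKeyBlob)
		if err != nil {
			return nil, err
		}
		defer mech.Close()
		contents, keyIdentity, err := mech.Verify(sigBlob)
		if err != nil {
			return nil, err
		}
		fmt.Printf("signed by %s (imported keys: %v)\n", keyIdentity, keyIdentities)
		return contents, nil
	}

-// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
-// The caller must call .Close() on the returned SigningMechanism.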
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { - ctx, err := newGPGMEContext(optionalDir) - if err != nil { - return nil, err - } - return &gpgmeSigningMechanism{ - ctx: ctx, - ephemeralDir: "", - }, nil -} - -// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which -// recognizes _only_ public keys from the supplied blob, and returns the identities -// of these keys. -// The caller must call .Close() on the returned SigningMechanism. -func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") - if err != nil { - return nil, nil, err - } - removeDir := true - defer func() { - if removeDir { - os.RemoveAll(dir) - } - }() - ctx, err := newGPGMEContext(dir) - if err != nil { - return nil, nil, err - } - mech := &gpgmeSigningMechanism{ - ctx: ctx, - ephemeralDir: dir, - } - keyIdentities, err := mech.importKeysFromBytes(blob) - if err != nil { - return nil, nil, err - } - - removeDir = false - return mech, keyIdentities, nil -} - -// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty. -func newGPGMEContext(optionalDir string) (*gpgme.Context, error) { - ctx, err := gpgme.New() - if err != nil { - return nil, err - } - if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil { - return nil, err - } - if optionalDir != "" { - err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir) - if err != nil { - return nil, err - } - } - ctx.SetArmor(false) - ctx.SetTextMode(false) - return ctx, nil -} - -func (m *gpgmeSigningMechanism) Close() error { - if m.ephemeralDir != "" { - os.RemoveAll(m.ephemeralDir) // Ignore an error, if any - } - return nil -} - -// importKeysFromBytes imports public keys from the supplied blob and returns their identities. -// The blob is assumed to have an appropriate format (the caller is expected to know which one). -// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism); -// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism. -func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { - inputData, err := gpgme.NewDataBytes(blob) - if err != nil { - return nil, err - } - res, err := m.ctx.Import(inputData) - if err != nil { - return nil, err - } - keyIdentities := []string{} - for _, i := range res.Imports { - if i.Result == nil { - keyIdentities = append(keyIdentities, i.Fingerprint) - } - } - return keyIdentities, nil -} - -// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. -func (m *gpgmeSigningMechanism) SupportsSigning() error { - return nil -} - -// Sign creates a (non-detached) signature of input using keyIdentity. -// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
-func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { - key, err := m.ctx.GetKey(keyIdentity, true) - if err != nil { - return nil, err - } - inputData, err := gpgme.NewDataBytes(input) - if err != nil { - return nil, err - } - var sigBuffer bytes.Buffer - sigData, err := gpgme.NewDataWriter(&sigBuffer) - if err != nil { - return nil, err - } - if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { - return nil, err - } - return sigBuffer.Bytes(), nil -} - -// Verify parses unverifiedSignature and returns the content and the signer's identity -func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { - signedBuffer := bytes.Buffer{} - signedData, err := gpgme.NewDataWriter(&signedBuffer) - if err != nil { - return nil, "", err - } - unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature) - if err != nil { - return nil, "", err - } - _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData) - if err != nil { - return nil, "", err - } - if len(sigs) != 1 { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))} - } - sig := sigs[0] - // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves - if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage { - // FIXME: Better error reporting eventually - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)} - } - return signedBuffer.Bytes(), sig.Fingerprint, nil -} - -// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, -// along with a short identifier of the key used for signing. -// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys) -// is NOT the same as a "key identity" used in other calls ot this interface, and -// the values may have no recognizable relationship if the public key is not available. -func (m gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { - return gpgUntrustedSignatureContents(untrustedSignature) -} diff --git a/vendor/github.com/containers/image/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/signature/mechanism_openpgp.go deleted file mode 100644 index eccd610c9..000000000 --- a/vendor/github.com/containers/image/signature/mechanism_openpgp.go +++ /dev/null @@ -1,159 +0,0 @@ -// +build containers_image_openpgp - -package signature - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "os" - "path" - "strings" - "time" - - "github.com/containers/storage/pkg/homedir" - "golang.org/x/crypto/openpgp" -) - -// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp. -type openpgpSigningMechanism struct { - keyring openpgp.EntityList -} - -// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. -// The caller must call .Close() on the returned SigningMechanism. 
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { - m := &openpgpSigningMechanism{ - keyring: openpgp.EntityList{}, - } - - gpgHome := optionalDir - if gpgHome == "" { - gpgHome = os.Getenv("GNUPGHOME") - if gpgHome == "" { - gpgHome = path.Join(homedir.Get(), ".gnupg") - } - } - - pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg")) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - } else { - _, err := m.importKeysFromBytes(pubring) - if err != nil { - return nil, err - } - } - return m, nil -} - -// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which -// recognizes _only_ public keys from the supplied blob, and returns the identities -// of these keys. -// The caller must call .Close() on the returned SigningMechanism. -func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - m := &openpgpSigningMechanism{ - keyring: openpgp.EntityList{}, - } - keyIdentities, err := m.importKeysFromBytes(blob) - if err != nil { - return nil, nil, err - } - return m, keyIdentities, nil -} - -func (m *openpgpSigningMechanism) Close() error { - return nil -} - -// importKeysFromBytes imports public keys from the supplied blob and returns their identities. -// The blob is assumed to have an appropriate format (the caller is expected to know which one). -func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { - keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob)) - if err != nil { - k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob)) - if e2 != nil { - return nil, err // The original error -- FIXME: is this better? - } - keyring = k - } - - keyIdentities := []string{} - for _, entity := range keyring { - if entity.PrimaryKey == nil { - // Coverage: This should never happen, openpgp.ReadEntity fails with a - // openpgp.errors.StructuralError instead of returning an entity with this - // field set to nil. - continue - } - // Uppercase the fingerprint to be compatible with gpgme - keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))) - m.keyring = append(m.keyring, entity) - } - return keyIdentities, nil -} - -// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. -func (m *openpgpSigningMechanism) SupportsSigning() error { - return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") -} - -// Sign creates a (non-detached) signature of input using keyIdentity. -// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
-func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
-	return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
-}
-
-// Verify parses unverifiedSignature and returns the content and the signer's identity
-func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
-	md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
-	if err != nil {
-		return nil, "", err
-	}
-	if !md.IsSigned {
-		return nil, "", errors.New("not signed")
-	}
-	content, err := ioutil.ReadAll(md.UnverifiedBody)
-	if err != nil {
-		// Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
-		// (and possibly also signed, but it _must_ be encrypted) and the signing
-		// “modification detection code” detects a mismatch. But in that case,
-		// we would expect the signature verification to fail as well, and that is checked
-		// first. Besides, we are not supplying any decryption keys, so we really
-		// can never reach this “encrypted data MDC mismatch” path.
-		return nil, "", err
-	}
-	if md.SignatureError != nil {
-		return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
-	}
-	if md.SignedBy == nil {
-		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)}
-	}
-	if md.Signature != nil {
-		if md.Signature.SigLifetimeSecs != nil {
-			expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
-			if time.Now().After(expiry) {
-				return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
-			}
-		}
-	} else if md.SignatureV3 == nil {
-		// Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3,
-		// or sets md.SignatureError.
-		return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"}
-	}
-
-	// Uppercase the fingerprint to be compatible with gpgme
-	return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil
-}
-
-// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
-// along with a short identifier of the key used for signing.
-// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
-// is NOT the same as a "key identity" used in other calls to this interface, and
-// the values may have no recognizable relationship if the public key is not available.
-func (m openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
-	return gpgUntrustedSignatureContents(untrustedSignature)
-}
diff --git a/vendor/github.com/containers/image/signature/policy_config.go b/vendor/github.com/containers/image/signature/policy_config.go
deleted file mode 100644
index 12398e385..000000000
--- a/vendor/github.com/containers/image/signature/policy_config.go
+++ /dev/null
@@ -1,688 +0,0 @@
-// policy_config.go handles creation of policy objects, either by parsing JSON
-// or by programs building them programmatically.

-// The New* constructors are intended to be a stable API. FIXME: after an independent review.

-// Do not invoke the internals of the JSON marshaling/unmarshaling directly.
- -// We can't just blindly call json.Unmarshal because that would silently ignore -// typos, and that would just not do for security policy. - -// FIXME? This is by no means an user-friendly parser: No location information in error messages, no other context. -// But at least it is not worse than blind json.Unmarshal()… - -package signature - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "path/filepath" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -// systemDefaultPolicyPath is the policy path used for DefaultPolicy(). -// You can override this at build time with -// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path' -var systemDefaultPolicyPath = builtinDefaultPolicyPath - -// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). -// DO NOT change this, instead see systemDefaultPolicyPath above. -const builtinDefaultPolicyPath = "/etc/containers/policy.json" - -// InvalidPolicyFormatError is returned when parsing an invalid policy configuration. -type InvalidPolicyFormatError string - -func (err InvalidPolicyFormatError) Error() string { - return string(err) -} - -// DefaultPolicy returns the default policy of the system. -// Most applications should be using this method to get the policy configured -// by the system administrator. -// sys should usually be nil, can be set to override the default. -// NOTE: When this function returns an error, report it to the user and abort. -// DO NOT hard-code fallback policies in your application. -func DefaultPolicy(sys *types.SystemContext) (*Policy, error) { - return NewPolicyFromFile(defaultPolicyPath(sys)) -} - -// defaultPolicyPath returns a path to the default policy of the system. -func defaultPolicyPath(sys *types.SystemContext) string { - if sys != nil { - if sys.SignaturePolicyPath != "" { - return sys.SignaturePolicyPath - } - if sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath) - } - } - return systemDefaultPolicyPath -} - -// NewPolicyFromFile returns a policy configured in the specified file. -func NewPolicyFromFile(fileName string) (*Policy, error) { - contents, err := ioutil.ReadFile(fileName) - if err != nil { - return nil, err - } - policy, err := NewPolicyFromBytes(contents) - if err != nil { - return nil, errors.Wrapf(err, "invalid policy in %q", fileName) - } - return policy, nil -} - -// NewPolicyFromBytes returns a policy parsed from the specified blob. -// Use this function instead of calling json.Unmarshal directly. -func NewPolicyFromBytes(data []byte) (*Policy, error) { - p := Policy{} - if err := json.Unmarshal(data, &p); err != nil { - return nil, InvalidPolicyFormatError(err.Error()) - } - return &p, nil -} - -// Compile-time check that Policy implements json.Unmarshaler. -var _ json.Unmarshaler = (*Policy)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (p *Policy) UnmarshalJSON(data []byte) error { - *p = Policy{} - transports := policyTransportsMap{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - switch key { - case "default": - return &p.Default - case "transports": - return &transports - default: - return nil - } - }); err != nil { - return err - } - - if p.Default == nil { - return InvalidPolicyFormatError("Default policy is missing") - } - p.Transports = map[string]PolicyTransportScopes(transports) - return nil -} - -// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member. -type policyTransportsMap map[string]PolicyTransportScopes - -// Compile-time check that policyTransportsMap implements json.Unmarshaler. -var _ json.Unmarshaler = (*policyTransportsMap)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *policyTransportsMap) UnmarshalJSON(data []byte) error { - // We can't unmarshal directly into map values because it is not possible to take an address of a map value. - // So, use a temporary map of pointers-to-slices and convert. - tmpMap := map[string]*PolicyTransportScopes{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - // transport can be nil - transport := transports.Get(key) - // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe. - if _, ok := tmpMap[key]; ok { - return nil - } - ptsWithTransport := policyTransportScopesWithTransport{ - transport: transport, - dest: &PolicyTransportScopes{}, // This allocates a new instance on each call. - } - tmpMap[key] = ptsWithTransport.dest - return &ptsWithTransport - }); err != nil { - return err - } - for key, ptr := range tmpMap { - (*m)[key] = *ptr - } - return nil -} - -// Compile-time check that PolicyTransportScopes "implements"" json.Unmarshaler. -// we want to only use policyTransportScopesWithTransport -var _ json.Unmarshaler = (*PolicyTransportScopes)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error { - return errors.New("Do not try to unmarshal PolicyTransportScopes directly") -} - -// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes -// while validating using a specific ImageTransport if not nil. -type policyTransportScopesWithTransport struct { - transport types.ImageTransport - dest *PolicyTransportScopes -} - -// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler. -var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error { - // We can't unmarshal directly into map values because it is not possible to take an address of a map value. - // So, use a temporary map of pointers-to-slices and convert. - tmpMap := map[string]*PolicyRequirements{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe. - if _, ok := tmpMap[key]; ok { - return nil - } - if key != "" && m.transport != nil { - if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil { - return nil - } - } - ptr := &PolicyRequirements{} // This allocates a new instance on each call. 
- tmpMap[key] = ptr - return ptr - }); err != nil { - return err - } - for key, ptr := range tmpMap { - (*m.dest)[key] = *ptr - } - return nil -} - -// Compile-time check that PolicyRequirements implements json.Unmarshaler. -var _ json.Unmarshaler = (*PolicyRequirements)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *PolicyRequirements) UnmarshalJSON(data []byte) error { - reqJSONs := []json.RawMessage{} - if err := json.Unmarshal(data, &reqJSONs); err != nil { - return err - } - if len(reqJSONs) == 0 { - return InvalidPolicyFormatError("List of verification policy requirements must not be empty") - } - res := make([]PolicyRequirement, len(reqJSONs)) - for i, reqJSON := range reqJSONs { - req, err := newPolicyRequirementFromJSON(reqJSON) - if err != nil { - return err - } - res[i] = req - } - *m = res - return nil -} - -// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation. -func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { - var typeField prCommon - if err := json.Unmarshal(data, &typeField); err != nil { - return nil, err - } - var res PolicyRequirement - switch typeField.Type { - case prTypeInsecureAcceptAnything: - res = &prInsecureAcceptAnything{} - case prTypeReject: - res = &prReject{} - case prTypeSignedBy: - res = &prSignedBy{} - case prTypeSignedBaseLayer: - res = &prSignedBaseLayer{} - default: - return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type)) - } - if err := json.Unmarshal(data, &res); err != nil { - return nil, err - } - return res, nil -} - -// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. -func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { - return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} -} - -// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement. -func NewPRInsecureAcceptAnything() PolicyRequirement { - return newPRInsecureAcceptAnything() -} - -// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. -var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { - *pr = prInsecureAcceptAnything{} - var tmp prInsecureAcceptAnything - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prTypeInsecureAcceptAnything { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *pr = *newPRInsecureAcceptAnything() - return nil -} - -// newPRReject is NewPRReject, except it returns the private type. -func newPRReject() *prReject { - return &prReject{prCommon{Type: prTypeReject}} -} - -// NewPRReject returns a new "reject" PolicyRequirement. -func NewPRReject() PolicyRequirement { - return newPRReject() -} - -// Compile-time check that prReject implements json.Unmarshaler. -var _ json.Unmarshaler = (*prReject)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (pr *prReject) UnmarshalJSON(data []byte) error {
-	*pr = prReject{}
-	var tmp prReject
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
-		"type": &tmp.Type,
-	}); err != nil {
-		return err
-	}
-
-	if tmp.Type != prTypeReject {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
-	}
-	*pr = *newPRReject()
-	return nil
-}
-
-// newPRSignedBy returns a new prSignedBy if parameters are valid.
-func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
-	if !keyType.IsValid() {
-		return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
-	}
-	if len(keyPath) > 0 && len(keyData) > 0 {
-		return nil, InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
-	}
-	if signedIdentity == nil {
-		return nil, InvalidPolicyFormatError("signedIdentity not specified")
-	}
-	return &prSignedBy{
-		prCommon:       prCommon{Type: prTypeSignedBy},
-		KeyType:        keyType,
-		KeyPath:        keyPath,
-		KeyData:        keyData,
-		SignedIdentity: signedIdentity,
-	}, nil
-}
-
-// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
-func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
-	return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
-}
-
-// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath
-func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
-	return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
-}
-
-// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
-func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
-	return newPRSignedBy(keyType, "", keyData, signedIdentity)
-}
-
-// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData
-func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
-	return newPRSignedByKeyData(keyType, keyData, signedIdentity)
-}
-
-// Compile-time check that prSignedBy implements json.Unmarshaler.
-var _ json.Unmarshaler = (*prSignedBy)(nil)
-
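These constructors also allow building a requirement programmatically rather than from JSON. A minimal sketch using them (the key path is hypothetical; NewPRMMatchRepoDigestOrExact is defined further below in this file):

	// Hypothetical key path; SBKeyTypeGPGKeys and the constructors are from this package.
	prm := signature.NewPRMMatchRepoDigestOrExact()
	req, err := signature.NewPRSignedByKeyPath(signature.SBKeyTypeGPGKeys, "/etc/pki/containers/registry-key.gpg", prm)

-// UnmarshalJSON implements the json.Unmarshaler interface.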
-func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
-	*pr = prSignedBy{}
-	var tmp prSignedBy
-	var gotKeyPath, gotKeyData = false, false
-	var signedIdentity json.RawMessage
-	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
-		switch key {
-		case "type":
-			return &tmp.Type
-		case "keyType":
-			return &tmp.KeyType
-		case "keyPath":
-			gotKeyPath = true
-			return &tmp.KeyPath
-		case "keyData":
-			gotKeyData = true
-			return &tmp.KeyData
-		case "signedIdentity":
-			return &signedIdentity
-		default:
-			return nil
-		}
-	}); err != nil {
-		return err
-	}
-
-	if tmp.Type != prTypeSignedBy {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
-	}
-	if signedIdentity == nil {
-		tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
-	} else {
-		si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
-		if err != nil {
-			return err
-		}
-		tmp.SignedIdentity = si
-	}
-
-	var res *prSignedBy
-	var err error
-	switch {
-	case gotKeyPath && gotKeyData:
-		return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
-	case gotKeyPath && !gotKeyData:
-		res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
-	case !gotKeyPath && gotKeyData:
-		res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
-	case !gotKeyPath && !gotKeyData:
-		return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified")
-	default: // Coverage: This should never happen
-		return errors.Errorf("Impossible keyPath/keyData presence combination!?")
-	}
-	if err != nil {
-		return err
-	}
-	*pr = *res
-
-	return nil
-}
-
-// IsValid returns true iff kt is a recognized value
-func (kt sbKeyType) IsValid() bool {
-	switch kt {
-	case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys,
-		SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
-		return true
-	default:
-		return false
-	}
-}
-
-// Compile-time check that sbKeyType implements json.Unmarshaler.
-var _ json.Unmarshaler = (*sbKeyType)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
-	*kt = sbKeyType("")
-	var s string
-	if err := json.Unmarshal(data, &s); err != nil {
-		return err
-	}
-	if !sbKeyType(s).IsValid() {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
-	}
-	*kt = sbKeyType(s)
-	return nil
-}
-
-// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type.
-func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {
-	if baseLayerIdentity == nil {
-		return nil, InvalidPolicyFormatError("baseLayerIdentity not specified")
-	}
-	return &prSignedBaseLayer{
-		prCommon:          prCommon{Type: prTypeSignedBaseLayer},
-		BaseLayerIdentity: baseLayerIdentity,
-	}, nil
-}
-
-// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement.
-func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
-	return newPRSignedBaseLayer(baseLayerIdentity)
-}
-
-// Compile-time check that prSignedBaseLayer implements json.Unmarshaler.
-var _ json.Unmarshaler = (*prSignedBaseLayer)(nil)
-
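Putting the pieces together, a minimal policy.json that this parser would accept might look as follows. This is a sketch: the registry scope and key path are hypothetical, and the string values ("signedBy", "GPGKeys", "matchRepoDigestOrExact", "reject") are the constant values the constructors above suggest. As the code above shows, "signedIdentity" could be omitted here, since it defaults to matchRepoDigestOrExact.

	{
	    "default": [{"type": "reject"}],
	    "transports": {
	        "docker": {
	            "registry.example.com": [
	                {
	                    "type": "signedBy",
	                    "keyType": "GPGKeys",
	                    "keyPath": "/etc/pki/containers/registry-key.gpg",
	                    "signedIdentity": {"type": "matchRepoDigestOrExact"}
	                }
	            ]
	        }
	    }
	}

-// UnmarshalJSON implements the json.Unmarshaler interface.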
-func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error { - *pr = prSignedBaseLayer{} - var tmp prSignedBaseLayer - var baseLayerIdentity json.RawMessage - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - "baseLayerIdentity": &baseLayerIdentity, - }); err != nil { - return err - } - - if tmp.Type != prTypeSignedBaseLayer { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity) - if err != nil { - return err - } - res, err := newPRSignedBaseLayer(bli) - if err != nil { - // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid. - return err - } - *pr = *res - return nil -} - -// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation. -func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) { - var typeField prmCommon - if err := json.Unmarshal(data, &typeField); err != nil { - return nil, err - } - var res PolicyReferenceMatch - switch typeField.Type { - case prmTypeMatchExact: - res = &prmMatchExact{} - case prmTypeMatchRepoDigestOrExact: - res = &prmMatchRepoDigestOrExact{} - case prmTypeMatchRepository: - res = &prmMatchRepository{} - case prmTypeExactReference: - res = &prmExactReference{} - case prmTypeExactRepository: - res = &prmExactRepository{} - default: - return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type)) - } - if err := json.Unmarshal(data, &res); err != nil { - return nil, err - } - return res, nil -} - -// newPRMMatchExact is NewPRMMatchExact, except it resturns the private type. -func newPRMMatchExact() *prmMatchExact { - return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}} -} - -// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch. -func NewPRMMatchExact() PolicyReferenceMatch { - return newPRMMatchExact() -} - -// Compile-time check that prmMatchExact implements json.Unmarshaler. -var _ json.Unmarshaler = (*prmMatchExact)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (prm *prmMatchExact) UnmarshalJSON(data []byte) error { - *prm = prmMatchExact{} - var tmp prmMatchExact - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prmTypeMatchExact { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *prm = *newPRMMatchExact() - return nil -} - -// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it resturns the private type. -func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact { - return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}} -} - -// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch. -func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch { - return newPRMMatchRepoDigestOrExact() -} - -// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler. -var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error { - *prm = prmMatchRepoDigestOrExact{} - var tmp prmMatchRepoDigestOrExact - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prmTypeMatchRepoDigestOrExact { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *prm = *newPRMMatchRepoDigestOrExact() - return nil -} - -// newPRMMatchRepository is NewPRMMatchRepository, except it resturns the private type. -func newPRMMatchRepository() *prmMatchRepository { - return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}} -} - -// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch. -func NewPRMMatchRepository() PolicyReferenceMatch { - return newPRMMatchRepository() -} - -// Compile-time check that prmMatchRepository implements json.Unmarshaler. -var _ json.Unmarshaler = (*prmMatchRepository)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error { - *prm = prmMatchRepository{} - var tmp prmMatchRepository - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prmTypeMatchRepository { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *prm = *newPRMMatchRepository() - return nil -} - -// newPRMExactReference is NewPRMExactReference, except it resturns the private type. -func newPRMExactReference(dockerReference string) (*prmExactReference, error) { - ref, err := reference.ParseNormalizedNamed(dockerReference) - if err != nil { - return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error())) - } - if reference.IsNameOnly(ref) { - return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference)) - } - return &prmExactReference{ - prmCommon: prmCommon{Type: prmTypeExactReference}, - DockerReference: dockerReference, - }, nil -} - -// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch. -func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) { - return newPRMExactReference(dockerReference) -} - -// Compile-time check that prmExactReference implements json.Unmarshaler. -var _ json.Unmarshaler = (*prmExactReference)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (prm *prmExactReference) UnmarshalJSON(data []byte) error { - *prm = prmExactReference{} - var tmp prmExactReference - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - "dockerReference": &tmp.DockerReference, - }); err != nil { - return err - } - - if tmp.Type != prmTypeExactReference { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - - res, err := newPRMExactReference(tmp.DockerReference) - if err != nil { - return err - } - *prm = *res - return nil -} - -// newPRMExactRepository is NewPRMExactRepository, except it resturns the private type. 
-func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
-	if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
-		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
-	}
-	return &prmExactRepository{
-		prmCommon:        prmCommon{Type: prmTypeExactRepository},
-		DockerRepository: dockerRepository,
-	}, nil
-}
-
-// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
-func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
-	return newPRMExactRepository(dockerRepository)
-}
-
-// Compile-time check that prmExactRepository implements json.Unmarshaler.
-var _ json.Unmarshaler = (*prmExactRepository)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
-	*prm = prmExactRepository{}
-	var tmp prmExactRepository
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
-		"type":             &tmp.Type,
-		"dockerRepository": &tmp.DockerRepository,
-	}); err != nil {
-		return err
-	}
-
-	if tmp.Type != prmTypeExactRepository {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
-	}
-
-	res, err := newPRMExactRepository(tmp.DockerRepository)
-	if err != nil {
-		return err
-	}
-	*prm = *res
-	return nil
-}
diff --git a/vendor/github.com/containers/image/signature/policy_eval.go b/vendor/github.com/containers/image/signature/policy_eval.go
deleted file mode 100644
index b66ece41c..000000000
--- a/vendor/github.com/containers/image/signature/policy_eval.go
+++ /dev/null
@@ -1,289 +0,0 @@
-// This defines the top-level policy evaluation API.
-// To the extent possible, the interface of the functions provided
-// here is intended to be completely unambiguous, and stable for users
-// to rely on.
-
-package signature
-
-import (
-	"context"
-
-	"github.com/containers/image/types"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
-type PolicyRequirementError string
-
-func (err PolicyRequirementError) Error() string {
-	return string(err)
-}
-
-// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
-type signatureAcceptanceResult string
-
-const (
-	sarAccepted signatureAcceptanceResult = "sarAccepted"
-	sarRejected signatureAcceptanceResult = "sarRejected"
-	sarUnknown  signatureAcceptanceResult = "sarUnknown"
-)
-
-// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
-// The type is public, but its definition is private.
-type PolicyRequirement interface {
-	// FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
-	// costly initialization like creating temporary GPG home directories and reading files.
-	// Setup() (someState, error)
-	// Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
-
-	// isSignatureAuthorAccepted, given an image and a signature blob, returns:
-	// - sarAccepted if the signature has been verified against the appropriate public key
-	//   (where "appropriate public key" may depend on the contents of the signature);
-	//   in that case a parsed Signature should be returned.
-	// - sarRejected if the signature has not been verified;
-	//   in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
-	//   succeeded but the result was rejection.
-	// - sarUnknown if this PolicyRequirement does not deal with signatures.
-	//   NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
-	//   Returning sarUnknown and a non-nil error value is invalid.
-	// WARNING: This makes the signature contents acceptable for further processing,
-	// but it does not necessarily mean that the contents of the signature are
-	// consistent with local policy.
-	// For example:
-	// - Do not use a true value to determine whether to run
-	//   a container based on this image; use IsRunningImageAllowed instead.
-	// - Just because a signature is accepted does not automatically mean the contents of the
-	//   signature are authorized to run code as root, or to affect system or cluster configuration.
-	isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
-
-	// isRunningImageAllowed returns true if the requirement allows running an image.
-	// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
-	// succeeded but the result was rejection.
-	// WARNING: This validates signatures and the manifest, but does not download or validate the
-	// layers. Users must validate that the layers match their expected digests.
-	isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error)
-}
-
-// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
-// The type is public, but its implementation is private.
-type PolicyReferenceMatch interface {
-	// matchesDockerReference decides whether a specific image identity is accepted for an image
-	// (or, usually, for the image's Reference().DockerReference()). Note that
-	// image.Reference().DockerReference() may be nil.
-	matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
-}
-
-// PolicyContext encapsulates a policy and possible cached state
-// for speeding up its evaluation.
-type PolicyContext struct {
-	Policy *Policy
-	state  policyContextState // Internal consistency checking
-}
-
-// policyContextState is used internally to verify the users are not misusing a PolicyContext.
-type policyContextState string
-
-const (
-	pcInvalid      policyContextState = ""
-	pcInitializing policyContextState = "Initializing"
-	pcReady        policyContextState = "Ready"
-	pcInUse        policyContextState = "InUse"
-	pcDestroying   policyContextState = "Destroying"
-	pcDestroyed    policyContextState = "Destroyed"
-)
-
-// changeState changes pc.state, or fails if the state is unexpected
-func (pc *PolicyContext) changeState(expected, new policyContextState) error {
-	if pc.state != expected {
-		return errors.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
-	}
-	pc.state = new
-	return nil
-}
-
-// NewPolicyContext sets up and initializes a context for the specified policy.
-// The policy must not be modified while the context exists. FIXME: make a deep copy?
-// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
-func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
-	pc := &PolicyContext{Policy: policy, state: pcInitializing}
-	// FIXME: initialize
-	if err := pc.changeState(pcInitializing, pcReady); err != nil {
-		// Huh?! This should never fail, we didn't give the pointer to anybody.
-		// Just give up and leave unclean state around.
-		return nil, err
-	}
-	return pc, nil
-}
-
-// Destroy should be called when the user of the context is done with it.
-func (pc *PolicyContext) Destroy() error {
-	if err := pc.changeState(pcReady, pcDestroying); err != nil {
-		return err
-	}
-	// FIXME: destroy
-	return pc.changeState(pcDestroying, pcDestroyed)
-}
-
-// policyIdentityLogName returns a string description of the image identity for policy purposes.
-// ONLY use this for log messages, not for any decisions!
-func policyIdentityLogName(ref types.ImageReference) string {
-	return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
-}
-
-// requirementsForImageRef selects the appropriate requirements for ref.
-func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
-	// Do we have a PolicyTransportScopes for this transport?
-	transportName := ref.Transport().Name()
-	if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
-		// Look for a full match.
-		identity := ref.PolicyConfigurationIdentity()
-		if req, ok := transportScopes[identity]; ok {
-			logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
-			return req
-		}
-
-		// Look for a match of the possible parent namespaces.
-		for _, name := range ref.PolicyConfigurationNamespaces() {
-			if req, ok := transportScopes[name]; ok {
-				logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
-				return req
-			}
-		}
-
-		// Look for a default match for the transport.
-		if req, ok := transportScopes[""]; ok {
-			logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
-			return req
-		}
-	}
-
-	logrus.Debugf(" Using default policy section")
-	return pc.Policy.Default
-}
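
A minimal lifecycle sketch for the API above, assuming the post-update import paths (github.com/containers/image/v4/...) and the public constructors from policy_config.go; the policy contents and image name are illustrative, not a recommended configuration:

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v4/image"
	"github.com/containers/image/v4/signature"
	"github.com/containers/image/v4/transports/alltransports"
)

func main() {
	ctx := context.Background()
	// Reject by default; accept anything from one illustrative scope.
	policy := &signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRReject()},
		Transports: map[string]signature.PolicyTransportScopes{
			"docker": {
				"docker.io/library": {signature.NewPRInsecureAcceptAnything()},
			},
		},
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer pc.Destroy() // required by the pcReady -> pcDestroyed state machine above

	ref, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	src, err := ref.NewImageSource(ctx, nil)
	if err != nil {
		panic(err)
	}
	defer src.Close()

	allowed, err := pc.IsRunningImageAllowed(ctx, image.UnparsedInstance(src, nil))
	fmt.Println(allowed, err)
}
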
-// GetSignaturesWithAcceptedAuthor returns those signatures from an image
-// for which the policy accepts the author (and which have been successfully
-// verified).
-// NOTE: This may legitimately return an empty list and no error, if the image
-// has no signatures or only invalid signatures.
-// WARNING: This makes the signature contents acceptable for further processing,
-// but it does not necessarily mean that the contents of the signature are
-// consistent with local policy.
-// For example:
-// - Do not use the existence of an accepted signature to determine whether to run
-//   a container based on this image; use IsRunningImageAllowed instead.
-// - Just because a signature is accepted does not automatically mean the contents of the
-//   signature are authorized to run code as root, or to affect system or cluster configuration.
-func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, image types.UnparsedImage) (sigs []*Signature, finalErr error) {
-	if err := pc.changeState(pcReady, pcInUse); err != nil {
-		return nil, err
-	}
-	defer func() {
-		if err := pc.changeState(pcInUse, pcReady); err != nil {
-			sigs = nil
-			finalErr = err
-		}
-	}()
-
-	logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
-	reqs := pc.requirementsForImageRef(image.Reference())
-
-	// FIXME: rename Signatures to UnverifiedSignatures
-	unverifiedSignatures, err := image.Signatures(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	res := make([]*Signature, 0, len(unverifiedSignatures))
-	for sigNumber, sig := range unverifiedSignatures {
-		var acceptedSig *Signature // non-nil if accepted
-		rejected := false
-		// FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
-		logrus.Debugf("Evaluating signature %d:", sigNumber)
-	interpretingReqs:
-		for reqNumber, req := range reqs {
-			// FIXME: Log the requirement itself? For now, we use just the number.
-			// FIXME: supply state
-			switch res, as, err := req.isSignatureAuthorAccepted(ctx, image, sig); res {
-			case sarAccepted:
-				if as == nil { // Coverage: this should never happen
-					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
-					rejected = true
-					break interpretingReqs
-				}
-				logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
-				if acceptedSig == nil {
-					acceptedSig = as
-				} else if *as != *acceptedSig { // Coverage: this should never happen
-					// Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
-					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber)
-					rejected = true
-					acceptedSig = nil
-					break interpretingReqs
-				}
-			case sarRejected:
-				logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error())
-				rejected = true
-				break interpretingReqs
-			case sarUnknown:
-				if err != nil { // Coverage: this should never happen
-					logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error())
-					rejected = true
-					break interpretingReqs
-				}
-				logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber)
-			default: // Coverage: this should never happen
-				logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res))
-				rejected = true
-				break interpretingReqs
-			}
-		}
-		// This also handles the (invalid) case of empty reqs, by rejecting the signature.
-		if acceptedSig != nil && !rejected {
-			logrus.Debugf(" Overall: OK, signature accepted")
-			res = append(res, acceptedSig)
-		} else {
-			logrus.Debugf(" Overall: Signature not accepted")
-		}
-	}
-	return res, nil
-}
-
-// IsRunningImageAllowed returns true iff the policy allows running the image.
-// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
-// succeeded but the result was rejection.
-// WARNING: This validates signatures and the manifest, but does not download or validate the
-// layers. Users must validate that the layers match their expected digests.
-func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (res bool, finalErr error) { - if err := pc.changeState(pcReady, pcInUse); err != nil { - return false, err - } - defer func() { - if err := pc.changeState(pcInUse, pcReady); err != nil { - res = false - finalErr = err - } - }() - - logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference())) - reqs := pc.requirementsForImageRef(image.Reference()) - - if len(reqs) == 0 { - return false, PolicyRequirementError("List of verification policy requirements must not be empty") - } - - for reqNumber, req := range reqs { - // FIXME: supply state - allowed, err := req.isRunningImageAllowed(ctx, image) - if !allowed { - logrus.Debugf("Requirement %d: denied, done", reqNumber) - return false, err - } - logrus.Debugf(" Requirement %d: allowed", reqNumber) - } - // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image. - logrus.Debugf("Overall: allowed") - return true, nil -} diff --git a/vendor/github.com/containers/image/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/signature/policy_eval_baselayer.go deleted file mode 100644 index 54c6dc124..000000000 --- a/vendor/github.com/containers/image/signature/policy_eval_baselayer.go +++ /dev/null @@ -1,20 +0,0 @@ -// Policy evaluation for prSignedBaseLayer. - -package signature - -import ( - "context" - - "github.com/containers/image/types" - "github.com/sirupsen/logrus" -) - -func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - return sarUnknown, nil, nil -} - -func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { - // FIXME? Reject this at policy parsing time already? - logrus.Errorf("signedBaseLayer not implemented yet!") - return false, PolicyRequirementError("signedBaseLayer not implemented yet!") -} diff --git a/vendor/github.com/containers/image/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/signature/policy_eval_signedby.go deleted file mode 100644 index d59ffa18b..000000000 --- a/vendor/github.com/containers/image/signature/policy_eval_signedby.go +++ /dev/null @@ -1,131 +0,0 @@ -// Policy evaluation for prSignedBy. - -package signature - -import ( - "context" - "fmt" - "io/ioutil" - "strings" - - "github.com/pkg/errors" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" -) - -func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - switch pr.KeyType { - case SBKeyTypeGPGKeys: - case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: - // FIXME? Reject this at policy parsing time already? 
-		return sarRejected, nil, errors.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
-	default:
-		// This should never happen, newPRSignedBy ensures KeyType.IsValid()
-		return sarRejected, nil, errors.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
-	}
-
-	if pr.KeyPath != "" && pr.KeyData != nil {
-		return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
-	}
-	// FIXME: move this to per-context initialization
-	var data []byte
-	if pr.KeyData != nil {
-		data = pr.KeyData
-	} else {
-		d, err := ioutil.ReadFile(pr.KeyPath)
-		if err != nil {
-			return sarRejected, nil, err
-		}
-		data = d
-	}
-
-	// FIXME: move this to per-context initialization
-	mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
-	if err != nil {
-		return sarRejected, nil, err
-	}
-	defer mech.Close()
-	if len(trustedIdentities) == 0 {
-		return sarRejected, nil, PolicyRequirementError("No public keys imported")
-	}
-
-	signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
-		validateKeyIdentity: func(keyIdentity string) error {
-			for _, trustedIdentity := range trustedIdentities {
-				if keyIdentity == trustedIdentity {
-					return nil
-				}
-			}
-			// Coverage: We use a private GPG home directory and only import trusted keys, so this should
-			// not be reachable.
-			return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))
-		},
-		validateSignedDockerReference: func(ref string) error {
-			if !pr.SignedIdentity.matchesDockerReference(image, ref) {
-				return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
-			}
-			return nil
-		},
-		validateSignedDockerManifestDigest: func(digest digest.Digest) error {
-			m, _, err := image.Manifest(ctx)
-			if err != nil {
-				return err
-			}
-			digestMatches, err := manifest.MatchesDigest(m, digest)
-			if err != nil {
-				return err
-			}
-			if !digestMatches {
-				return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
-			}
-			return nil
-		},
-	})
-	if err != nil {
-		return sarRejected, nil, err
-	}
-
-	return sarAccepted, signature, nil
-}
-
-func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
-	sigs, err := image.Signatures(ctx)
-	if err != nil {
-		return false, err
-	}
-	var rejections []error
-	for _, s := range sigs {
-		var reason error
-		switch res, _, err := pr.isSignatureAuthorAccepted(ctx, image, s); res {
-		case sarAccepted:
-			// One accepted signature is enough.
-			return true, nil
-		case sarRejected:
-			reason = err
-		case sarUnknown:
-			// Huh?! This should not happen at all; treat it as any other invalid value.
- fallthrough - default: - reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) - } - rejections = append(rejections, reason) - } - var summary error - switch len(rejections) { - case 0: - summary = PolicyRequirementError("A signature was required, but no signature exists") - case 1: - summary = rejections[0] - default: - var msgs []string - for _, e := range rejections { - msgs = append(msgs, e.Error()) - } - summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", - strings.Join(msgs, "; "))) - } - return false, summary -} diff --git a/vendor/github.com/containers/image/signature/policy_eval_simple.go b/vendor/github.com/containers/image/signature/policy_eval_simple.go deleted file mode 100644 index b0f2fff20..000000000 --- a/vendor/github.com/containers/image/signature/policy_eval_simple.go +++ /dev/null @@ -1,29 +0,0 @@ -// Policy evaluation for the various simple PolicyRequirement types. - -package signature - -import ( - "context" - "fmt" - - "github.com/containers/image/transports" - "github.com/containers/image/types" -) - -func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - // prInsecureAcceptAnything semantics: Every image is allowed to run, - // but this does not consider the signature as verified. - return sarUnknown, nil, nil -} - -func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { - return true, nil -} - -func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference()))) -} - -func (pr *prReject) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { - return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference()))) -} diff --git a/vendor/github.com/containers/image/signature/policy_reference_match.go b/vendor/github.com/containers/image/signature/policy_reference_match.go deleted file mode 100644 index a8dad6770..000000000 --- a/vendor/github.com/containers/image/signature/policy_reference_match.go +++ /dev/null @@ -1,101 +0,0 @@ -// PolicyReferenceMatch implementations. - -package signature - -import ( - "fmt" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" -) - -// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images. 
-func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
-	r1 := image.Reference().DockerReference()
-	if r1 == nil {
-		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
-			transports.ImageName(image.Reference())))
-	}
-	r2, err := reference.ParseNormalizedNamed(s2)
-	if err != nil {
-		return nil, nil, err
-	}
-	return r1, r2, nil
-}
-
-func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
-	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
-		return false
-	}
-	return signature.String() == intended.String()
-}
-
-func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-
-	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
-	if reference.IsNameOnly(signature) {
-		return false
-	}
-	switch intended.(type) {
-	case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
-		return signature.String() == intended.String()
-	case reference.Canonical:
-		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy, and in UnparsedImage.Manifest.
-		// Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
-		// we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms)
-		return signature.Name() == intended.Name()
-	default: // !reference.IsNameOnly(intended)
-		return false
-	}
-}
-
-func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	return signature.Name() == intended.Name()
-}
-
-// parseDockerReferences converts two reference strings into parsed entities, failing on any error
-func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
-	r1, err := reference.ParseNormalizedNamed(s1)
-	if err != nil {
-		return nil, nil, err
-	}
-	r2, err := reference.ParseNormalizedNamed(s2)
-	if err != nil {
-		return nil, nil, err
-	}
-	return r1, r2, nil
-}
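
The difference between the matchers above is easiest to see on concrete references. A standalone sketch using only the reference package (the image names are illustrative):

package main

import (
	"fmt"

	"github.com/containers/image/v4/docker/reference"
)

func main() {
	intended, _ := reference.ParseNormalizedNamed("docker.io/library/busybox:1.31")
	signed, _ := reference.ParseNormalizedNamed("docker.io/library/busybox:latest")

	// matchExact compares the full strings, so differing tags do not match.
	fmt.Println(signed.String() == intended.String()) // false

	// matchRepository compares only the repository names, so these do match.
	fmt.Println(signed.Name() == intended.Name()) // true
}
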
-func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	// prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
-	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
-		return false
-	}
-	return signature.String() == intended.String()
-}
-
-func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	return signature.Name() == intended.Name()
-}
diff --git a/vendor/github.com/containers/image/signature/policy_types.go b/vendor/github.com/containers/image/signature/policy_types.go
deleted file mode 100644
index d3b33bb7a..000000000
--- a/vendor/github.com/containers/image/signature/policy_types.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Note: Consider the API unstable until the code supports at least three different image formats or transports.
-
-// This defines types used to represent a signature verification policy in memory.
-// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
-// built using the constructor functions provided in policy_config.go.
-
-package signature
-
-// NOTE: Keep this in sync with docs/containers-policy.json.5.md!
-
-// Policy defines requirements for considering a signature, or an image, valid.
-type Policy struct {
-	// Default applies to any image which does not have a matching policy in Transports.
-	// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
-	// if the image matches none of the scopes.
-	Default    PolicyRequirements               `json:"default"`
-	Transports map[string]PolicyTransportScopes `json:"transports"`
-}
-
-// PolicyTransportScopes defines policies for images for a specific transport,
-// for various scopes, the map keys.
-// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
-// there is one scope precisely matching to a single image, and namespace scopes as prefixes
-// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
-// The empty scope, if it exists, is considered a parent namespace of all other scopes.
-// Most specific scope wins, duplication is prohibited (hard failure).
-type PolicyTransportScopes map[string]PolicyRequirements
-
-// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
-// Must not be empty, frequently will only contain a single element.
-type PolicyRequirements []PolicyRequirement
-
-// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
-// The type is public, but its definition is private.
-
-// prCommon is the common type field in a JSON encoding of PolicyRequirement.
-type prCommon struct {
-	Type prTypeIdentifier `json:"type"`
-}
-
-// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
-type prTypeIdentifier string
-
-const (
-	prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
-	prTypeReject                 prTypeIdentifier = "reject"
-	prTypeSignedBy               prTypeIdentifier = "signedBy"
-	prTypeSignedBaseLayer        prTypeIdentifier = "signedBaseLayer"
-)
-
-// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
-// every image is allowed to run.
-// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
-// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
-// FIXME? Better name?
-type prInsecureAcceptAnything struct {
-	prCommon
-}
-
-// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
-type prReject struct {
-	prCommon
-}
-
-// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
-type prSignedBy struct {
-	prCommon
-
-	// KeyType specifies what kind of key reference KeyPath/KeyData is.
-	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
-	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
-	KeyType sbKeyType `json:"keyType"`
-
-	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
-	KeyPath string `json:"keyPath,omitempty"`
-	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
-	KeyData []byte `json:"keyData,omitempty"`
-
-	// SignedIdentity specifies what image identity the signature must be claiming about the image.
-	// Defaults to "match-exact" if not specified.
-	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
-}
-
-// sbKeyType are the allowed values for prSignedBy.KeyType
-type sbKeyType string
-
-const (
-	// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
-	SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
-	// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
-	SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
-	// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
-	// FIXME: PEM, DER?
-	SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
-	// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
-	// FIXME: PEM, DER?
-	SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
-)
-
-// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
-type prSignedBaseLayer struct {
-	prCommon
-	// BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
-	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
-}
-
-// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
-// The type is public, but its implementation is private.
-
-// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
-type prmCommon struct {
-	Type prmTypeIdentifier `json:"type"`
-}
-
-// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
-type prmTypeIdentifier string
-
-const (
-	prmTypeMatchExact             prmTypeIdentifier = "matchExact"
-	prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
-	prmTypeMatchRepository        prmTypeIdentifier = "matchRepository"
-	prmTypeExactReference         prmTypeIdentifier = "exactReference"
-	prmTypeExactRepository        prmTypeIdentifier = "exactRepository"
-)
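
These types mirror the on-disk policy.json format documented in docs/containers-policy.json.5.md. A sketch of parsing such a policy with the public NewPolicyFromBytes constructor from policy_config.go (the registry name and key path are illustrative):

package main

import (
	"fmt"

	"github.com/containers/image/v4/signature"
)

func main() {
	// Reject everything except images from one registry that are signed
	// by the GPG key(s) in /etc/pki/example.gpg (illustrative path).
	data := []byte(`{
	    "default": [{"type": "reject"}],
	    "transports": {
	        "docker": {
	            "registry.example.com": [
	                {"type": "signedBy", "keyType": "GPGKeys", "keyPath": "/etc/pki/example.gpg"}
	            ]
	        }
	    }
	}`)
	policy, err := signature.NewPolicyFromBytes(data)
	fmt.Println(policy != nil, err)
}
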
-// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
-type prmMatchExact struct {
-	prmCommon
-}
-
-// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
-// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest
-type prmMatchRepoDigestOrExact struct {
-	prmCommon
-}
-
-// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
-type prmMatchRepository struct {
-	prmCommon
-}
-
-// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
-type prmExactReference struct {
-	prmCommon
-	DockerReference string `json:"dockerReference"`
-}
-
-// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
-type prmExactRepository struct {
-	prmCommon
-	DockerRepository string `json:"dockerRepository"`
-}
diff --git a/vendor/github.com/containers/image/signature/signature.go b/vendor/github.com/containers/image/signature/signature.go
deleted file mode 100644
index 41f13f72f..000000000
--- a/vendor/github.com/containers/image/signature/signature.go
+++ /dev/null
@@ -1,280 +0,0 @@
-// Note: Consider the API unstable until the code supports at least three different image formats or transports.
-
-// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
-
-package signature
-
-import (
-	"encoding/json"
-	"fmt"
-	"time"
-
-	"github.com/pkg/errors"
-
-	"github.com/containers/image/version"
-	"github.com/opencontainers/go-digest"
-)
-
-const (
-	signatureType = "atomic container signature"
-)
-
-// InvalidSignatureError is returned when parsing an invalid signature.
-type InvalidSignatureError struct {
-	msg string
-}
-
-func (err InvalidSignatureError) Error() string {
-	return err.msg
-}
-
-// Signature is the parsed content of a signature.
-// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
-type Signature struct {
-	DockerManifestDigest digest.Digest
-	DockerReference      string // FIXME: more precise type?
-}
-
-// untrustedSignature is the parsed content of a signature.
-type untrustedSignature struct {
-	UntrustedDockerManifestDigest digest.Digest
-	UntrustedDockerReference      string // FIXME: more precise type?
-	UntrustedCreatorID            *string
-	// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
-	// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
-	// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
-	// we would add another field, UntrustedTimestampNS int64.
-	UntrustedTimestamp *int64
-}
-
-// UntrustedSignatureInformation is information available in an untrusted signature.
-// This may be useful when debugging signature verification failures,
-// or when managing a set of signatures on a single image.
-//
-// WARNING: Do not use the contents of this for ANY security decisions,
-// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
-// There is NO REASON to expect the values to be correct, or not intentionally misleading
-// (including things like “✅ Verified by $authority”)
-type UntrustedSignatureInformation struct {
-	UntrustedDockerManifestDigest digest.Digest
-	UntrustedDockerReference      string // FIXME: more precise type?
-	UntrustedCreatorID            *string
-	UntrustedTimestamp            *time.Time
-	UntrustedShortKeyIdentifier   string
-}
-
-// newUntrustedSignature returns an untrustedSignature object with
-// the specified primary contents and appropriate metadata.
-func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature {
-	// Use intermediate variables for these values so that we can take their addresses.
-	// Golang guarantees that they will have a new address on every execution.
-	creatorID := "atomic " + version.Version
-	timestamp := time.Now().Unix()
-	return untrustedSignature{
-		UntrustedDockerManifestDigest: dockerManifestDigest,
-		UntrustedDockerReference:      dockerReference,
-		UntrustedCreatorID:            &creatorID,
-		UntrustedTimestamp:            &timestamp,
-	}
-}
-
-// Compile-time check that untrustedSignature implements json.Marshaler
-var _ json.Marshaler = (*untrustedSignature)(nil)
-
-// MarshalJSON implements the json.Marshaler interface.
-func (s untrustedSignature) MarshalJSON() ([]byte, error) {
-	if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
-		return nil, errors.New("Unexpected empty signature content")
-	}
-	critical := map[string]interface{}{
-		"type":     signatureType,
-		"image":    map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
-		"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
-	}
-	optional := map[string]interface{}{}
-	if s.UntrustedCreatorID != nil {
-		optional["creator"] = *s.UntrustedCreatorID
-	}
-	if s.UntrustedTimestamp != nil {
-		optional["timestamp"] = *s.UntrustedTimestamp
-	}
-	signature := map[string]interface{}{
-		"critical": critical,
-		"optional": optional,
-	}
-	return json.Marshal(signature)
-}
-
-// Compile-time check that untrustedSignature implements json.Unmarshaler
-var _ json.Unmarshaler = (*untrustedSignature)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface
-func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
-	err := s.strictUnmarshalJSON(data)
-	if err != nil {
-		if _, ok := err.(jsonFormatError); ok {
-			err = InvalidSignatureError{msg: err.Error()}
-		}
-	}
-	return err
-}
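
For reference, MarshalJSON above produces the layout sketched below; the digest, reference, creator, and timestamp values here are illustrative. A standalone snippet that builds the same shape with only the standard library:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	sig := map[string]interface{}{
		"critical": map[string]interface{}{
			"type": "atomic container signature",
			"image": map[string]string{
				"docker-manifest-digest": "sha256:0123456789abcdef", // illustrative
			},
			"identity": map[string]string{
				"docker-reference": "docker.io/library/busybox:latest", // illustrative
			},
		},
		"optional": map[string]interface{}{
			"creator":   "atomic 4.0.1", // illustrative
			"timestamp": 1569968158,     // whole seconds; fractional values are rejected on parse
		},
	}
	out, _ := json.MarshalIndent(sig, "", "  ")
	fmt.Println(string(out))
}
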
-// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
-// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError conversion in a single place, the caller.
-func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
-	var critical, optional json.RawMessage
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
-		"critical": &critical,
-		"optional": &optional,
-	}); err != nil {
-		return err
-	}
-
-	var creatorID string
-	var timestamp float64
-	var gotCreatorID, gotTimestamp = false, false
-	if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
-		switch key {
-		case "creator":
-			gotCreatorID = true
-			return &creatorID
-		case "timestamp":
-			gotTimestamp = true
-			return &timestamp
-		default:
-			var ignore interface{}
-			return &ignore
-		}
-	}); err != nil {
-		return err
-	}
-	if gotCreatorID {
-		s.UntrustedCreatorID = &creatorID
-	}
-	if gotTimestamp {
-		intTimestamp := int64(timestamp)
-		if float64(intTimestamp) != timestamp {
-			return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"}
-		}
-		s.UntrustedTimestamp = &intTimestamp
-	}
-
-	var t string
-	var image, identity json.RawMessage
-	if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
-		"type":     &t,
-		"image":    &image,
-		"identity": &identity,
-	}); err != nil {
-		return err
-	}
-	if t != signatureType {
-		return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
-	}
-
-	var digestString string
-	if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
-		"docker-manifest-digest": &digestString,
-	}); err != nil {
-		return err
-	}
-	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
-
-	return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
-		"docker-reference": &s.UntrustedDockerReference,
-	})
-}
-
-// sign formats the signature and returns a blob signed using mech and keyIdentity
-// (If it seems surprising that this is a method on untrustedSignature, note that there
-// isn’t a good reason to think that a key used by the user is trusted by any component
-// of the system just because it is a private key — actually the presence of a private key
-// on the system increases the likelihood of a successful attack on that private key
-// on that particular system.)
-func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
-	json, err := json.Marshal(s)
-	if err != nil {
-		return nil, err
-	}
-
-	return mech.Sign(json, keyIdentity)
-}
-
-// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
-// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
-// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
-// because the functions have the same or similar types, so there is a risk of exchanging the functions;
-// named members of this struct are more explicit.
-type signatureAcceptanceRules struct {
-	validateKeyIdentity                func(string) error
-	validateSignedDockerReference      func(string) error
-	validateSignedDockerManifestDigest func(digest.Digest) error
-}
-
-// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
-// match expected values, both as specified by rules, and returns it
-func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
-	signed, keyIdentity, err := mech.Verify(unverifiedSignature)
-	if err != nil {
-		return nil, err
-	}
-	if err := rules.validateKeyIdentity(keyIdentity); err != nil {
-		return nil, err
-	}
-
-	var unmatchedSignature untrustedSignature
-	if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
-		return nil, InvalidSignatureError{msg: err.Error()}
-	}
-	if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
-		return nil, err
-	}
-	if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
-		return nil, err
-	}
-	// signatureAcceptanceRules have accepted this value.
-	return &Signature{
-		DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
-		DockerReference:      unmatchedSignature.UntrustedDockerReference,
-	}, nil
-}
-
-// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
-// WITHOUT doing any cryptographic verification.
-// This may be useful when debugging signature verification failures,
-// or when managing a set of signatures on a single image.
-//
-// WARNING: Do not use the contents of this for ANY security decisions,
-// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
-// There is NO REASON to expect the values to be correct, or not intentionally misleading
-// (including things like “✅ Verified by $authority”)
-func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
-	// NOTE: This should eventually do format autodetection.
-	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
-	if err != nil {
-		return nil, err
-	}
-	defer mech.Close()
-
-	untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
-	if err != nil {
-		return nil, err
-	}
-	var untrustedDecodedContents untrustedSignature
-	if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
-		return nil, InvalidSignatureError{msg: err.Error()}
-	}
-
-	var timestamp *time.Time // = nil
-	if untrustedDecodedContents.UntrustedTimestamp != nil {
-		ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
-		timestamp = &ts
-	}
-	return &UntrustedSignatureInformation{
-		UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
-		UntrustedDockerReference:      untrustedDecodedContents.UntrustedDockerReference,
-		UntrustedCreatorID:            untrustedDecodedContents.UntrustedCreatorID,
-		UntrustedTimestamp:            timestamp,
-		UntrustedShortKeyIdentifier:   shortKeyIdentifier,
-	}, nil
-}
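
A short usage sketch for this debugging helper, assuming the post-update github.com/containers/image/v4 import path; the file path is illustrative, and, per the WARNING above, none of the printed values should be trusted:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/image/v4/signature"
)

func main() {
	blob, err := ioutil.ReadFile("/tmp/image.signature") // illustrative path
	if err != nil {
		panic(err)
	}
	info, err := signature.GetUntrustedSignatureInformationWithoutVerifying(blob)
	if err != nil {
		panic(err)
	}
	// Display-only: none of these values are cryptographically verified.
	fmt.Println("digest:   ", info.UntrustedDockerManifestDigest)
	fmt.Println("reference:", info.UntrustedDockerReference)
	fmt.Println("key:      ", info.UntrustedShortKeyIdentifier)
}
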
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
deleted file mode 100644
index 946a85f7b..000000000
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ /dev/null
@@ -1,956 +0,0 @@
-// +build !containers_image_storage_stub
-
-package storage
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"sync"
-	"sync/atomic"
-
-	"github.com/containers/image/docker/reference"
-	"github.com/containers/image/image"
-	"github.com/containers/image/internal/tmpdir"
-	"github.com/containers/image/manifest"
-	"github.com/containers/image/pkg/blobinfocache/none"
-	"github.com/containers/image/types"
-	"github.com/containers/storage"
-	"github.com/containers/storage/pkg/archive"
-	"github.com/containers/storage/pkg/ioutils"
-	digest "github.com/opencontainers/go-digest"
-	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-var (
-	// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
-	// with a digest-based name that doesn't match its contents.
-	ErrBlobDigestMismatch = errors.New("blob digest mismatch")
-	// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
-	// with an expected size that doesn't match the reader.
-	ErrBlobSizeMismatch = errors.New("blob size mismatch")
-	// ErrNoManifestLists is returned when GetManifest() is called
-	// with a non-nil instanceDigest.
-	ErrNoManifestLists = errors.New("manifest lists are not supported by this transport")
-	// ErrNoSuchImage is returned when we attempt to access an image which
-	// doesn't exist in the storage area.
-	ErrNoSuchImage = storage.ErrNotAnImage
-)
-
-type storageImageSource struct {
-	imageRef       storageReference
-	image          *storage.Image
-	layerPosition  map[digest.Digest]int // Where we are in reading a blob's layers
-	cachedManifest []byte                // A cached copy of the manifest, if already known, or nil
-	getBlobMutex   sync.Mutex            // Mutex to sync state for parallel GetBlob executions
-	SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
-}
-
-type storageImageDestination struct {
-	imageRef       storageReference
-	directory      string                          // Temporary directory where we store blobs until Commit() time
-	nextTempFileID int32                           // A counter that we use for computing filenames to assign to blobs
-	manifest       []byte                          // Manifest contents, temporary
-	signatures     []byte                          // Signature contents, temporary
-	putBlobMutex   sync.Mutex                      // Mutex to sync state for parallel PutBlob executions
-	blobDiffIDs    map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
-	fileSizes      map[digest.Digest]int64         // Mapping from layer blobsums to their sizes
-	filenames      map[digest.Digest]string        // Mapping from layer blobsums to names of files we used to hold them
-	SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
-}
-
-type storageImageCloser struct {
-	types.ImageCloser
-	size int64
-}
-
-// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
-// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
-// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey.
-func manifestBigDataKey(digest digest.Digest) string {
-	return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
-}
-
-// newImageSource sets up an image for reading.
-func newImageSource(imageRef storageReference) (*storageImageSource, error) {
-	// First, locate the image.
-	img, err := imageRef.resolveImage()
-	if err != nil {
-		return nil, err
-	}
-
-	// Build the reader object.
-	image := &storageImageSource{
-		imageRef:       imageRef,
-		image:          img,
-		layerPosition:  make(map[digest.Digest]int),
-		SignatureSizes: []int{},
-	}
-	if img.Metadata != "" {
-		if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
-			return nil, errors.Wrap(err, "error decoding metadata for source image")
-		}
-	}
-	return image, nil
-}
-
-// Reference returns the image reference that we used to find this image.
-func (s *storageImageSource) Reference() types.ImageReference {
-	return s.imageRef
-}
-
-// Close cleans up any resources we tied up while reading the image.
-func (s *storageImageSource) Close() error {
-	return nil
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *storageImageSource) HasThreadSafeGetBlob() bool {
-	return true
-}
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
-	if info.Digest == image.GzippedEmptyLayerDigest {
-		return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
-	}
-	rc, n, _, err = s.getBlobAndLayerID(info)
-	return rc, n, err
-}
-
-// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
-func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
-	var layer storage.Layer
-	var diffOptions *storage.DiffOptions
-	// We need a valid digest value.
-	err = info.Digest.Validate()
-	if err != nil {
-		return nil, -1, "", err
-	}
-	// Check if the blob corresponds to a diff that was used to initialize any layers. Our
-	// callers should try to retrieve layers using their uncompressed digests, so no need to
-	// check if they're using one of the compressed digests, which we can't reproduce anyway.
-	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
-	// If it's not a layer, then it must be a data item.
-	if len(layers) == 0 {
-		b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
-		if err != nil {
-			return nil, -1, "", err
-		}
-		r := bytes.NewReader(b)
-		logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
-		return ioutil.NopCloser(r), int64(r.Len()), "", nil
-	}
-	// Step through the list of matching layers. Tests may want to verify that if we have multiple layers
-	// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
-	// just go ahead and use the first one every time.
-	s.getBlobMutex.Lock()
-	i := s.layerPosition[info.Digest]
-	s.layerPosition[info.Digest] = i + 1
-	s.getBlobMutex.Unlock()
-	if len(layers) > 0 {
-		layer = layers[i%len(layers)]
-	}
-	// Force the storage layer to not try to match any compression that was used when the layer was first
-	// handed to it.
-	noCompression := archive.Uncompressed
-	diffOptions = &storage.DiffOptions{
-		Compression: &noCompression,
-	}
-	if layer.UncompressedSize < 0 {
-		n = -1
-	} else {
-		n = layer.UncompressedSize
-	}
-	logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
-	rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
-	if err != nil {
-		return nil, -1, "", err
-	}
-	return rc, n, layer.ID, err
-}
-
-// GetManifest() reads the image's manifest.
-func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
-	if instanceDigest != nil {
-		return nil, "", ErrNoManifestLists
-	}
-	if len(s.cachedManifest) == 0 {
-		// The manifest is stored as a big data item.
-		// Prefer the manifest corresponding to the user-specified digest, if available.
-		if s.imageRef.named != nil {
-			if digested, ok := s.imageRef.named.(reference.Digested); ok {
-				key := manifestBigDataKey(digested.Digest())
-				blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
-				if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
-					return nil, "", err
-				}
-				if err == nil {
-					s.cachedManifest = blob
-				}
-			}
-		}
-		// If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
- // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest(). - if len(s.cachedManifest) == 0 { - cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey) - if err != nil { - return nil, "", err - } - s.cachedManifest = cachedBlob - } - } - return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err -} - -// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of -// the image, after they've been decompressed. -func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - manifestBlob, manifestType, err := s.GetManifest(ctx, nil) - if err != nil { - return nil, errors.Wrapf(err, "error reading image manifest for %q", s.image.ID) - } - man, err := manifest.FromBlob(manifestBlob, manifestType) - if err != nil { - return nil, errors.Wrapf(err, "error parsing image manifest for %q", s.image.ID) - } - - uncompressedLayerType := "" - switch manifestType { - case imgspecv1.MediaTypeImageManifest: - uncompressedLayerType = imgspecv1.MediaTypeImageLayer - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: - // This is actually a compressed type, but there's no uncompressed type defined - uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType - } - - physicalBlobInfos := []types.BlobInfo{} - layerID := s.image.TopLayer - for layerID != "" { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - return nil, errors.Wrapf(err, "error reading layer %q in image %q", layerID, s.image.ID) - } - if layer.UncompressedDigest == "" { - return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID) - } - if layer.UncompressedSize < 0 { - return nil, errors.Errorf("uncompressed size for layer %q is unknown", layerID) - } - blobInfo := types.BlobInfo{ - Digest: layer.UncompressedDigest, - Size: layer.UncompressedSize, - MediaType: uncompressedLayerType, - } - physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) - layerID = layer.Parent - } - - res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos) - if err != nil { - return nil, errors.Wrapf(err, "error creating LayerInfosForCopy of image %q", s.image.ID) - } - return res, nil -} - -// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest, -// but using layer data which we can actually produce — physicalInfos for non-empty layers, -// and image.GzippedEmptyLayer for empty ones. -// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.) 
-func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
-	nextPhysical := 0
-	res := make([]types.BlobInfo, len(manifestInfos))
-	for i, mi := range manifestInfos {
-		if mi.EmptyLayer {
-			res[i] = types.BlobInfo{
-				Digest:    image.GzippedEmptyLayerDigest,
-				Size:      int64(len(image.GzippedEmptyLayer)),
-				MediaType: mi.MediaType,
-			}
-		} else {
-			if nextPhysical >= len(physicalInfos) {
-				return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
-			}
-			res[i] = physicalInfos[nextPhysical]
-			nextPhysical++
-		}
-	}
-	if nextPhysical != len(physicalInfos) {
-		return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
-	}
-	return res, nil
-}
-
-// GetSignatures() parses the image's signatures blob into a slice of byte slices.
-func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
-	if instanceDigest != nil {
-		return nil, ErrNoManifestLists
-	}
-	var offset int
-	sigslice := [][]byte{}
-	signature := []byte{}
-	if len(s.SignatureSizes) > 0 {
-		signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, "signatures")
-		if err != nil {
-			return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.image.ID)
-		}
-		signature = signatureBlob
-	}
-	for _, length := range s.SignatureSizes {
-		sigslice = append(sigslice, signature[offset:offset+length])
-		offset += length
-	}
-	if offset != len(signature) {
-		return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
-	}
-	return sigslice, nil
-}
-
-// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
-// it's time to Commit() the image.
-func newImageDestination(imageRef storageReference) (*storageImageDestination, error) {
-	directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "storage")
-	if err != nil {
-		return nil, errors.Wrapf(err, "error creating a temporary directory")
-	}
-	image := &storageImageDestination{
-		imageRef:       imageRef,
-		directory:      directory,
-		blobDiffIDs:    make(map[digest.Digest]digest.Digest),
-		fileSizes:      make(map[digest.Digest]int64),
-		filenames:      make(map[digest.Digest]string),
-		SignatureSizes: []int{},
-	}
-	return image, nil
-}
-
-// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
-// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (s *storageImageDestination) Reference() types.ImageReference {
-	return s.imageRef
-}
-
-// Close cleans up the temporary directory.
-func (s *storageImageDestination) Close() error {
-	return os.RemoveAll(s.directory)
-}
-
-func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
-	// We ultimately have to decompress layers to populate trees on disk,
-	// so callers shouldn't bother compressing them before handing them to
-	// us, if they're not already compressed.
-	return types.PreserveOriginal
-}
-
-func (s *storageImageDestination) computeNextBlobCacheFile() string {
-	return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
-}
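
GetSignatures above reconstructs individual signatures from one concatenated big-data blob plus the recorded SignatureSizes. The slicing logic in isolation, as a standalone sketch with made-up contents and sizes:

package main

import "fmt"

func main() {
	// Two signatures stored back to back, with their lengths recorded
	// separately (values are illustrative).
	blob := []byte("AAAAABBB")
	sizes := []int{5, 3}

	offset := 0
	var sigs [][]byte
	for _, length := range sizes {
		sigs = append(sigs, blob[offset:offset+length])
		offset += length
	}
	if offset != len(blob) {
		panic(fmt.Sprintf("signatures data contained %d extra bytes", len(blob)-offset))
	}
	fmt.Printf("%q\n", sigs) // ["AAAAA" "BBB"]
}
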
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (s *storageImageDestination) HasThreadSafePutBlob() bool {
-	return true
-}
-
-// PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
-// inputInfo.Size is the expected length of stream, if known.
-// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
-	// Stores a layer or data blob in our temporary directory, checking that any information
-	// in the blobinfo matches the incoming data.
-	errorBlobInfo := types.BlobInfo{
-		Digest: "",
-		Size:   -1,
-	}
-	// Set up to digest the blob and count its size while saving it to a file.
-	hasher := digest.Canonical.Digester()
-	if blobinfo.Digest.Validate() == nil {
-		if a := blobinfo.Digest.Algorithm(); a.Available() {
-			hasher = a.Digester()
-		}
-	}
-	diffID := digest.Canonical.Digester()
-	filename := s.computeNextBlobCacheFile()
-	file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
-	if err != nil {
-		return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename)
-	}
-	defer file.Close()
-	counter := ioutils.NewWriteCounter(hasher.Hash())
-	reader := io.TeeReader(io.TeeReader(stream, counter), file)
-	decompressed, err := archive.DecompressStream(reader)
-	if err != nil {
-		return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob")
-	}
-	// Copy the data to the file.
-	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
-	_, err = io.Copy(diffID.Hash(), decompressed)
-	decompressed.Close()
-	if err != nil {
-		return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename)
-	}
-	// Ensure that any information that we were given about the blob is correct.
-	if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() {
-		return errorBlobInfo, ErrBlobDigestMismatch
-	}
-	if blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
-		return errorBlobInfo, ErrBlobSizeMismatch
-	}
-	// Record information about the blob.
-	s.putBlobMutex.Lock()
-	s.blobDiffIDs[hasher.Digest()] = diffID.Digest()
-	s.fileSizes[hasher.Digest()] = counter.Count
-	s.filenames[hasher.Digest()] = filename
-	s.putBlobMutex.Unlock()
-	blobDigest := blobinfo.Digest
-	if blobDigest.Validate() != nil {
-		blobDigest = hasher.Digest()
-	}
-	blobSize := blobinfo.Size
-	if blobSize < 0 {
-		blobSize = counter.Count
-	}
-	// This is safe because we have just computed both values ourselves.
-	cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
-	return types.BlobInfo{
-		Digest:    blobDigest,
-		Size:      blobSize,
-		MediaType: blobinfo.MediaType,
-	}, nil
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). -// info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. -// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. -func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - // lock the entire method as it executes fairly quickly - s.putBlobMutex.Lock() - defer s.putBlobMutex.Unlock() - if blobinfo.Digest == "" { - return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) - } - if err := blobinfo.Digest.Validate(); err != nil { - return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`) - } - - // Check if we've already cached it in a file. - if size, ok := s.fileSizes[blobinfo.Digest]; ok { - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: size, - MediaType: blobinfo.MediaType, - }, nil - } - - // Check if we have a layer in storage whose uncompressed digest matches that blob. - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest) - } - if len(layers) > 0 { - // Save this for completeness. - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: layers[0].UncompressedSize, - MediaType: blobinfo.MediaType, - }, nil - } - - // Check if we have a layer in storage whose compressed digest matches that blob. - layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest) - } - if len(layers) > 0 { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: layers[0].CompressedSize, - MediaType: blobinfo.MediaType, - }, nil - } - - // Does the blob correspond to a known DiffID which we already have available? - // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the - // uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size. 
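A minimal, self-contained sketch of the two BlobInfoCache methods these call sites assume: PutBlob earlier in this file records the pairing via cache.RecordDigestUncompressedPair(), and the substitution branch that follows consumes it via cache.UncompressedDigest(). This is not one of the cache implementations c/image actually ships, and the memCache name is invented for illustration:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// memCache is a hypothetical, minimal stand-in for the two BlobInfoCache
// methods used by PutBlob and TryReusingBlob in this file.
type memCache struct {
	uncompressed map[digest.Digest]digest.Digest
}

// RecordDigestUncompressedPair remembers that anyDigest decompresses to
// uncompressed; PutBlob records this after digesting an incoming blob.
func (c *memCache) RecordDigestUncompressedPair(anyDigest, uncompressed digest.Digest) {
	c.uncompressed[anyDigest] = uncompressed
}

// UncompressedDigest returns the recorded uncompressed counterpart, or ""
// when unknown; TryReusingBlob uses it to substitute an equivalent layer.
func (c *memCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
	return c.uncompressed[anyDigest]
}

func main() {
	c := &memCache{uncompressed: map[digest.Digest]digest.Digest{}}
	compressed := digest.Canonical.FromString("gzipped layer bytes")
	diffID := digest.Canonical.FromString("uncompressed layer bytes")
	c.RecordDigestUncompressedPair(compressed, diffID)
	fmt.Println(c.UncompressedDigest(compressed) == diffID) // true
}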
- if canSubstitute || blobinfo.Size != -1 { - if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest) - } - if len(layers) > 0 { - if blobinfo.Size != -1 { - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, blobinfo, nil - } - if !canSubstitute { - return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo) - } - s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest - return true, types.BlobInfo{ - Digest: uncompressedDigest, - Size: layers[0].UncompressedSize, - MediaType: blobinfo.MediaType, - }, nil - } - } - } - - // Nope, we don't have it. - return false, types.BlobInfo{}, nil -} - -// computeID computes a recommended image ID based on information we have so far. If -// the manifest is not of a type that we recognize, we return an empty value, indicating -// that since we don't have a recommendation, a random ID should be used if one needs -// to be allocated. -func (s *storageImageDestination) computeID(m manifest.Manifest) string { - // Build the diffID list. We need the decompressed sums that we've been calculating to - // fill in the DiffIDs. It's expected (but not enforced by us) that the number of - // diffIDs corresponds to the number of non-EmptyLayer entries in the history. - var diffIDs []digest.Digest - switch m := m.(type) { - case *manifest.Schema1: - // Build a list of the diffIDs we've generated for the non-throwaway FS layers, - // in reverse of the order in which they were originally listed. - for i, compat := range m.ExtractedV1Compatibility { - if compat.ThrowAway { - continue - } - blobSum := m.FSLayers[i].BlobSum - diffID, ok := s.blobDiffIDs[blobSum] - if !ok { - logrus.Infof("error looking up diffID for layer %q", blobSum.String()) - return "" - } - diffIDs = append([]digest.Digest{diffID}, diffIDs...) - } - case *manifest.Schema2, *manifest.OCI1: - // We know the ID calculation for these formats doesn't actually use the diffIDs, - // so we don't need to populate the diffID list. - default: - return "" - } - id, err := m.ImageID(diffIDs) - if err != nil { - return "" - } - return id -} - -// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig -// information out of it for Inspect(). -func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { - if info.Digest == "" { - return nil, errors.Errorf(`no digest supplied when reading blob`) - } - if err := info.Digest.Validate(); err != nil { - return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) - } - // Assume it's a file, since we're only calling this from a place that expects to read files. - if filename, ok := s.filenames[info.Digest]; ok { - contents, err2 := ioutil.ReadFile(filename) - if err2 != nil { - return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename) - } - return contents, nil - } - // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. - return nil, errors.New("blob not found") -} - -func (s *storageImageDestination) Commit(ctx context.Context) error { - // Find the list of layer blobs. 
- if len(s.manifest) == 0 { - return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") - } - man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) - if err != nil { - return errors.Wrapf(err, "error parsing manifest") - } - layerBlobs := man.LayerInfos() - // Extract or find the layers. - lastLayer := "" - for _, blob := range layerBlobs { - if blob.EmptyLayer { - continue - } - - // Check if there's already a layer with the ID that we'd give to the result of applying - // this layer blob to its parent, if it has one, or the blob's hex value otherwise. - diffID, haveDiffID := s.blobDiffIDs[blob.Digest] - if !haveDiffID { - // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), - // or to even check if we had it. - // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller - // that relies on using a blob digest that has never been seen by the store had better call - // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only - // so far we are going to accommodate that (if we should be doing that at all). - logrus.Debugf("looking for diffID for blob %+v", blob.Digest) - has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false) - if err != nil { - return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String()) - } - if !has { - return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) - } - diffID, haveDiffID = s.blobDiffIDs[blob.Digest] - if !haveDiffID { - return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) - } - } - id := diffID.Hex() - if lastLayer != "" { - id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() - } - if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { - // There's already a layer that should have the right contents, just reuse it. - lastLayer = layer.ID - continue - } - // Check if we previously cached a file with that blob's contents. If we didn't, - // then we need to read the desired contents from a layer. - filename, ok := s.filenames[blob.Digest] - if !ok { - // Try to find the layer with contents matching that blobsum. - layer := "" - layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID) - if err2 == nil && len(layers) > 0 { - layer = layers[0].ID - } else { - layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) - if err2 == nil && len(layers) > 0 { - layer = layers[0].ID - } - } - if layer == "" { - return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest) - } - // Read the layer's contents. - noCompression := archive.Uncompressed - diffOptions := &storage.DiffOptions{ - Compression: &noCompression, - } - diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions) - if err2 != nil { - return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest) - } - // Copy the layer diff to a file. Diff() takes a lock that it holds - // until the ReadCloser that it returns is closed, and PutLayer() wants - // the same lock, so the diff can't just be directly streamed from one - // to the other. 
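The id computation above is the whole layer-identity scheme: a base layer is named by its DiffID, and each child layer hashes "parentID+diffID", so the same diff stacked on different parents yields distinct layer IDs. A self-contained sketch using the same go-digest calls this file already imports; chainedLayerID is an invented helper name:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// chainedLayerID mirrors the derivation in Commit(): the base layer reuses
// its DiffID hex, and every child layer hashes "parentID+diffID".
func chainedLayerID(parentID string, diffID digest.Digest) string {
	if parentID == "" {
		return diffID.Hex()
	}
	return digest.Canonical.FromBytes([]byte(parentID + "+" + diffID.Hex())).Hex()
}

func main() {
	base := chainedLayerID("", digest.Canonical.FromString("layer 1 diff"))
	child := chainedLayerID(base, digest.Canonical.FromString("layer 2 diff"))
	fmt.Println(base)
	fmt.Println(child)
}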
- filename = s.computeNextBlobCacheFile() - file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) - if err != nil { - diff.Close() - return errors.Wrapf(err, "error creating temporary file %q", filename) - } - // Copy the data to the file. - // TODO: This can take quite some time, and should ideally be cancellable using - // ctx.Done(). - _, err = io.Copy(file, diff) - diff.Close() - file.Close() - if err != nil { - return errors.Wrapf(err, "error storing blob to file %q", filename) - } - // Make sure that we can find this file later, should we need the layer's - // contents again. - s.filenames[blob.Digest] = filename - } - // Read the cached blob and use it as a diff. - file, err := os.Open(filename) - if err != nil { - return errors.Wrapf(err, "error opening file %q", filename) - } - defer file.Close() - // Build the new layer using the diff, regardless of where it came from. - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file) - if err != nil && errors.Cause(err) != storage.ErrDuplicateID { - return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) - } - lastLayer = layer.ID - } - - // If one of those blobs was a configuration blob, then we can try to dig out the date when the image - // was originally created, in case we're just copying it. If not, no harm done. - options := &storage.ImageOptions{} - if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil { - logrus.Debugf("setting image creation date to %s", inspect.Created) - options.CreationDate = *inspect.Created - } - // Create the image record, pointing to the most-recently added layer. - intendedID := s.imageRef.id - if intendedID == "" { - intendedID = s.computeID(man) - } - oldNames := []string{} - img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) - if err != nil { - if errors.Cause(err) != storage.ErrDuplicateID { - logrus.Debugf("error creating image: %q", err) - return errors.Wrapf(err, "error creating image %q", intendedID) - } - img, err = s.imageRef.transport.store.Image(intendedID) - if err != nil { - return errors.Wrapf(err, "error reading image %q", intendedID) - } - if img.TopLayer != lastLayer { - logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) - return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) - } - logrus.Debugf("reusing image ID %q", img.ID) - oldNames = append(oldNames, img.Names...) - } else { - logrus.Debugf("created new image ID %q", img.ID) - } - // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so - // we just need to screen out the ones that are actually layers to get the list of non-layers. 
- dataBlobs := make(map[digest.Digest]struct{}) - for blob := range s.filenames { - dataBlobs[blob] = struct{}{} - } - for _, layerBlob := range layerBlobs { - delete(dataBlobs, layerBlob.Digest) - } - for blob := range dataBlobs { - v, err := ioutil.ReadFile(s.filenames[blob]) - if err != nil { - return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) - } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) - return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) - } - } - // Set the reference's name on the image. - if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { - names := []string{} - if name != nil { - names = append(names, name.String()) - } - if len(oldNames) > 0 { - names = append(names, oldNames...) - } - if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) - return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID) - } - logrus.Debugf("set names of image %q to %v", img.ID, names) - } - // Save the manifest. Allow looking it up by digest by using the key convention defined by the Store. - // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance, - // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers. - manifestDigest, err := manifest.Digest(s.manifest) - if err != nil { - return errors.Wrapf(err, "error computing manifest digest") - } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest, manifest.Digest); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) - return err - } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest, manifest.Digest); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) - return err - } - // Save the signatures, if we have any. - if len(s.signatures) > 0 { - if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return err - } - } - // Save our metadata. 
- metadata, err := json.Marshal(s) - if err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) - return err - } - if len(metadata) != 0 { - if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) - return err - } - logrus.Debugf("saved image metadata %q", string(metadata)) - } - return nil -} - -var manifestMIMETypes = []string{ - imgspecv1.MediaTypeImageManifest, - manifest.DockerV2Schema2MediaType, - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, -} - -func (s *storageImageDestination) SupportedManifestMIMETypes() []string { - return manifestMIMETypes -} - -// PutManifest writes the manifest to the destination. -func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error { - if s.imageRef.named != nil { - if digested, ok := s.imageRef.named.(reference.Digested); ok { - matches, err := manifest.MatchesDigest(manifestBlob, digested.Digest()) - if err != nil { - return err - } - if !matches { - return fmt.Errorf("Manifest does not match expected digest %s", digested.Digest()) - } - } - } - - s.manifest = make([]byte, len(manifestBlob)) - copy(s.manifest, manifestBlob) - return nil -} - -// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was -// previously supplied to PutSignatures(). -func (s *storageImageDestination) SupportsSignatures(ctx context.Context) error { - return nil -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be -// uploaded to the image destination, true otherwise. -func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (s *storageImageDestination) MustMatchRuntimeOS() bool { - return true -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { - return true // Yes, we want the unmodified manifest -} - -// PutSignatures records the image's signatures for committing as a single data blob. -func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { - sizes := []int{} - sigblob := []byte{} - for _, sig := range signatures { - sizes = append(sizes, len(sig)) - newblob := make([]byte, len(sigblob)+len(sig)) - copy(newblob, sigblob) - copy(newblob[len(sigblob):], sig) - sigblob = newblob - } - s.signatures = sigblob - s.SignatureSizes = sizes - return nil -} - -// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the -// signatures, and the uncompressed sizes of all of the image's layers. -func (s *storageImageSource) getSize() (int64, error) { - var sum int64 - // Size up the data blobs. 
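PutSignatures above and GetSignatures earlier in this file are two halves of one trivial container format: the signatures are concatenated into a single "signatures" big-data blob, and SignatureSizes records where to cut. A round-trip sketch of that layout; packSignatures and unpackSignatures are invented names:

package main

import (
	"bytes"
	"fmt"
)

// packSignatures mirrors PutSignatures: concatenate the signatures into one
// blob and record each signature's length, in order.
func packSignatures(sigs [][]byte) (blob []byte, sizes []int) {
	for _, sig := range sigs {
		sizes = append(sizes, len(sig))
		blob = append(blob, sig...)
	}
	return blob, sizes
}

// unpackSignatures mirrors GetSignatures: slice the blob apart again using
// the recorded sizes.
func unpackSignatures(blob []byte, sizes []int) [][]byte {
	var sigs [][]byte
	offset := 0
	for _, n := range sizes {
		sigs = append(sigs, blob[offset:offset+n])
		offset += n
	}
	return sigs
}

func main() {
	in := [][]byte{[]byte("sig-a"), []byte("longer-sig-b")}
	blob, sizes := packSignatures(in)
	out := unpackSignatures(blob, sizes)
	fmt.Println(bytes.Equal(in[1], out[1]), sizes) // true [5 12]
}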
- dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID) - if err != nil { - return -1, errors.Wrapf(err, "error reading image %q", s.image.ID) - } - for _, dataName := range dataNames { - bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName) - if err != nil { - return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.image.ID) - } - sum += bigSize - } - // Add the signature sizes. - for _, sigSize := range s.SignatureSizes { - sum += int64(sigSize) - } - // Walk the layer list. - layerID := s.image.TopLayer - for layerID != "" { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - return -1, err - } - if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { - return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) - } - sum += layer.UncompressedSize - if layer.Parent == "" { - break - } - layerID = layer.Parent - } - return sum, nil -} - -// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the -// signatures, and the uncompressed sizes of all of the image's layers. -func (s *storageImageSource) Size() (int64, error) { - return s.getSize() -} - -// Size() returns the previously-computed size of the image, with no error. -func (s *storageImageCloser) Size() (int64, error) { - return s.size, nil -} - -// newImage creates an image that also knows its size -func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) { - src, err := newImageSource(s) - if err != nil { - return nil, err - } - img, err := image.FromSource(ctx, sys, src) - if err != nil { - return nil, err - } - size, err := src.getSize() - if err != nil { - return nil, err - } - return &storageImageCloser{ImageCloser: img, size: size}, nil -} diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go deleted file mode 100644 index c046d9f22..000000000 --- a/vendor/github.com/containers/image/storage/storage_reference.go +++ /dev/null @@ -1,225 +0,0 @@ -// +build !containers_image_storage_stub - -package storage - -import ( - "context" - "strings" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte -// value hex-encoded into a 64-character string, and a reference to a Store -// where an image is, or would be, kept. -// Either "named" or "id" must be set. -type storageReference struct { - transport storageTransport - named reference.Named // may include a tag and/or a digest - id string -} - -func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) { - if named == nil && id == "" { - return nil, ErrInvalidReference - } - // We take a copy of the transport, which contains a pointer to the - // store that it used for resolving this reference, so that the - // transport that we'll return from Transport() won't be affected by - // further calls to the original transport's SetStore() method. 
- return &storageReference{ - transport: transport, - named: named, - id: id, - }, nil -} - -// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref -func imageMatchesRepo(image *storage.Image, ref reference.Named) bool { - repo := ref.Name() - for _, name := range image.Names { - if named, err := reference.ParseNormalizedNamed(name); err == nil { - if named.Name() == repo { - return true - } - } - } - return false -} - -// Resolve the reference's name to an image ID in the store, if there's already -// one present with the same name or ID, and return the image. -func (s *storageReference) resolveImage() (*storage.Image, error) { - var loadedImage *storage.Image - if s.id == "" && s.named != nil { - // Look for an image that has the expanded reference name as an explicit Name value. - image, err := s.transport.store.Image(s.named.String()) - if image != nil && err == nil { - loadedImage = image - s.id = image.ID - } - } - if s.id == "" && s.named != nil { - if digested, ok := s.named.(reference.Digested); ok { - // Look for an image with the specified digest that has the same name, - // though possibly with a different tag or digest, as a Name value, so - // that the canonical reference can be implicitly resolved to the image. - images, err := s.transport.store.ImagesByDigest(digested.Digest()) - if err == nil && len(images) > 0 { - for _, image := range images { - if imageMatchesRepo(image, s.named) { - loadedImage = image - s.id = image.ID - break - } - } - } - } - } - if s.id == "" { - logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) - return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport()) - } - if loadedImage == nil { - img, err := s.transport.store.Image(s.id) - if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", s.id) - } - loadedImage = img - } - if s.named != nil { - if !imageMatchesRepo(loadedImage, s.named) { - logrus.Errorf("no image matching reference %q found", s.StringWithinTransport()) - return nil, ErrNoSuchImage - } - } - // Default to having the image digest that we hand back match the most recently - // added manifest... - if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok { - loadedImage.Digest = digest - } - // ... unless the named reference says otherwise, and it matches one of the digests - // in the image. For those cases, set the Digest field to that value, for the - // sake of older consumers that don't know there's a whole list in there now. - if s.named != nil { - if digested, ok := s.named.(reference.Digested); ok { - for _, digest := range loadedImage.Digests { - if digest == digested.Digest() { - loadedImage.Digest = digest - break - } - } - } - } - return loadedImage, nil -} - -// Return a Transport object that defaults to using the same store that we used -// to build this reference object. -func (s storageReference) Transport() types.ImageTransport { - return &storageTransport{ - store: s.transport.store, - defaultUIDMap: s.transport.defaultUIDMap, - defaultGIDMap: s.transport.defaultGIDMap, - } -} - -// Return a name with a tag or digest, if we have either, else return it bare. -func (s storageReference) DockerReference() reference.Named { - return s.named -} - -// Return a name with a tag, prefixed with the graph root and driver name, to -// disambiguate between images which may be present in multiple stores and -// share only their names. 
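The imageMatchesRepo helper above works because ParseNormalizedNamed canonicalizes each stored name before the repositories are compared, so a bare name and its fully-qualified form match. A minimal demonstration against the same vendored reference package this file uses:

package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("busybox:latest")
	if err != nil {
		panic(err)
	}
	// Name() drops the tag and expands the implied registry and repository
	// path, which is what lets imageMatchesRepo equate "busybox" with
	// "docker.io/library/busybox".
	fmt.Println(named.Name()) // docker.io/library/busybox
}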
-func (s storageReference) StringWithinTransport() string { - optionsList := "" - options := s.transport.store.GraphOptions() - if len(options) > 0 { - optionsList = ":" + strings.Join(options, ",") - } - res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" - if s.named != nil { - res = res + s.named.String() - } - if s.id != "" { - res = res + "@" + s.id - } - return res -} - -func (s storageReference) PolicyConfigurationIdentity() string { - res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" - if s.named != nil { - res = res + s.named.String() - } - if s.id != "" { - res = res + "@" + s.id - } - return res -} - -// Also accept policy that's tied to the combination of the graph root and -// driver name, to apply to all images stored in the Store, and to just the -// graph root, in case we're using multiple drivers in the same directory for -// some reason. -func (s storageReference) PolicyConfigurationNamespaces() []string { - storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" - driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" - namespaces := []string{} - if s.named != nil { - if s.id != "" { - // The reference without the ID is also a valid namespace. - namespaces = append(namespaces, storeSpec+s.named.String()) - } - tagged, isTagged := s.named.(reference.Tagged) - _, isDigested := s.named.(reference.Digested) - if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace. - namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag()) - } - components := strings.Split(s.named.Name(), "/") - for len(components) > 0 { - namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) - components = components[:len(components)-1] - } - } - namespaces = append(namespaces, storeSpec) - namespaces = append(namespaces, driverlessStoreSpec) - return namespaces -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
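PolicyConfigurationNamespaces above emits scopes from most to least specific. A sketch of the expansion for one sample reference, reproducing only the name-shortening loop; the storeSpec literal and the nsFor helper are illustrative, not taken from this file:

package main

import (
	"fmt"
	"strings"
)

// nsFor mirrors the name-shortening part of PolicyConfigurationNamespaces:
// each progressively shorter repository path is prefixed with the store
// spec, ending with the store spec alone.
func nsFor(storeSpec, name string) []string {
	var namespaces []string
	components := strings.Split(name, "/")
	for len(components) > 0 {
		namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
		components = components[:len(components)-1]
	}
	return append(namespaces, storeSpec)
}

func main() {
	for _, ns := range nsFor("[overlay@/var/lib/containers/storage]", "docker.io/library/busybox") {
		fmt.Println(ns)
	}
	// Output:
	// [overlay@/var/lib/containers/storage]docker.io/library/busybox
	// [overlay@/var/lib/containers/storage]docker.io/library
	// [overlay@/var/lib/containers/storage]docker.io
	// [overlay@/var/lib/containers/storage]
}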
-func (s storageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - return newImage(ctx, sys, s) -} - -func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - img, err := s.resolveImage() - if err != nil { - return err - } - layers, err := s.transport.store.DeleteImage(img.ID, true) - if err == nil { - logrus.Debugf("deleted image %q", img.ID) - for _, layer := range layers { - logrus.Debugf("deleted layer %q", layer) - } - } - return err -} - -func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(s) -} - -func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(s) -} diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go deleted file mode 100644 index c9a05e6c0..000000000 --- a/vendor/github.com/containers/image/storage/storage_transport.go +++ /dev/null @@ -1,366 +0,0 @@ -// +build !containers_image_storage_stub - -package storage - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/pkg/errors" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/idtools" - digest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -const ( - minimumTruncatedIDLength = 3 -) - -func init() { - transports.Register(Transport) -} - -var ( - // Transport is an ImageTransport that uses either a default - // storage.Store or one that it's explicitly told to use. - Transport StoreTransport = &storageTransport{} - // ErrInvalidReference is returned when ParseReference() is passed an - // empty reference. - ErrInvalidReference = errors.New("invalid reference") - // ErrPathNotAbsolute is returned when a graph root is not an absolute - // path name. - ErrPathNotAbsolute = errors.New("path name is not absolute") -) - -// StoreTransport is an ImageTransport that uses a storage.Store to parse -// references, either its own default or one that it's told to use. -type StoreTransport interface { - types.ImageTransport - // SetStore sets the default store for this transport. - SetStore(storage.Store) - // GetImage retrieves the image from the transport's store that's named - // by the reference. - GetImage(types.ImageReference) (*storage.Image, error) - // GetStoreImage retrieves the image from a specified store that's named - // by the reference. - GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error) - // ParseStoreReference parses a reference, overriding any store - // specification that it may contain. - ParseStoreReference(store storage.Store, reference string) (*storageReference, error) - // SetDefaultUIDMap sets the default UID map to use when opening stores. - SetDefaultUIDMap(idmap []idtools.IDMap) - // SetDefaultGIDMap sets the default GID map to use when opening stores. - SetDefaultGIDMap(idmap []idtools.IDMap) - // DefaultUIDMap returns the default UID map used when opening stores. - DefaultUIDMap() []idtools.IDMap - // DefaultGIDMap returns the default GID map used when opening stores. 
- DefaultGIDMap() []idtools.IDMap -} - -type storageTransport struct { - store storage.Store - defaultUIDMap []idtools.IDMap - defaultGIDMap []idtools.IDMap -} - -func (s *storageTransport) Name() string { - // Still haven't really settled on a name. - return "containers-storage" -} - -// SetStore sets the Store object which the Transport will use for parsing -// references when information about a Store is not directly specified as part -// of the reference. If one is not set, the library will attempt to initialize -// one with default settings when a reference needs to be parsed. Calling -// SetStore does not affect previously parsed references. -func (s *storageTransport) SetStore(store storage.Store) { - s.store = store -} - -// SetDefaultUIDMap sets the default UID map to use when opening stores. -func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) { - s.defaultUIDMap = idmap -} - -// SetDefaultGIDMap sets the default GID map to use when opening stores. -func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) { - s.defaultGIDMap = idmap -} - -// DefaultUIDMap returns the default UID map used when opening stores. -func (s *storageTransport) DefaultUIDMap() []idtools.IDMap { - return s.defaultUIDMap -} - -// DefaultGIDMap returns the default GID map used when opening stores. -func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { - return s.defaultGIDMap -} - -// ParseStoreReference takes a name or an ID, tries to figure out which it is -// relative to the given store, and returns it in a reference object. -func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { - if ref == "" { - return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref) - } - if ref[0] == '[' { - // Ignore the store specifier. - closeIndex := strings.IndexRune(ref, ']') - if closeIndex < 1 { - return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) - } - ref = ref[closeIndex+1:] - } - - // The reference may end with an image ID. Image IDs and digests use the same "@" separator; - // here we only peel away an image ID, and leave digests alone. - split := strings.LastIndex(ref, "@") - id := "" - if split != -1 { - possibleID := ref[split+1:] - if possibleID == "" { - return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref) - } - // If it looks like a digest, leave it alone for now. - if _, err := digest.Parse(possibleID); err != nil { - // Otherwise… - if idSum, err := digest.Parse("sha256:" + possibleID); err == nil && idSum.Validate() == nil { - id = possibleID // … it is a full ID - } else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) { - // … it is a truncated version of the ID of an image that's present in local storage, - // so we might as well use the expanded value. - id = img.ID - } else { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID) - } - // We have recognized an image ID; peel it off. - ref = ref[:split] - } - } - - // If we only have one @-delimited portion, then _maybe_ it's a truncated image ID. Only check on that if it's - // at least of what we guess is a reasonable minimum length, because we don't want a really short value - // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. 
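The trailing-"@" handling above is subtle because image IDs and digests share the separator. A reduced sketch of just that classification step, covering full 64-hex IDs only and omitting the truncated-ID store lookup handled below; splitImageID is an invented name:

package main

import (
	"fmt"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// splitImageID mirrors the trailing-"@" logic in ParseStoreReference: a bare
// 64-hex tail is peeled off as an image ID, while anything that parses as a
// full digest ("sha256:...") stays attached to the name.
func splitImageID(ref string) (name, id string) {
	split := strings.LastIndex(ref, "@")
	if split == -1 {
		return ref, ""
	}
	tail := ref[split+1:]
	if _, err := digest.Parse(tail); err == nil {
		return ref, "" // a digest, not an image ID
	}
	if _, err := digest.Parse("sha256:" + tail); err == nil {
		return ref[:split], tail
	}
	return ref, ""
}

func main() {
	id := strings.Repeat("0", 64)
	name, got := splitImageID("busybox:latest@" + id)
	fmt.Println(name, got == id) // busybox:latest true
}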
- if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") { - if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) { - // It's a truncated version of the ID of an image that's present in local storage; - // we need to expand it. - id = img.ID - ref = "" - } - } - - var named reference.Named - // Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been - // completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest.. - if ref != "" { - var err error - named, err = reference.ParseNormalizedNamed(ref) - if err != nil { - return nil, errors.Wrapf(err, "error parsing named reference %q", ref) - } - named = reference.TagNameOnly(named) - } - - result, err := newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id) - if err != nil { - return nil, err - } - logrus.Debugf("parsed reference into %q", result.StringWithinTransport()) - return result, nil -} - -func (s *storageTransport) GetStore() (storage.Store, error) { - // Return the transport's previously-set store. If we don't have one - // of those, initialize one now. - if s.store == nil { - options, err := storage.DefaultStoreOptionsAutoDetectUID() - if err != nil { - return nil, err - } - options.UIDMap = s.defaultUIDMap - options.GIDMap = s.defaultGIDMap - store, err := storage.GetStore(options) - if err != nil { - return nil, err - } - s.store = store - } - return s.store, nil -} - -// ParseReference takes a name and a tag or digest and/or ID -// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"), -// possibly prefixed with a store specifier in the form "[_graphroot_]" or -// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or -// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", -// tries to figure out which it is, and returns it in a reference object. -// If _id_ is the ID of an image that's present in local storage, it can be truncated, and -// even be specified as if it were a _name_, value. -func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { - var store storage.Store - // Check if there's a store location prefix. If there is, then it - // needs to match a store that was previously initialized using - // storage.GetStore(), or be enough to let the storage library fill out - // the rest using knowledge that it has from elsewhere. - if reference[0] == '[' { - closeIndex := strings.IndexRune(reference, ']') - if closeIndex < 1 { - return nil, ErrInvalidReference - } - storeSpec := reference[1:closeIndex] - reference = reference[closeIndex+1:] - // Peel off a "driver@" from the start. - driverInfo := "" - driverSplit := strings.SplitN(storeSpec, "@", 2) - if len(driverSplit) != 2 { - if storeSpec == "" { - return nil, ErrInvalidReference - } - } else { - driverInfo = driverSplit[0] - if driverInfo == "" { - return nil, ErrInvalidReference - } - storeSpec = driverSplit[1] - if storeSpec == "" { - return nil, ErrInvalidReference - } - } - // Peel off a ":options" from the end. - var options []string - optionsSplit := strings.SplitN(storeSpec, ":", 2) - if len(optionsSplit) == 2 { - options = strings.Split(optionsSplit[1], ",") - storeSpec = optionsSplit[0] - } - // Peel off a "+runroot" from the new end. 
- runRootInfo := "" - runRootSplit := strings.SplitN(storeSpec, "+", 2) - if len(runRootSplit) == 2 { - runRootInfo = runRootSplit[1] - storeSpec = runRootSplit[0] - } - // The rest is our graph root. - rootInfo := storeSpec - // Check that any paths are absolute paths. - if rootInfo != "" && !filepath.IsAbs(rootInfo) { - return nil, ErrPathNotAbsolute - } - if runRootInfo != "" && !filepath.IsAbs(runRootInfo) { - return nil, ErrPathNotAbsolute - } - store2, err := storage.GetStore(storage.StoreOptions{ - GraphDriverName: driverInfo, - GraphRoot: rootInfo, - RunRoot: runRootInfo, - GraphDriverOptions: options, - UIDMap: s.defaultUIDMap, - GIDMap: s.defaultGIDMap, - }) - if err != nil { - return nil, err - } - store = store2 - } else { - // We didn't have a store spec, so use the default. - store2, err := s.GetStore() - if err != nil { - return nil, err - } - store = store2 - } - return s.ParseStoreReference(store, reference) -} - -func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { - dref := ref.DockerReference() - if dref != nil { - if img, err := store.Image(dref.String()); err == nil { - return img, nil - } - } - if sref, ok := ref.(*storageReference); ok { - tmpRef := *sref - if img, err := tmpRef.resolveImage(); err == nil { - return img, nil - } - } - return nil, storage.ErrImageUnknown -} - -func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { - store, err := s.GetStore() - if err != nil { - return nil, err - } - return s.GetStoreImage(store, ref) -} - -func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { - // Check that there's a store location prefix. Values we're passed are - // expected to come from PolicyConfigurationIdentity or - // PolicyConfigurationNamespaces, so if there's no store location, - // something's wrong. - if scope[0] != '[' { - return ErrInvalidReference - } - // Parse the store location prefix. - closeIndex := strings.IndexRune(scope, ']') - if closeIndex < 1 { - return ErrInvalidReference - } - storeSpec := scope[1:closeIndex] - scope = scope[closeIndex+1:] - storeInfo := strings.SplitN(storeSpec, "@", 2) - if len(storeInfo) == 1 && storeInfo[0] != "" { - // One component: the graph root. - if !filepath.IsAbs(storeInfo[0]) { - return ErrPathNotAbsolute - } - } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { - // Two components: the driver type and the graph root. - if !filepath.IsAbs(storeInfo[1]) { - return ErrPathNotAbsolute - } - } else { - // Anything else: scope specified in a form we don't - // recognize. - return ErrInvalidReference - } - // That might be all of it, and that's okay. - if scope == "" { - return nil - } - - fields := strings.SplitN(scope, "@", 3) - switch len(fields) { - case 1: // name only - case 2: // name:tag@ID or name[:tag]@digest - if _, idErr := digest.Parse("sha256:" + fields[1]); idErr != nil { - if _, digestErr := digest.Parse(fields[1]); digestErr != nil { - return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error()) - } - } - case 3: // name[:tag]@digest@ID - if _, err := digest.Parse(fields[1]); err != nil { - return err - } - if _, err := digest.Parse("sha256:" + fields[2]); err != nil { - return err - } - default: // Coverage: This should never happen - return errors.New("Internal error: unexpected number of fields form strings.SplitN") - } - // As for field[0], if it is non-empty at all: - // FIXME? 
We could be verifying the various character set and length restrictions - // from docker/distribution/reference.regexp.go, but other than that there - // are few semantically invalid strings. - return nil -} diff --git a/vendor/github.com/containers/image/tarball/doc.go b/vendor/github.com/containers/image/tarball/doc.go deleted file mode 100644 index a6ced5a0e..000000000 --- a/vendor/github.com/containers/image/tarball/doc.go +++ /dev/null @@ -1,48 +0,0 @@ -// Package tarball provides a way to generate images using one or more layer -// tarballs and an optional template configuration. -// -// An example: -// package main -// -// import ( -// cp "github.com/containers/image/copy" -// "github.com/containers/image/tarball" -// "github.com/containers/image/transports/alltransports" -// -// imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -// ) -// -// func imageFromTarball() { -// src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") -// // - or - -// // src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") -// if err != nil { -// panic(err) -// } -// updater, ok := src.(tarball.ConfigUpdater) -// if !ok { -// panic("unexpected: a tarball reference should implement tarball.ConfigUpdater") -// } -// config := imgspecv1.Image{ -// Config: imgspecv1.ImageConfig{ -// Cmd: []string{"/bin/bash"}, -// }, -// } -// annotations := make(map[string]string) -// annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache" -// err = updater.ConfigUpdate(config, annotations) -// if err != nil { -// panic(err) -// } -// dest, err := alltransports.ParseImageName("docker-daemon:mock:latest") -// if err != nil { -// panic(err) -// } -// err = cp.Image(nil, dest, src, nil) -// if err != nil { -// panic(err) -// } -// } -package tarball diff --git a/vendor/github.com/containers/image/tarball/tarball_reference.go b/vendor/github.com/containers/image/tarball/tarball_reference.go deleted file mode 100644 index fc1230a89..000000000 --- a/vendor/github.com/containers/image/tarball/tarball_reference.go +++ /dev/null @@ -1,94 +0,0 @@ -package tarball - -import ( - "context" - "fmt" - "os" - "strings" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/types" - - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ConfigUpdater is an interface that ImageReferences for "tarball" images also -// implement. It can be used to set values for a configuration, and to set -// image annotations which will be present in the images returned by the -// reference's NewImage() or NewImageSource() methods. -type ConfigUpdater interface { - ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error -} - -type tarballReference struct { - transport types.ImageTransport - config imgspecv1.Image - annotations map[string]string - filenames []string - stdin []byte -} - -// ConfigUpdate updates the image's default configuration and adds annotations -// which will be visible in source images created using this reference. 
-func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error { - r.config = config - if r.annotations == nil { - r.annotations = make(map[string]string) - } - for k, v := range annotations { - r.annotations[k] = v - } - return nil -} - -func (r *tarballReference) Transport() types.ImageTransport { - return r.transport -} - -func (r *tarballReference) StringWithinTransport() string { - return strings.Join(r.filenames, ":") -} - -func (r *tarballReference) DockerReference() reference.Named { - return nil -} - -func (r *tarballReference) PolicyConfigurationIdentity() string { - return "" -} - -func (r *tarballReference) PolicyConfigurationNamespaces() []string { - return nil -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := r.NewImageSource(ctx, sys) - if err != nil { - return nil, err - } - img, err := image.FromSource(ctx, sys, src) - if err != nil { - src.Close() - return nil, err - } - return img, nil -} - -func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - for _, filename := range r.filenames { - if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error removing %q: %v", filename, err) - } - } - return nil -} - -func (r *tarballReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - return nil, fmt.Errorf(`"tarball:" locations can only be read from, not written to`) -} diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go deleted file mode 100644 index 76e3e755f..000000000 --- a/vendor/github.com/containers/image/tarball/tarball_src.go +++ /dev/null @@ -1,268 +0,0 @@ -package tarball - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "strings" - "time" - - "github.com/containers/image/types" - "github.com/klauspost/pgzip" - digest "github.com/opencontainers/go-digest" - imgspecs "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -type tarballImageSource struct { - reference tarballReference - filenames []string - diffIDs []digest.Digest - diffSizes []int64 - blobIDs []digest.Digest - blobSizes []int64 - blobTypes []string - config []byte - configID digest.Digest - configSize int64 - manifest []byte -} - -func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - // Gather up the digests, sizes, and date information for all of the files. 
- filenames := []string{} - diffIDs := []digest.Digest{} - diffSizes := []int64{} - blobIDs := []digest.Digest{} - blobSizes := []int64{} - blobTimes := []time.Time{} - blobTypes := []string{} - for _, filename := range r.filenames { - var file *os.File - var err error - var blobSize int64 - var blobTime time.Time - var reader io.Reader - if filename == "-" { - blobSize = int64(len(r.stdin)) - blobTime = time.Now() - reader = bytes.NewReader(r.stdin) - } else { - file, err = os.Open(filename) - if err != nil { - return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) - } - defer file.Close() - reader = file - fileinfo, err := file.Stat() - if err != nil { - return nil, fmt.Errorf("error reading size of %q: %v", filename, err) - } - blobSize = fileinfo.Size() - blobTime = fileinfo.ModTime() - } - - // Default to assuming the layer is compressed. - layerType := imgspecv1.MediaTypeImageLayerGzip - - // Set up to digest the file as it is. - blobIDdigester := digest.Canonical.Digester() - reader = io.TeeReader(reader, blobIDdigester.Hash()) - - // Set up to digest the file after we maybe decompress it. - diffIDdigester := digest.Canonical.Digester() - uncompressed, err := pgzip.NewReader(reader) - if err == nil { - // It is compressed, so the diffID is the digest of the uncompressed version - reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) - } else { - // It is not compressed, so the diffID and the blobID are going to be the same - diffIDdigester = blobIDdigester - layerType = imgspecv1.MediaTypeImageLayer - uncompressed = nil - } - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - n, err := io.Copy(ioutil.Discard, reader) - if err != nil { - return nil, fmt.Errorf("error reading %q: %v", filename, err) - } - if uncompressed != nil { - uncompressed.Close() - } - - // Grab our uncompressed and possibly-compressed digests and sizes. - filenames = append(filenames, filename) - diffIDs = append(diffIDs, diffIDdigester.Digest()) - diffSizes = append(diffSizes, n) - blobIDs = append(blobIDs, blobIDdigester.Digest()) - blobSizes = append(blobSizes, blobSize) - blobTimes = append(blobTimes, blobTime) - blobTypes = append(blobTypes, layerType) - } - - // Build the rootfs and history for the configuration blob. - rootfs := imgspecv1.RootFS{ - Type: "layers", - DiffIDs: diffIDs, - } - created := time.Time{} - history := []imgspecv1.History{} - // Pick up the layer comment from the configuration's history list, if one is set. - comment := "imported from tarball" - if len(r.config.History) > 0 && r.config.History[0].Comment != "" { - comment = r.config.History[0].Comment - } - for i := range diffIDs { - createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) - history = append(history, imgspecv1.History{ - Created: &blobTimes[i], - CreatedBy: createdBy, - Comment: comment, - }) - // Use the mtime of the most recently modified file as the image's creation time. - if created.Before(blobTimes[i]) { - created = blobTimes[i] - } - } - - // Pick up other defaults from the config in the reference. - config := r.config - if config.Created == nil { - config.Created = &created - } - if config.Architecture == "" { - config.Architecture = runtime.GOARCH - } - if config.OS == "" { - config.OS = runtime.GOOS - } - config.RootFS = rootfs - config.History = history - - // Encode and digest the image configuration blob. 
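The TeeReader arrangement above digests each layer in a single pass: the raw bytes feed the blob digest while the possibly-decompressed bytes feed the diffID. A compact sketch of the same idea, substituting the standard library's compress/gzip for the vendored pgzip; layerDigests is an invented name:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"

	digest "github.com/opencontainers/go-digest"
)

// layerDigests reads a layer once: blobID covers the bytes as stored, and
// diffID covers the uncompressed contents (the two coincide for
// uncompressed input).
func layerDigests(r io.Reader) (blobID, diffID digest.Digest, err error) {
	blobDigester := digest.Canonical.Digester()
	tee := io.TeeReader(r, blobDigester.Hash())
	if gz, gzErr := gzip.NewReader(tee); gzErr == nil {
		diffDigester := digest.Canonical.Digester()
		if _, err = io.Copy(diffDigester.Hash(), gz); err != nil {
			return "", "", err
		}
		return blobDigester.Digest(), diffDigester.Digest(), nil
	}
	// Not gzip: drain the rest so blobID is complete; diffID equals blobID.
	if _, err = io.Copy(ioutil.Discard, tee); err != nil {
		return "", "", err
	}
	d := blobDigester.Digest()
	return d, d, nil
}

func main() {
	blobID, diffID, err := layerDigests(bytes.NewReader([]byte("plain tar bytes")))
	fmt.Println(blobID == diffID, err) // true <nil>
}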
- configBytes, err := json.Marshal(&config) - if err != nil { - return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) - } - configID := digest.Canonical.FromBytes(configBytes) - configSize := int64(len(configBytes)) - - // Populate a manifest with the configuration blob and the file as the single layer. - layerDescriptors := []imgspecv1.Descriptor{} - for i := range blobIDs { - layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ - Digest: blobIDs[i], - Size: blobSizes[i], - MediaType: blobTypes[i], - }) - } - annotations := make(map[string]string) - for k, v := range r.annotations { - annotations[k] = v - } - manifest := imgspecv1.Manifest{ - Versioned: imgspecs.Versioned{ - SchemaVersion: 2, - }, - Config: imgspecv1.Descriptor{ - Digest: configID, - Size: configSize, - MediaType: imgspecv1.MediaTypeImageConfig, - }, - Layers: layerDescriptors, - Annotations: annotations, - } - - // Encode the manifest. - manifestBytes, err := json.Marshal(&manifest) - if err != nil { - return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) - } - - // Return the image. - src := &tarballImageSource{ - reference: *r, - filenames: filenames, - diffIDs: diffIDs, - diffSizes: diffSizes, - blobIDs: blobIDs, - blobSizes: blobSizes, - blobTypes: blobTypes, - config: configBytes, - configID: configID, - configSize: configSize, - manifest: manifestBytes, - } - - return src, nil -} - -func (is *tarballImageSource) Close() error { - return nil -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (is *tarballImageSource) HasThreadSafeGetBlob() bool { - return false -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - // We should only be asked about things in the manifest. Maybe the configuration blob. - if blobinfo.Digest == is.configID { - return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil - } - // Maybe one of the layer blobs. - for i := range is.blobIDs { - if blobinfo.Digest == is.blobIDs[i] { - // We want to read that layer: open the file or memory block and hand it back. - if is.filenames[i] == "-" { - return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil - } - reader, err := os.Open(is.filenames[i]) - if err != nil { - return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) - } - return reader, is.blobSizes[i], nil - } - } - return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
-func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName) - } - return is.manifest, imgspecv1.MediaTypeImageManifest, nil -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) - } - return nil, nil -} - -func (is *tarballImageSource) Reference() types.ImageReference { - return &is.reference -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (*tarballImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/tarball/tarball_transport.go b/vendor/github.com/containers/image/tarball/tarball_transport.go deleted file mode 100644 index 72558b5e8..000000000 --- a/vendor/github.com/containers/image/tarball/tarball_transport.go +++ /dev/null @@ -1,66 +0,0 @@ -package tarball - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "strings" - - "github.com/containers/image/transports" - "github.com/containers/image/types" -) - -const ( - transportName = "tarball" - separator = ":" -) - -var ( - // Transport implements the types.ImageTransport interface for "tarball:" images, - // which are makeshift images constructed using one or more possibly-compressed tar - // archives. - Transport = &tarballTransport{} -) - -type tarballTransport struct { -} - -func (t *tarballTransport) Name() string { - return transportName -} - -func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) { - var stdin []byte - var err error - filenames := strings.Split(reference, separator) - for _, filename := range filenames { - if filename == "-" { - stdin, err = ioutil.ReadAll(os.Stdin) - if err != nil { - return nil, fmt.Errorf("error buffering stdin: %v", err) - } - continue - } - f, err := os.Open(filename) - if err != nil { - return nil, fmt.Errorf("error opening %q: %v", filename, err) - } - f.Close() - } - ref := &tarballReference{ - transport: t, - filenames: filenames, - stdin: stdin, - } - return ref, nil -} - -func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { - // See the explanation in daemonReference.PolicyConfigurationIdentity. 
-	return errors.New(`tarball: does not support any scopes except the default "" one`)
-}
-
-func init() {
-	transports.Register(Transport)
-}
diff --git a/vendor/github.com/containers/image/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/transports/alltransports/alltransports.go
deleted file mode 100644
index 3a988f3f8..000000000
--- a/vendor/github.com/containers/image/transports/alltransports/alltransports.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package alltransports
-
-import (
-	"strings"
-
-	// register all known transports
-	// NOTE: Make sure docs/containers-policy.json.5.md is updated when adding or updating
-	// a transport.
-	_ "github.com/containers/image/directory"
-	_ "github.com/containers/image/docker"
-	_ "github.com/containers/image/docker/archive"
-	_ "github.com/containers/image/oci/archive"
-	_ "github.com/containers/image/oci/layout"
-	_ "github.com/containers/image/openshift"
-	_ "github.com/containers/image/tarball"
-	// The ostree transport is registered by ostree*.go
-	// The storage transport is registered by storage*.go
-	"github.com/containers/image/transports"
-	"github.com/containers/image/types"
-	"github.com/pkg/errors"
-)
-
-// ParseImageName converts a URL-like image name to a types.ImageReference.
-func ParseImageName(imgName string) (types.ImageReference, error) {
-	// Keep this in sync with TransportFromImageName!
-	parts := strings.SplitN(imgName, ":", 2)
-	if len(parts) != 2 {
-		return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
-	}
-	transport := transports.Get(parts[0])
-	if transport == nil {
-		return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
-	}
-	return transport.ParseReference(parts[1])
-}
-
-// TransportFromImageName converts a URL-like name to a types.ImageTransport or nil when
-// the transport is unknown or when the input is invalid.
-func TransportFromImageName(imageName string) types.ImageTransport {
-	// Keep this in sync with ParseImageName!
- parts := strings.SplitN(imageName, ":", 2) - if len(parts) == 2 { - return transports.Get(parts[0]) - } - return nil -} diff --git a/vendor/github.com/containers/image/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/transports/alltransports/docker_daemon.go deleted file mode 100644 index 6d2ba4b30..000000000 --- a/vendor/github.com/containers/image/transports/alltransports/docker_daemon.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !containers_image_docker_daemon_stub - -package alltransports - -import ( - // Register the docker-daemon transport - _ "github.com/containers/image/docker/daemon" -) diff --git a/vendor/github.com/containers/image/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/transports/alltransports/docker_daemon_stub.go deleted file mode 100644 index 27f3850f1..000000000 --- a/vendor/github.com/containers/image/transports/alltransports/docker_daemon_stub.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build containers_image_docker_daemon_stub - -package alltransports - -import "github.com/containers/image/transports" - -func init() { - transports.Register(transports.NewStubTransport("docker-daemon")) -} diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree.go b/vendor/github.com/containers/image/transports/alltransports/ostree.go deleted file mode 100644 index cc4d69fe8..000000000 --- a/vendor/github.com/containers/image/transports/alltransports/ostree.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build containers_image_ostree,linux - -package alltransports - -import ( - // Register the ostree transport - _ "github.com/containers/image/ostree" -) diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go deleted file mode 100644 index fb5b96e54..000000000 --- a/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !containers_image_ostree !linux - -package alltransports - -import "github.com/containers/image/transports" - -func init() { - transports.Register(transports.NewStubTransport("ostree")) -} diff --git a/vendor/github.com/containers/image/transports/alltransports/storage.go b/vendor/github.com/containers/image/transports/alltransports/storage.go deleted file mode 100644 index a867c6644..000000000 --- a/vendor/github.com/containers/image/transports/alltransports/storage.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !containers_image_storage_stub - -package alltransports - -import ( - // Register the storage transport - _ "github.com/containers/image/storage" -) diff --git a/vendor/github.com/containers/image/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/transports/alltransports/storage_stub.go deleted file mode 100644 index 4ac684e58..000000000 --- a/vendor/github.com/containers/image/transports/alltransports/storage_stub.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build containers_image_storage_stub - -package alltransports - -import "github.com/containers/image/transports" - -func init() { - transports.Register(transports.NewStubTransport("containers-storage")) -} diff --git a/vendor/github.com/containers/image/transports/stub.go b/vendor/github.com/containers/image/transports/stub.go deleted file mode 100644 index 087f69b6e..000000000 --- a/vendor/github.com/containers/image/transports/stub.go +++ /dev/null @@ -1,36 +0,0 @@ -package transports - -import ( - "fmt" - - "github.com/containers/image/types" 
-)
-
-// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
-type stubTransport string
-
-// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
-func NewStubTransport(name string) types.ImageTransport {
-	return stubTransport(name)
-}
-
-// Name returns the name of the transport, which must be unique among other transports.
-func (s stubTransport) Name() string {
-	return string(s)
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
-func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) {
-	return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s))
-}
-
-// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
-// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
-// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
-// scope passed to this function will not be "", that value is always allowed.
-func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error {
-	// Allowing any reference in here allows tools with some transports stubbed-out to still
-	// use signature verification policies which refer to these stubbed-out transports.
-	// See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON .
-	return nil
-}
diff --git a/vendor/github.com/containers/image/transports/transports.go b/vendor/github.com/containers/image/transports/transports.go
deleted file mode 100644
index 687d0a44e..000000000
--- a/vendor/github.com/containers/image/transports/transports.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package transports
-
-import (
-	"fmt"
-	"sort"
-	"sync"
-
-	"github.com/containers/image/types"
-)
-
-// knownTransports is a registry of known ImageTransport instances.
-type knownTransports struct {
-	transports map[string]types.ImageTransport
-	mu         sync.Mutex
-}
-
-func (kt *knownTransports) Get(k string) types.ImageTransport {
-	kt.mu.Lock()
-	t := kt.transports[k]
-	kt.mu.Unlock()
-	return t
-}
-
-func (kt *knownTransports) Remove(k string) {
-	kt.mu.Lock()
-	delete(kt.transports, k)
-	kt.mu.Unlock()
-}
-
-func (kt *knownTransports) Add(t types.ImageTransport) {
-	kt.mu.Lock()
-	defer kt.mu.Unlock()
-	name := t.Name()
-	if t := kt.transports[name]; t != nil {
-		panic(fmt.Sprintf("Duplicate image transport name %s", name))
-	}
-	kt.transports[name] = t
-}
-
-var kt *knownTransports
-
-func init() {
-	kt = &knownTransports{
-		transports: make(map[string]types.ImageTransport),
-	}
-}
-
-// Get returns the transport specified by name or nil when unavailable.
-func Get(name string) types.ImageTransport {
-	return kt.Get(name)
-}
-
-// Delete deletes a transport from the registered transports.
-func Delete(name string) {
-	kt.Remove(name)
-}
-
-// Register registers a transport.
-func Register(t types.ImageTransport) {
-	kt.Add(t)
-}
-
-// ImageName converts a types.ImageReference into a URL-like image name, which MUST be such that
-// ParseImageName(ImageName(reference)) returns an equivalent reference.
-//
-// This is the generally recommended way to refer to images in the UI.
-//
-// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
-func ImageName(ref types.ImageReference) string {
-	return ref.Transport().Name() + ":" + ref.StringWithinTransport()
-}
-
-// ListNames returns a list of non-deprecated transport names.
-// Deprecated transports can be used, but are not presented to users.
-func ListNames() []string {
-	kt.mu.Lock()
-	defer kt.mu.Unlock()
-	deprecated := map[string]bool{
-		"atomic": true,
-	}
-	var names []string
-	for _, transport := range kt.transports {
-		if !deprecated[transport.Name()] {
-			names = append(names, transport.Name())
-		}
-	}
-	sort.Strings(names)
-	return names
-}
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
deleted file mode 100644
index 08b4241e0..000000000
--- a/vendor/github.com/containers/image/types/types.go
+++ /dev/null
@@ -1,521 +0,0 @@
-package types
-
-import (
-	"context"
-	"io"
-	"time"
-
-	"github.com/containers/image/docker/reference"
-	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// ImageTransport is a top-level namespace for ways to store/load an image.
-// It should generally correspond to ImageSource/ImageDestination implementations.
-//
-// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
-// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS
-// (or, even, IPv4 or IPv6).
-//
-// OTOH all images using the same transport should (apart from versions of the image format) be interoperable.
-// For example, several different ImageTransport implementations may be based on local filesystem paths,
-// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...)
-//
-// See also transports.KnownTransports.
-type ImageTransport interface {
-	// Name returns the name of the transport, which must be unique among other transports.
-	Name() string
-	// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
-	ParseReference(reference string) (ImageReference, error)
-	// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
-	// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
-	// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
-	// scope passed to this function will not be "", that value is always allowed.
-	ValidatePolicyConfigurationScope(scope string) error
-}
-
-// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport.
-//
-// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening
-// within an ImageTransport.ParseReference() or equivalent API creating the reference object.
-// That's also why the various identification/formatting methods of this type do not support returning errors.
-// -// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside -// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on. -type ImageReference interface { - Transport() ImageTransport - // StringWithinTransport returns a string representation of the reference, which MUST be such that - // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. - // NOTE: The returned string is not promised to be equal to the original input to ParseReference; - // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. - // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; - // instead, see transports.ImageName(). - StringWithinTransport() string - - // DockerReference returns a Docker reference associated with this reference - // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, - // not e.g. after redirect or alias processing), or nil if unknown/not applicable. - DockerReference() reference.Named - - // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. - // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; - // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical - // (i.e. various references with exactly the same semantics should return the same configuration identity) - // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but - // not required/guaranteed that it will be a valid input to Transport().ParseReference(). - // Returns "" if configuration identities for these references are not supported. - PolicyConfigurationIdentity() string - - // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search - // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed - // in order, terminating on first match, and an implicit "" is always checked at the end. - // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), - // and each following element to be a prefix of the element preceding it. - PolicyConfigurationNamespaces() []string - - // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. - // The caller must call .Close() on the returned ImageCloser. - // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, - // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. - // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. - NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error) - // NewImageSource returns a types.ImageSource for this reference. - // The caller must call .Close() on the returned ImageSource. - NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error) - // NewImageDestination returns a types.ImageDestination for this reference. - // The caller must call .Close() on the returned ImageDestination. 
-	NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error)
-
-	// DeleteImage deletes the named image from the registry, if supported.
-	DeleteImage(ctx context.Context, sys *SystemContext) error
-}
-
-// BlobInfo collects known information about a blob (layer/config).
-// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
-type BlobInfo struct {
-	Digest      digest.Digest // "" if unknown.
-	Size        int64         // -1 if unknown
-	URLs        []string
-	Annotations map[string]string
-	MediaType   string
-}
-
-// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
-// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
-// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
-//
-// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
-// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
-// at least by not failing hard when encountering unknown data.
-type BICTransportScope struct {
-	Opaque string
-}
-
-// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
-// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
-// can look it up using BlobInfoCache.CandidateLocations.
-//
-// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
-// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
-// at least by not failing hard when encountering unknown data.
-type BICLocationReference struct {
-	Opaque string
-}
-
-// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
-type BICReplacementCandidate struct {
-	Digest   digest.Digest
-	Location BICLocationReference
-}
-
-// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
-//
-// It records two kinds of data:
-// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
-//   One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
-//   This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
-//   or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
-//
-//   It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
-//   to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
-//
-//   This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
-//   compress/decompress blobs for their own purposes.
-//
-// - Known blob locations, managed by individual transports:
-//   The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
-//   recording transport-specific information that allows the transport to reuse the blob in the future;
-//   then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
-//
-//   Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
-//   can be directly reused within a registry, or mounted across registries within a registry server.)
-//
-// None of the methods return an error indication: neither errors in reading from, nor in writing to, the cache should be fatal;
-// users of the cache should just fall back to copying the blobs the usual way.
-type BlobInfoCache interface {
-	// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
-	// May return anyDigest if it is known to be uncompressed.
-	// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
-	UncompressedDigest(anyDigest digest.Digest) digest.Digest
-	// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
-	// It’s allowed for anyDigest == uncompressed.
-	// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
-	// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
-	// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-	RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
-
-	// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
-	// and can be reused given the opaque location data.
-	RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
-	// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
-	// within the specified (transport, scope) (if they still exist, which is not guaranteed).
-	//
-	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
-	// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
-	// uncompressed digest.
-	CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
-}
-
-// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
-// This is primarily useful for copying images around; for examining their properties, Image (below)
-// is usually more useful.
-// Each ImageSource should eventually be closed by calling Close().
-//
-// WARNING: Various methods which return an object identified by digest generally do not
-// validate that the returned data actually matches that digest; this is the caller’s responsibility.
-type ImageSource interface {
-	// Reference returns the reference used to set up this source, _as specified by the user_
-	// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-	Reference() ImageReference
-	// Close removes resources associated with an initialized ImageSource, if any.
-	Close() error
-	// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-	// It may use a remote (= slow) service.
-	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
-	// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
-	GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error)
-	// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-	// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-	// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-	GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
-	// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-	HasThreadSafeGetBlob() bool
-	// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-	// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-	// (e.g. if the source never returns manifest lists).
-	GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error)
-	// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
-	// The Digest field is guaranteed to be provided; Size may be -1.
-	// WARNING: The list may contain duplicates, and they are semantically relevant.
-	LayerInfosForCopy(ctx context.Context) ([]BlobInfo, error)
-}
-
-// LayerCompression indicates whether layers must be compressed, decompressed or preserved
-type LayerCompression int
-
-const (
-	// PreserveOriginal indicates the layer must be preserved, i.e.
-	// no compression or decompression.
-	PreserveOriginal LayerCompression = iota
-	// Decompress indicates the layer must be decompressed
-	Decompress
-	// Compress indicates the layer must be compressed
-	Compress
-)
-
-// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
-//
-// There is a specific required order for some of the calls:
-// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
-// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
-// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
-//
-// Each ImageDestination should eventually be closed by calling Close().
-type ImageDestination interface {
-	// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
-	// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-	Reference() ImageReference
-	// Close removes resources associated with an initialized ImageDestination, if any.
-	Close() error
-
-	// SupportedManifestMIMETypes tells which manifest mime types the destination supports.
-	// If an empty slice or nil is returned, then any mime type can be tried for upload.
-	SupportedManifestMIMETypes() []string
-	// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-	// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-	SupportsSignatures(ctx context.Context) error
-	// DesiredLayerCompression indicates the kind of compression to apply on layers
-	DesiredLayerCompression() LayerCompression
-	// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-	// uploaded to the image destination, true otherwise.
-	AcceptsForeignLayerURLs() bool
-	// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
-	MustMatchRuntimeOS() bool
-	// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-	// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-	// Does not make a difference if Reference().DockerReference() is nil.
-	IgnoresEmbeddedDockerReference() bool
-
-	// PutBlob writes contents of stream and returns data representing the result.
-	// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
-	// inputInfo.Size is the expected length of stream, if known.
-	// inputInfo.MediaType describes the blob format, if known.
-	// May update cache.
-	// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-	// to any other readers for download using the supplied digest.
-	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-	PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
-	// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-	HasThreadSafePutBlob() bool
-	// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-	// info.Digest must not be empty.
-	// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
-	// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-	// May use and/or update cache.
-	TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
-	// PutManifest writes manifest to the destination.
-	// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-	// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
-	// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-	PutManifest(ctx context.Context, manifest []byte) error
-	PutSignatures(ctx context.Context, signatures [][]byte) error
-	// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-	// WARNING: This does not have any transactional semantics:
-	// - Uploaded data MAY be visible to others before Commit() is called
-	// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-	Commit(ctx context.Context) error
-}
-
-// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
-// refuses specifically this manifest type, but may accept a different manifest type.
-type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
-	Err error
-}
-
-func (e ManifestTypeRejectedError) Error() string {
-	return e.Err.Error()
-}
-
-// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
-// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
-// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
-// This also makes the UnparsedImage→Image conversion an explicitly visible step.
-//
-// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
-//
-// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
-type UnparsedImage interface {
-	// Reference returns the reference used to set up this source, _as specified by the user_
-	// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-	Reference() ImageReference
-	// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
-	Manifest(ctx context.Context) ([]byte, string, error)
-	// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
-	Signatures(ctx context.Context) ([][]byte, error)
-}
-
-// Image is the primary API for inspecting properties of images.
-// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
-//
-// The Image must not be used after the underlying ImageSource is Close()d.
-type Image interface {
-	// Note that Reference may return nil in the return value of UpdatedImage!
-	UnparsedImage
-	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-	// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
-	ConfigInfo() BlobInfo
-	// ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise.
-	// The result is cached; it is OK to call this however often you need.
-	ConfigBlob(context.Context) ([]byte, error)
-	// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
-	// layers in the resulting configuration isn't guaranteed to be returned due to how
-	// old image manifests work (docker v2s1 especially).
-	OCIConfig(context.Context) (*v1.Image, error)
-	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-	// WARNING: The list may contain duplicates, and they are semantically relevant.
-	LayerInfos() []BlobInfo
-	// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
-	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-	// WARNING: The list may contain duplicates, and they are semantically relevant.
-	LayerInfosForCopy(context.Context) ([]BlobInfo, error)
-	// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
-	// It returns false if the manifest does not embed a Docker reference.
-	// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
-	EmbeddedDockerReferenceConflicts(ref reference.Named) bool
-	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
-	Inspect(context.Context) (*ImageInspectInfo, error)
-	// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
-	// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
-	// (most importantly it forces us to download the full layers even if they are already present at the destination).
-	UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool
-	// UpdatedImage returns a types.Image modified according to options.
-	// Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
-	// This does not change the state of the original Image object.
-	UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
-	// Size returns an approximation of the amount of disk space which is consumed by the image in its current
-	// location. If the size is not known, -1 will be returned.
-	Size() (int64, error)
-}
-
-// ImageCloser is an Image with a Close() method which must be called by the user.
-// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource,
-// to ensure that the ImageSource is closed.
-type ImageCloser interface {
-	Image
-	// Close removes resources associated with an initialized ImageCloser.
-	Close() error
-}
-
-// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
-type ManifestUpdateOptions struct {
-	LayerInfos              []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
-	EmbeddedDockerReference reference.Named
-	ManifestMIMEType        string
-	// The values below are NOT requests to modify the image; they provide optional context which may or may not be used.
-	InformationOnly ManifestUpdateInformation
-}
-
-// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
-// only to make writing struct literals possible.
-type ManifestUpdateInformation struct {
-	Destination  ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
-	LayerInfos   []BlobInfo       // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
-	LayerDiffIDs []digest.Digest  // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
-}
-
-// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
-// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported
-// for other manifest types.
-type ImageInspectInfo struct {
-	Tag           string
-	Created       *time.Time
-	DockerVersion string
-	Labels        map[string]string
-	Architecture  string
-	Os            string
-	Layers        []string
-	Env           []string
-}
-
-// DockerAuthConfig contains authorization information for connecting to a registry.
-// The values of Username and Password can be empty for accessing the registry anonymously.
-type DockerAuthConfig struct {
-	Username string
-	Password string
-}
-
-// OptionalBool is a boolean with an additional undefined value, which is meant
-// to be used in the context of user input to distinguish between a
-// user-specified value and a default value.
-type OptionalBool byte
-
-const (
-	// OptionalBoolUndefined indicates that the OptionalBool hasn't been written.
-	OptionalBoolUndefined OptionalBool = iota
-	// OptionalBoolTrue represents the boolean true.
-	OptionalBoolTrue
-	// OptionalBoolFalse represents the boolean false.
-	OptionalBoolFalse
-)
-
-// NewOptionalBool converts the input bool into either OptionalBoolTrue or
-// OptionalBoolFalse. The function is meant to avoid boilerplate code for users.
-func NewOptionalBool(b bool) OptionalBool {
-	o := OptionalBoolFalse
-	if b {
-		o = OptionalBoolTrue
-	}
-	return o
-}
-
-// SystemContext allows parameterizing access to implicitly-accessed resources,
-// like configuration files in /etc and users' login state in their home directory.
-// Various components can share the same field only if their semantics is exactly
-// the same; if in doubt, add a new field.
-// It is always OK to pass nil instead of a SystemContext.
-type SystemContext struct {
-	// If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
-	// Not used for any of the more specific path overrides available in this struct.
-	// Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
-	// NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths,
-	// and there is no need to worry about the environment.)
-	// NOTE: This does NOT affect paths starting with $HOME.
-	RootForImplicitAbsolutePaths string
-
-	// === Global configuration overrides ===
-	// If not "", overrides the system's default path for signature.Policy configuration.
-	SignaturePolicyPath string
-	// If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
-	RegistriesDirPath string
-	// Path to the system-wide registries configuration file
-	SystemRegistriesConfPath string
-	// If not "", overrides the default path for the authentication file
-	AuthFilePath string
-	// If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
-	ArchitectureChoice string
-	// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
-	OSChoice string
-	// If not "", overrides the system's default directory containing a blob info cache.
-	BlobInfoCacheDir string
-
-	// Additional tags when creating or copying a docker-archive.
-	DockerArchiveAdditionalTags []reference.NamedTagged
-
-	// === OCI.Transport overrides ===
-	// If not "", a directory containing a CA certificate (ending with ".crt"),
-	// a client certificate (ending with ".cert") and a client certificate key
-	// (ending with ".key") used when downloading OCI image layers.
-	OCICertPath string
-	// Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
-	OCIInsecureSkipTLSVerify bool
-	// If not "", use a shared directory for storing blobs rather than within OCI layouts
-	OCISharedBlobDirPath string
-	// Allow uncompressed image layers for OCI image layers
-	OCIAcceptUncompressedLayers bool
-
-	// === docker.Transport overrides ===
-	// If not "", a directory containing a CA certificate (ending with ".crt"),
-	// a client certificate (ending with ".cert") and a client certificate key
-	// (ending with ".key") used when talking to a Docker Registry.
-	DockerCertPath string
-	// If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
-	// Ignored if DockerCertPath is non-empty.
-	DockerPerHostCertDirPath string
-	// Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
-	DockerInsecureSkipTLSVerify OptionalBool
-	// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
-	DockerAuthConfig *DockerAuthConfig
-	// if not "", a User-Agent header is added to each request when contacting a registry.
-	DockerRegistryUserAgent string
-	// if true, a V1 ping attempt isn't done to give users a better error. Default is false.
-	// Note that this field is used mainly to integrate containers/image into projectatomic/docker
-	// in order to not break any existing docker's integration tests.
-	DockerDisableV1Ping bool
-	// Directory to use for OSTree temporary files
-	OSTreeTmpDirPath string
-
-	// === docker/daemon.Transport overrides ===
-	// A directory containing a CA certificate (ending with ".crt"),
-	// a client certificate (ending with ".cert") and a client certificate key
-	// (ending with ".key") used when talking to a Docker daemon.
-	DockerDaemonCertPath string
-	// The hostname or IP of the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed.
-	DockerDaemonHost string
-	// Used to skip TLS verification, off by default. To take effect, DockerDaemonCertPath needs to be specified as well.
- DockerDaemonInsecureSkipTLSVerify bool - - // === dir.Transport overrides === - // DirForceCompress compresses the image layers if set to true - DirForceCompress bool -} - -// ProgressProperties is used to pass information from the copy code to a monitor which -// can use the real-time information to produce output or react to changes. -type ProgressProperties struct { - Artifact BlobInfo - Offset uint64 -} diff --git a/vendor/github.com/containers/image/v4/LICENSE b/vendor/github.com/containers/image/v4/LICENSE new file mode 100644 index 000000000..953563530 --- /dev/null +++ b/vendor/github.com/containers/image/v4/LICENSE @@ -0,0 +1,189 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/image/v4/copy/copy.go b/vendor/github.com/containers/image/v4/copy/copy.go new file mode 100644 index 000000000..30d8a4464 --- /dev/null +++ b/vendor/github.com/containers/image/v4/copy/copy.go @@ -0,0 +1,975 @@ +package copy + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/image" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/pkg/blobinfocache" + "github.com/containers/image/v4/pkg/compression" + "github.com/containers/image/v4/signature" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/types" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbauerster/mpb" + "github.com/vbauerster/mpb/decor" + "golang.org/x/crypto/ssh/terminal" + "golang.org/x/sync/semaphore" +) + +type digestingReader struct { + source io.Reader + digester digest.Digester + expectedDigest digest.Digest + validationFailed bool + validationSucceeded bool +} + +// maxParallelDownloads is used to limit the maxmimum number of parallel +// downloads. Let's follow Firefox by limiting it to 6. +var maxParallelDownloads = 6 + +// compressionBufferSize is the buffer size used to compress a blob +var compressionBufferSize = 1048576 + +// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error +// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. +// (neither is set if EOF is never reached). +func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { + if err := expectedDigest.Validate(); err != nil { + return nil, errors.Errorf("Invalid digest specification %s", expectedDigest) + } + digestAlgorithm := expectedDigest.Algorithm() + if !digestAlgorithm.Available() { + return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) + } + return &digestingReader{ + source: source, + digester: digestAlgorithm.Digester(), + expectedDigest: expectedDigest, + validationFailed: false, + }, nil +} + +func (d *digestingReader) Read(p []byte) (int, error) { + n, err := d.source.Read(p) + if n > 0 { + if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil { + // Coverage: This should not happen, the hash.Hash interface requires + // d.digest.Write to never return an error, and the io.Writer interface + // requires n2 == len(input) if no error is returned. + return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. 
%d", n2, n) + } + } + if err == io.EOF { + actualDigest := d.digester.Digest() + if actualDigest != d.expectedDigest { + d.validationFailed = true + return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest) + } + d.validationSucceeded = true + } + return n, err +} + +// copier allows us to keep track of diffID values for blobs, and other +// data shared across one or more images in a possible manifest list. +type copier struct { + dest types.ImageDestination + rawSource types.ImageSource + reportWriter io.Writer + progressOutput io.Writer + progressInterval time.Duration + progress chan types.ProgressProperties + blobInfoCache types.BlobInfoCache + copyInParallel bool + compressionFormat compression.Algorithm + compressionLevel *int +} + +// imageCopier tracks state specific to a single image (possibly an item of a manifest list) +type imageCopier struct { + c *copier + manifestUpdates *types.ManifestUpdateOptions + src types.Image + diffIDsAreNeeded bool + canModifyManifest bool + canSubstituteBlobs bool +} + +// Options allows supplying non-default configuration modifying the behavior of CopyImage. +type Options struct { + RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. + SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), + ReportWriter io.Writer + SourceCtx *types.SystemContext + DestinationCtx *types.SystemContext + ProgressInterval time.Duration // time to wait between reports to signal the progress channel + Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. + // manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type + ForceManifestMIMEType string +} + +// Image copies image from srcRef to destRef, using policyContext to validate +// source image admissibility. It returns the manifest which was written to +// the new copy of the image. +func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (manifest []byte, retErr error) { + // NOTE this function uses an output parameter for the error return value. + // Setting this and returning is the ideal way to return an error. + // + // the defers in this routine will wrap the error return with its own errors + // which can be valuable context in the middle of a multi-streamed copy. + if options == nil { + options = &Options{} + } + + reportWriter := ioutil.Discard + + if options.ReportWriter != nil { + reportWriter = options.ReportWriter + } + + dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) + if err != nil { + return nil, errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef)) + } + defer func() { + if err := dest.Close(); err != nil { + retErr = errors.Wrapf(retErr, " (dest: %v)", err) + } + }() + + rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) + if err != nil { + return nil, errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef)) + } + defer func() { + if err := rawSource.Close(); err != nil { + retErr = errors.Wrapf(retErr, " (src: %v)", err) + } + }() + + // If reportWriter is not a TTY (e.g., when piping to a file), do not + // print the progress bars to avoid long and hard to parse output. 
+	// createProgressBar() will print a single line instead.
+	progressOutput := reportWriter
+	if !isTTY(reportWriter) {
+		progressOutput = ioutil.Discard
+	}
+	copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
+
+	c := &copier{
+		dest:             dest,
+		rawSource:        rawSource,
+		reportWriter:     reportWriter,
+		progressOutput:   progressOutput,
+		progressInterval: options.ProgressInterval,
+		progress:         options.Progress,
+		copyInParallel:   copyInParallel,
+		// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
+		// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
+		// we might want to add a separate CommonCtx — or would that be too confusing?
+		blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
+	}
+	// Default to using gzip compression unless specified otherwise.
+	if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil {
+		algo, err := compression.AlgorithmByName("gzip")
+		if err != nil {
+			return nil, err
+		}
+		c.compressionFormat = algo
+	} else {
+		c.compressionFormat = *options.DestinationCtx.CompressionFormat
+	}
+	if options.DestinationCtx != nil {
+		// Note that the compressionLevel can be nil.
+		c.compressionLevel = options.DestinationCtx.CompressionLevel
+	}
+
+	unparsedToplevel := image.UnparsedInstance(rawSource, nil)
+	multiImage, err := isMultiImage(ctx, unparsedToplevel)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
+	}
+
+	if !multiImage {
+		// The simple case: Just copy a single image.
+		if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil {
+			return nil, err
+		}
+	} else {
+		// This is a manifest list. Choose a single image and copy it.
+		// FIXME: Copy to destinations which support manifest lists, one image at a time.
+		instanceDigest, err := image.ChooseManifestInstanceFromManifestList(ctx, options.SourceCtx, unparsedToplevel)
+		if err != nil {
+			return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
+		}
+		logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest)
+		unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
+
+		if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := c.dest.Commit(ctx); err != nil {
+		return nil, errors.Wrap(err, "Error committing the finished image")
+	}
+
+	return manifest, nil
+}
+
+// copyOneImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
+// source image admissibility.
+func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifestBytes []byte, retErr error) {
+	// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
+	// Make sure we fail cleanly in such cases.
+	multiImage, err := isMultiImage(ctx, unparsedImage)
+	if err != nil {
+		// FIXME FIXME: How to name a reference for the sub-image?
+ return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference())) + } + if multiImage { + return nil, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") + } + + // Please keep this policy check BEFORE reading any other information about the image. + // (the multiImage check above only matches the MIME type, which we have received anyway. + // Actual parsing of anything should be deferred.) + if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. + return nil, errors.Wrap(err, "Source image rejected") + } + src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage) + if err != nil { + return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference())) + } + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + sourceManifest, _, err := src.Manifest(ctx) + if err != nil { + return nil, errors.Wrapf(err, "Error reading manifest from source image") + } + matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest()) + if err != nil { + return nil, errors.Wrapf(err, "Error computing digest of source image's manifest") + } + if !matches { + return nil, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + + if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil { + return nil, err + } + + var sigs [][]byte + if options.RemoveSignatures { + sigs = [][]byte{} + } else { + c.Printf("Getting image source signatures\n") + s, err := src.Signatures(ctx) + if err != nil { + return nil, errors.Wrap(err, "Error reading signatures") + } + sigs = s + } + if len(sigs) != 0 { + c.Printf("Checking if image destination supports signatures\n") + if err := c.dest.SupportsSignatures(ctx); err != nil { + return nil, errors.Wrap(err, "Can not copy signatures") + } + } + + ic := imageCopier{ + c: c, + manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, + src: src, + // diffIDsAreNeeded is computed later + canModifyManifest: len(sigs) == 0 && !destIsDigestedReference, + } + // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. + // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: + // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. + // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk + // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, + // and we would reuse and sign it. + ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == "" + + if err := ic.updateEmbeddedDockerReference(); err != nil { + return nil, err + } + + // We compute preferredManifestMIMEType only to show it in error messages. 
+ // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed. + preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType) + if err != nil { + return nil, err + } + + // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. + ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) + + if err := ic.copyLayers(ctx); err != nil { + return nil, err + } + + // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; + // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support + // without actually trying to upload something and getting a types.ManifestTypeRejectedError. + // So, try the preferred manifest MIME type. If the process succeeds, fine… + manifestBytes, err = ic.copyUpdatedConfigAndManifest(ctx) + if err != nil { + logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err) + // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options. + if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 { + // We don’t have other options. + // In principle the code below would handle this as well, but the resulting error message is fairly ugly. + // Don’t bother the user with MIME types if we have no choice. + return nil, err + } + // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. + // So if we are here, we will definitely be trying to convert the manifest. + // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, + // so let’s bail out early and with a better error message. + if !ic.canModifyManifest { + return nil, errors.Wrap(err, "Writing manifest failed (and converting it is not possible)") + } + + // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. + errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)} + for _, manifestMIMEType := range otherManifestMIMETypeCandidates { + logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) + ic.manifestUpdates.ManifestMIMEType = manifestMIMEType + attemptedManifest, err := ic.copyUpdatedConfigAndManifest(ctx) + if err != nil { + logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) + continue + } + + // We have successfully uploaded a manifest. + manifestBytes = attemptedManifest + errs = nil // Mark this as a success so that we don't abort below. + break + } + if errs != nil { + return nil, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + } + + if options.SignBy != "" { + newSig, err := c.createSignature(manifestBytes, options.SignBy) + if err != nil { + return nil, err + } + sigs = append(sigs, newSig) + } + + c.Printf("Storing signatures\n") + if err := c.dest.PutSignatures(ctx, sigs); err != nil { + return nil, errors.Wrap(err, "Error writing signatures") + } + + return manifestBytes, nil +} + +// Printf writes a formatted string to c.reportWriter. 
+// Note that the method name Printf is not entirely arbitrary: (go tool vet) +// has a built-in list of functions/methods (whatever object they are for) +// which have their format strings checked; for other names we would have +// to pass a parameter to every (go tool vet) invocation. +func (c *copier) Printf(format string, a ...interface{}) { + fmt.Fprintf(c.reportWriter, format, a...) +} + +func checkImageDestinationForCurrentRuntimeOS(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error { + if dest.MustMatchRuntimeOS() { + wantedOS := runtime.GOOS + if sys != nil && sys.OSChoice != "" { + wantedOS = sys.OSChoice + } + c, err := src.OCIConfig(ctx) + if err != nil { + return errors.Wrapf(err, "Error parsing image configuration") + } + osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS) + if wantedOS == "windows" && c.OS == "linux" { + return osErr + } else if wantedOS != "windows" && c.OS == "windows" { + return osErr + } + } + return nil +} + +// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. +func (ic *imageCopier) updateEmbeddedDockerReference() error { + if ic.c.dest.IgnoresEmbeddedDockerReference() { + return nil // Destination would prefer us not to update the embedded reference. + } + destRef := ic.c.dest.Reference().DockerReference() + if destRef == nil { + return nil // Destination does not care about Docker references + } + if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { + return nil // No reference embedded in the manifest, or it matches destRef already. + } + + if !ic.canModifyManifest { + return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway", + transports.ImageName(ic.c.dest.Reference()), destRef.String()) + } + ic.manifestUpdates.EmbeddedDockerReference = destRef + return nil +} + +// isTTY returns true if the io.Writer is a file and a tty. +func isTTY(w io.Writer) bool { + if f, ok := w.(*os.File); ok { + return terminal.IsTerminal(int(f.Fd())) + } + return false +} + +// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. +func (ic *imageCopier) copyLayers(ctx context.Context) error { + srcInfos := ic.src.LayerInfos() + numLayers := len(srcInfos) + updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) + if err != nil { + return err + } + srcInfosUpdated := false + if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { + if !ic.canModifyManifest { + return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden") + } + srcInfos = updatedSrcInfos + srcInfosUpdated = true + } + + type copyLayerData struct { + destInfo types.BlobInfo + diffID digest.Digest + err error + } + + // copyGroup is used to determine if all layers are copied + copyGroup := sync.WaitGroup{} + copyGroup.Add(numLayers) + + // copySemaphore is used to limit the number of parallel downloads to + // avoid malicious images causing troubles and to be nice to servers. 
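+	// Each copyLayerHelper goroutine below holds one unit of the semaphore
+	// (acquired by the spawning loop) while it runs and releases it via its
+	// deferred Release(1); in the non-parallel case the weight of 1 therefore
+	// degrades the loop to fully sequential copying.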
+	var copySemaphore *semaphore.Weighted
+	if ic.c.copyInParallel {
+		copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads))
+	} else {
+		copySemaphore = semaphore.NewWeighted(int64(1))
+	}
+
+	data := make([]copyLayerData, numLayers)
+	copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) {
+		defer copySemaphore.Release(1)
+		defer copyGroup.Done()
+		cld := copyLayerData{}
+		if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
+			// DiffIDs are, currently, needed only when converting from schema1.
+			// In which case src.LayerInfos will not have URLs because schema1
+			// does not support them.
+			if ic.diffIDsAreNeeded {
+				cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
+			} else {
+				cld.destInfo = srcLayer
+				logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
+			}
+		} else {
+			cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool)
+		}
+		data[index] = cld
+	}
+
+	func() { // A scope for defer
+		progressPool, progressCleanup := ic.c.newProgressPool(ctx)
+		defer progressCleanup()
+
+		for i, srcLayer := range srcInfos {
+			copySemaphore.Acquire(ctx, 1)
+			go copyLayerHelper(i, srcLayer, progressPool)
+		}
+
+		// Wait for all layers to be copied
+		copyGroup.Wait()
+	}()
+
+	destInfos := make([]types.BlobInfo, numLayers)
+	diffIDs := make([]digest.Digest, numLayers)
+	for i, cld := range data {
+		if cld.err != nil {
+			return cld.err
+		}
+		destInfos[i] = cld.destInfo
+		diffIDs[i] = cld.diffID
+	}
+
+	ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
+	if ic.diffIDsAreNeeded {
+		ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
+	}
+	if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
+		ic.manifestUpdates.LayerInfos = destInfos
+	}
+	return nil
+}
+
+// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
+func layerDigestsDiffer(a, b []types.BlobInfo) bool {
+	if len(a) != len(b) {
+		return true
+	}
+	for i := range a {
+		if a[i].Digest != b[i].Digest {
+			return true
+		}
+	}
+	return false
+}
+
+// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,
+// stores the resulting config and manifest to the destination, and returns the stored manifest.
+func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte, error) {
+	pendingImage := ic.src
+	if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) {
+		if !ic.canModifyManifest {
+			return nil, errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
+		}
+		if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
+			// We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
+			// So, this can only happen if we are trying to upload using one of the other MIME type candidates.
+			// Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise
+			// when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
+			// Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
+			// If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
+			return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
+		}
+		pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
+		if err != nil {
+			return nil, errors.Wrap(err, "Error creating an updated image manifest")
+		}
+		pendingImage = pi
+	}
+	manifest, _, err := pendingImage.Manifest(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "Error reading manifest")
+	}
+
+	if err := ic.c.copyConfig(ctx, pendingImage); err != nil {
+		return nil, err
+	}
+
+	ic.c.Printf("Writing manifest to image destination\n")
+	if err := ic.c.dest.PutManifest(ctx, manifest); err != nil {
+		return nil, errors.Wrap(err, "Error writing manifest")
+	}
+	return manifest, nil
+}
+
+// newProgressPool creates a *mpb.Progress and a cleanup function.
+// The caller must eventually call the returned cleanup function after the pool will no longer be updated.
+func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
+	ctx, cancel := context.WithCancel(ctx)
+	pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx))
+	return pool, func() {
+		cancel()
+		pool.Wait()
+	}
+}
+
+// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
+// is ioutil.Discard, the progress bar's output will be discarded
+func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
+	// shortDigestLen is the length of the digest used for blobs.
+	const shortDigestLen = 12
+
+	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+	// Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
+	maxPrefixLen := len("Copying blob ") + shortDigestLen
+	if len(prefix) > maxPrefixLen {
+		prefix = prefix[:maxPrefixLen]
+	}
+
+	// Use a normal progress bar when we know the size (i.e., size > 0).
+	// Otherwise, use a spinner to indicate that something's happening.
+	var bar *mpb.Bar
+	if info.Size > 0 {
+		bar = pool.AddBar(info.Size,
+			mpb.BarClearOnComplete(),
+			mpb.PrependDecorators(
+				decor.Name(prefix),
+			),
+			mpb.AppendDecorators(
+				decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete),
+			),
+		)
+	} else {
+		bar = pool.AddSpinner(info.Size,
+			mpb.SpinnerOnLeft,
+			mpb.BarClearOnComplete(),
+			mpb.SpinnerStyle([]string{".", "..", "...", "....", ""}),
+			mpb.PrependDecorators(
+				decor.Name(prefix),
+			),
+			mpb.AppendDecorators(
+				decor.OnComplete(decor.Name(""), " "+onComplete),
+			),
+		)
+	}
+	if c.progressOutput == ioutil.Discard {
+		c.Printf("Copying %s %s\n", kind, info.Digest)
+	}
+	return bar
+}
+
+// copyConfig copies config.json, if any, from src to dest.
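+// The config is passed through copyBlobFromStream with isConfig=true and without
+// allowing blob modification, so a successful copy must preserve the digest the
+// manifest refers to; the digest comparison below guards exactly that invariant.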
+func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
+	srcInfo := src.ConfigInfo()
+	if srcInfo.Digest != "" {
+		configBlob, err := src.ConfigBlob(ctx)
+		if err != nil {
+			return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
+		}
+
+		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
+			progressPool, progressCleanup := c.newProgressPool(ctx)
+			defer progressCleanup()
+			bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
+			destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
+			if err != nil {
+				return types.BlobInfo{}, err
+			}
+			bar.SetTotal(int64(len(configBlob)), true)
+			return destInfo, nil
+		}()
+		if err != nil {
+			return err
+		}
+		if destInfo.Digest != srcInfo.Digest {
+			return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
+		}
+	}
+	return nil
+}
+
+// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
+// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
+type diffIDResult struct {
+	digest digest.Digest
+	err    error
+}
+
+// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps compressing it if allowed,
+// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
+	cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+	diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
+
+	// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
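+	// Note that when ic.canSubstituteBlobs is set, TryReusingBlob may report a
+	// different but equivalent blob; the blobInfo it returns (not srcInfo) is
+	// what must end up in the updated manifest.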
+	if !diffIDIsNeeded {
+		reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
+		if err != nil {
+			return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
+		}
+		if reused {
+			logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
+			bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists")
+			bar.SetTotal(0, true)
+			return blobInfo, cachedDiffID, nil
+		}
+	}
+
+	// Fallback: copy the layer, computing the diffID if we need to do so
+	srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
+	if err != nil {
+		return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
+	}
+	defer srcStream.Close()
+
+	bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")
+
+	blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, Annotations: srcInfo.Annotations}, diffIDIsNeeded, bar)
+	if err != nil {
+		return types.BlobInfo{}, "", err
+	}
+
+	diffID := cachedDiffID
+	if diffIDIsNeeded {
+		select {
+		case <-ctx.Done():
+			return types.BlobInfo{}, "", ctx.Err()
+		case diffIDResult := <-diffIDChan:
+			if diffIDResult.err != nil {
+				return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
+			}
+			logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
+			// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
+			// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
+			ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
+			diffID = diffIDResult.digest
+		}
+	}
+
+	bar.SetTotal(srcInfo.Size, true)
+	return blobInfo, diffID, nil
+}
+
+// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
+// It copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
+// perhaps compressing the stream if allowed,
+// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
+func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
+	diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
+	var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
+	var diffIDChan chan diffIDResult
+
+	err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
+	if diffIDIsNeeded {
+		diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
+		pipeReader, pipeWriter := io.Pipe()
+		defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
+			pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+		}()
+
+		getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer {
+			// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
+			// reading from the pipe has failed, we don’t really care.
+			// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
+			// the return value includes an error indication, which we do check.
+			//
+			// If this is never called, pipeReader will not be used anywhere, but pipeWriter will only be
+			// closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
+			go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
+			return pipeWriter
+		}
+	}
+	blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success
+	return blobInfo, diffIDChan, err
+	// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
+}
+
+// diffIDComputationGoroutine reads all input from layerStream, uncompresses it using decompressor if necessary, and sends its digest and status to dest.
+func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) {
+	result := diffIDResult{
+		digest: "",
+		err:    errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
+	}
+	defer func() { dest <- result }()
+	defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.
+
+	result.digest, result.err = computeDiffID(layerStream, decompressor)
+}
+
+// computeDiffID reads all input from stream, uncompresses it using decompressor if necessary, and returns its digest.
+func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) {
+	if decompressor != nil {
+		s, err := decompressor(stream)
+		if err != nil {
+			return "", err
+		}
+		defer s.Close()
+		stream = s
+	}
+
+	return digest.Canonical.FromReader(stream)
+}
+
+// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
+// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
+// perhaps compressing it if canModifyBlob,
+// and returns a complete blobInfo of the copied blob.
+func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
+	getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
+	canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) {
+	// The copying happens through a pipeline of connected io.Readers.
+	// === Input: srcStream
+
+	// === Process input through digestingReader to validate against the expected digest.
+	// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
+	// use a separate validation failure indicator.
+	// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
+	// dest.PutBlob may detect that the layer already exists, in which case we don't
+	// read stream to the end, and validation does not happen.
+	digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
+	if err != nil {
+		return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
+	}
+	var destStream io.Reader = digestingReader
+
+	// === Detect compression of the input stream.
+	// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
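+	// From this point on, destStream is a chain of wrapping io.Readers (digest
+	// verification, compression detection, and below the progress-bar proxy);
+	// each byte from the source is read only once as the final consumer —
+	// dest.PutBlob, possibly via a compression pipe set up below — pulls it through.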
+ compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) + } + isCompressed := decompressor != nil + destStream = bar.ProxyReader(destStream) + + // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. + var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. + if getOriginalLayerCopyWriter != nil { + destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor)) + originalLayerReader = destStream + } + + desiredCompressionFormat := c.compressionFormat + + // === Deal with layer compression/decompression if necessary + var inputInfo types.BlobInfo + var compressionOperation types.LayerCompression + if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed { + logrus.Debugf("Compressing blob on the fly") + compressionOperation = types.Compress + pipeReader, pipeWriter := io.Pipe() + defer pipeReader.Close() + + // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, + // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, + // we don’t care. + go c.compressGoroutine(pipeWriter, destStream, desiredCompressionFormat) // Closes pipeWriter + destStream = pipeReader + inputInfo.Digest = "" + inputInfo.Size = -1 + } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() { + // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally + // re-compressed using the desired format. + logrus.Debugf("Blob will be converted") + + compressionOperation = types.PreserveOriginal + s, err := decompressor(destStream) + if err != nil { + return types.BlobInfo{}, err + } + defer s.Close() + + pipeReader, pipeWriter := io.Pipe() + defer pipeReader.Close() + + go c.compressGoroutine(pipeWriter, s, desiredCompressionFormat) // Closes pipeWriter + + destStream = pipeReader + inputInfo.Digest = "" + inputInfo.Size = -1 + } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { + logrus.Debugf("Blob will be decompressed") + compressionOperation = types.Decompress + s, err := decompressor(destStream) + if err != nil { + return types.BlobInfo{}, err + } + defer s.Close() + destStream = s + inputInfo.Digest = "" + inputInfo.Size = -1 + } else { + // PreserveOriginal might also need to recompress the original blob if the desired compression format is different. + logrus.Debugf("Using original blob without modification") + compressionOperation = types.PreserveOriginal + inputInfo = srcInfo + } + + // === Report progress using the c.progress channel, if required. + if c.progress != nil && c.progressInterval > 0 { + destStream = &progressReader{ + source: destStream, + channel: c.progress, + interval: c.progressInterval, + artifact: srcInfo, + lastTime: time.Now(), + } + } + + // === Finally, send the layer stream to dest. 
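+	// inputInfo still describes the original blob only when the stream was not
+	// rewritten above; the compress/decompress branches reset Digest/Size so
+	// that PutBlob computes fresh values for what it actually received.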
+	uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
+	if err != nil {
+		return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
+	}
+
+	uploadedInfo.Annotations = srcInfo.Annotations
+
+	uploadedInfo.CompressionOperation = compressionOperation
+	// If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
+	if canModifyBlob && !isConfig {
+		uploadedInfo.CompressionAlgorithm = &desiredCompressionFormat
+	}
+
+	// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
+	// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
+	// So, read everything from originalLayerReader, which will cause the rest to be
+	// sent there if we are not already at EOF.
+	if getOriginalLayerCopyWriter != nil {
+		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
+		_, err := io.Copy(ioutil.Discard, originalLayerReader)
+		if err != nil {
+			return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest)
+		}
+	}
+
+	if digestingReader.validationFailed { // Coverage: This should never happen.
+		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+	}
+	if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
+		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
+	}
+	if digestingReader.validationSucceeded {
+		// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
+		// srcInfo.Digest describes the pre-compressionOperation input, verified by digestingReader
+		// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
+		// (because inputInfo.Digest == "", this must have been computed afresh).
+		switch compressionOperation {
+		case types.PreserveOriginal:
+			break // Do nothing, we have only one digest and we might not have even verified it.
+		case types.Compress:
+			c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+		case types.Decompress:
+			c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+		default:
+			return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
+		}
+	}
+	return uploadedInfo, nil
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat compression.Algorithm) {
+	err := errors.New("Internal error: unexpected panic in compressGoroutine")
+	defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+		dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+	}()
+
+	compressor, err := compression.CompressStream(dest, compressionFormat, c.compressionLevel)
+	if err != nil {
+		return
+	}
+	defer compressor.Close()
+
+	buf := make([]byte, compressionBufferSize)
+
+	_, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e.
causes dest.Close() +} diff --git a/vendor/github.com/containers/image/v4/copy/manifest.go b/vendor/github.com/containers/image/v4/copy/manifest.go new file mode 100644 index 000000000..7c981fcad --- /dev/null +++ b/vendor/github.com/containers/image/v4/copy/manifest.go @@ -0,0 +1,121 @@ +package copy + +import ( + "context" + "strings" + + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. +// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location. +// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. +var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} + +// orderedSet is a list of strings (MIME types in our case), with each string appearing at most once. +type orderedSet struct { + list []string + included map[string]struct{} +} + +// newOrderedSet creates a correctly initialized orderedSet. +// [Sometimes it would be really nice if Golang had constructors…] +func newOrderedSet() *orderedSet { + return &orderedSet{ + list: []string{}, + included: map[string]struct{}{}, + } +} + +// append adds s to the end of os, only if it is not included already. +func (os *orderedSet) append(s string) { + if _, ok := os.included[s]; !ok { + os.list = append(os.list, s) + os.included[s] = struct{}{} + } +} + +// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest. +// Note that the conversion will only happen later, through ic.src.UpdatedImage +// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified), +// and a list of other possible alternatives, in order. +func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) { + _, srcType, err := ic.src.Manifest(ctx) + if err != nil { // This should have been cached?! + return "", nil, errors.Wrap(err, "Error reading manifest") + } + normalizedSrcType := manifest.NormalizedMIMEType(srcType) + if srcType != normalizedSrcType { + logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType) + srcType = normalizedSrcType + } + + if forceManifestMIMEType != "" { + destSupportedManifestMIMETypes = []string{forceManifestMIMEType} + } + + if len(destSupportedManifestMIMETypes) == 0 { + return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. + } + supportedByDest := map[string]struct{}{} + for _, t := range destSupportedManifestMIMETypes { + supportedByDest[t] = struct{}{} + } + + // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. + // So, build a list of types to try in order of decreasing preference. + // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct, + // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other. 
+ // In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types + // and never attempt the other one. + prioritizedTypes := newOrderedSet() + + // First of all, prefer to keep the original manifest unmodified. + if _, ok := supportedByDest[srcType]; ok { + prioritizedTypes.append(srcType) + } + if !ic.canModifyManifest { + // We could also drop the !ic.canModifyManifest check and have the caller + // make the choice; it is already doing that to an extent, to improve error + // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion” + // special case in here; the caller can then worry (or not) only about a good UI. + logrus.Debugf("We can't modify the manifest, hoping for the best...") + return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? + } + + // Then use our list of preferred types. + for _, t := range preferredManifestMIMETypes { + if _, ok := supportedByDest[t]; ok { + prioritizedTypes.append(t) + } + } + + // Finally, try anything else the destination supports. + for _, t := range destSupportedManifestMIMETypes { + prioritizedTypes.append(t) + } + + logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) + if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen. + return "", nil, errors.New("Internal error: no candidate MIME types") + } + preferredType := prioritizedTypes.list[0] + if preferredType != srcType { + ic.manifestUpdates.ManifestMIMEType = preferredType + } else { + logrus.Debugf("... will first try using the original manifest unmodified") + } + return preferredType, prioritizedTypes.list[1:], nil +} + +// isMultiImage returns true if img is a list of images +func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) { + _, mt, err := img.Manifest(ctx) + if err != nil { + return false, err + } + return manifest.MIMETypeIsMultiImage(mt), nil +} diff --git a/vendor/github.com/containers/image/v4/copy/progress_reader.go b/vendor/github.com/containers/image/v4/copy/progress_reader.go new file mode 100644 index 000000000..c6a1e3da6 --- /dev/null +++ b/vendor/github.com/containers/image/v4/copy/progress_reader.go @@ -0,0 +1,28 @@ +package copy + +import ( + "io" + "time" + + "github.com/containers/image/v4/types" +) + +// progressReader is a reader that reports its progress on an interval. +type progressReader struct { + source io.Reader + channel chan types.ProgressProperties + interval time.Duration + artifact types.BlobInfo + lastTime time.Time + offset uint64 +} + +func (r *progressReader) Read(p []byte) (int, error) { + n, err := r.source.Read(p) + r.offset += uint64(n) + if time.Since(r.lastTime) > r.interval { + r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset} + r.lastTime = time.Now() + } + return n, err +} diff --git a/vendor/github.com/containers/image/v4/copy/sign.go b/vendor/github.com/containers/image/v4/copy/sign.go new file mode 100644 index 000000000..64c3b4b2b --- /dev/null +++ b/vendor/github.com/containers/image/v4/copy/sign.go @@ -0,0 +1,31 @@ +package copy + +import ( + "github.com/containers/image/v4/signature" + "github.com/containers/image/v4/transports" + "github.com/pkg/errors" +) + +// createSignature creates a new signature of manifest using keyIdentity. 
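+// Signing requires a canonical Docker reference to bind the signature to, so
+// this fails up front for destinations whose DockerReference() is nil.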
+func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { + mech, err := signature.NewGPGSigningMechanism() + if err != nil { + return nil, errors.Wrap(err, "Error initializing GPG") + } + defer mech.Close() + if err := mech.SupportsSigning(); err != nil { + return nil, errors.Wrap(err, "Signing not supported") + } + + dockerReference := c.dest.Reference().DockerReference() + if dockerReference == nil { + return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + } + + c.Printf("Signing manifest\n") + newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) + if err != nil { + return nil, errors.Wrap(err, "Error creating signature") + } + return newSig, nil +} diff --git a/vendor/github.com/containers/image/v4/directory/directory_dest.go b/vendor/github.com/containers/image/v4/directory/directory_dest.go new file mode 100644 index 000000000..18f7dde70 --- /dev/null +++ b/vendor/github.com/containers/image/v4/directory/directory_dest.go @@ -0,0 +1,260 @@ +package directory + +import ( + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/image/v4/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const version = "Directory Transport Version: 1.1\n" + +// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created +// using the 'dir' transport +var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") + +type dirImageDestination struct { + ref dirReference + compress bool +} + +// newImageDestination returns an ImageDestination for writing to a directory. 
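+// The directory must either not exist yet, be empty, or contain a version file
+// from a previous 'dir' transport write; anything else fails with
+// ErrNotContainerImageDir instead of silently overwriting unrelated data.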
+func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) {
+	d := &dirImageDestination{ref: ref, compress: compress}
+
+	// If the directory exists, check whether it is empty.
+	// If it is not empty, verify that its contents match those of a container image directory and overwrite them;
+	// if the contents don't match, return an error.
+	dirExists, err := pathExists(d.ref.resolvedPath)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath)
+	}
+	if dirExists {
+		isEmpty, err := isDirEmpty(d.ref.resolvedPath)
+		if err != nil {
+			return nil, err
+		}
+
+		if !isEmpty {
+			versionExists, err := pathExists(d.ref.versionPath())
+			if err != nil {
+				return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath())
+			}
+			if versionExists {
+				contents, err := ioutil.ReadFile(d.ref.versionPath())
+				if err != nil {
+					return nil, err
+				}
+				// check whether the contents of the version file are what we expect them to be
+				if string(contents) != version {
+					return nil, ErrNotContainerImageDir
+				}
+			} else {
+				return nil, ErrNotContainerImageDir
+			}
+			// delete directory contents so that only one image is in the directory at a time
+			if err = removeDirContents(d.ref.resolvedPath); err != nil {
+				return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath)
+			}
+			logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath)
+		}
+	} else {
+		// create directory if it doesn't exist
+		if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil {
+			return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath)
+		}
+	}
+	// create version file
+	err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath())
+	}
+	return d, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *dirImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *dirImageDestination) Close() error {
+	return nil
+}
+
+func (d *dirImageDestination) SupportedManifestMIMETypes() []string {
+	return nil
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error {
+	return nil
+}
+
+func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
+	if d.compress {
+		return types.Compress
+	}
+	return types.PreserveOriginal
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *dirImageDestination) AcceptsForeignLayerURLs() bool {
+	return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *dirImageDestination) MustMatchRuntimeOS() bool {
+	return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
+	return false // N/A, DockerReference() returns nil.
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *dirImageDestination) HasThreadSafePutBlob() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded := false
+	defer func() {
+		blobFile.Close()
+		if !succeeded {
+			os.Remove(blobFile.Name())
+		}
+	}()
+
+	digester := digest.Canonical.Digester()
+	tee := io.TeeReader(stream, digester.Hash())
+
+	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+	size, err := io.Copy(blobFile, tee)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	computedDigest := digester.Digest()
+	if inputInfo.Size != -1 && size != inputInfo.Size {
+		return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+	}
+	if err := blobFile.Sync(); err != nil {
+		return types.BlobInfo{}, err
+	}
+	if err := blobFile.Chmod(0644); err != nil {
+		return types.BlobInfo{}, err
+	}
+	blobPath := d.ref.layerPath(computedDigest)
+	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded = true
+	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
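+// For the 'dir' transport this is a plain os.Stat of the layer path; the cache
+// and canSubstitute arguments are accepted for interface compatibility but unused.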
+func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
+	}
+	blobPath := d.ref.layerPath(info.Digest)
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, types.BlobInfo{}, nil
+	}
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte) error {
+	return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644)
+}
+
+func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+	for i, sig := range signatures {
+		if err := ioutil.WriteFile(d.ref.signaturePath(i), sig, 0644); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *dirImageDestination) Commit(ctx context.Context) error {
+	return nil
+}
+
+// returns true if path exists
+func pathExists(path string) (bool, error) {
+	_, err := os.Stat(path)
+	if err == nil {
+		return true, nil
+	}
+	if err != nil && os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+// returns true if directory is empty
+func isDirEmpty(path string) (bool, error) {
+	files, err := ioutil.ReadDir(path)
+	if err != nil {
+		return false, err
+	}
+	return len(files) == 0, nil
+}
+
+// deletes the contents of a directory
+func removeDirContents(path string) error {
+	files, err := ioutil.ReadDir(path)
+	if err != nil {
+		return err
+	}
+
+	for _, file := range files {
+		if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v4/directory/directory_src.go b/vendor/github.com/containers/image/v4/directory/directory_src.go
new file mode 100644
index 000000000..921c1941c
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/directory/directory_src.go
@@ -0,0 +1,96 @@
+package directory
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/containers/image/v4/manifest"
+	"github.com/containers/image/v4/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+type dirImageSource struct {
+	ref dirReference
+}
+
+// newImageSource returns an ImageSource reading from an existing directory.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ref dirReference) types.ImageSource {
+	return &dirImageSource{ref}
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *dirImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *dirImageSource) Close() error {
+	return nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+	if instanceDigest != nil {
+		return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`)
+	}
+	m, err := ioutil.ReadFile(s.ref.manifestPath())
+	if err != nil {
+		return nil, "", err
+	}
+	return m, manifest.GuessMIMEType(m), err
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *dirImageSource) HasThreadSafeGetBlob() bool {
+	return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+	r, err := os.Open(s.ref.layerPath(info.Digest))
+	if err != nil {
+		return nil, -1, err
+	}
+	fi, err := r.Stat()
+	if err != nil {
+		return nil, -1, err
+	}
+	return r, fi.Size(), nil
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	if instanceDigest != nil {
+		return nil, errors.Errorf(`Manifest lists are not supported by "dir:"`)
+	}
+	signatures := [][]byte{}
+	for i := 0; ; i++ {
+		signature, err := ioutil.ReadFile(s.ref.signaturePath(i))
+		if err != nil {
+			if os.IsNotExist(err) {
+				break
+			}
+			return nil, err
+		}
+		signatures = append(signatures, signature)
+	}
+	return signatures, nil
+}
+
+// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified.
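+// The 'dir' transport stores blobs exactly as the manifest describes them, so
+// there is never anything to override and this implementation returns nil.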
+
+// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified.
+func (s *dirImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v4/directory/directory_transport.go b/vendor/github.com/containers/image/v4/directory/directory_transport.go
new file mode 100644
index 000000000..29ac7115f
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/directory/directory_transport.go
@@ -0,0 +1,187 @@
+package directory
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/image/v4/directory/explicitfilepath"
+	"github.com/containers/image/v4/docker/reference"
+	"github.com/containers/image/v4/image"
+	"github.com/containers/image/v4/transports"
+	"github.com/containers/image/v4/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for directory paths.
+var Transport = dirTransport{}
+
+type dirTransport struct{}
+
+func (t dirTransport) Name() string {
+	return "dir"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return NewReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// The scope passed to this function will not be ""; that value is always allowed.
+func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
+	if !strings.HasPrefix(scope, "/") {
+		return errors.Errorf("Invalid scope %s: Must be an absolute path", scope)
+	}
+	// Refuse also "/", otherwise "/" and "" would have the same semantics,
+	// and "" could be unexpectedly shadowed by the "/" entry.
+	if scope == "/" {
+		return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+	}
+	cleaned := filepath.Clean(scope)
+	if cleaned != scope {
+		return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
+	}
+	return nil
+}
+
+// dirReference is an ImageReference for directory paths.
+type dirReference struct {
+	// Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
+	// Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on.
+
+	// Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid
+	// being exposed to symlinks and renames in the parent directories of the working directory).
+	// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
+	path         string // As specified by the user. May be relative, contain symlinks, etc.
+	resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
+}
+
+// There is no directory.ParseReference because it is rather pointless.
+// Callers who need a transport-independent interface will go through
+// dirTransport.ParseReference; callers who intentionally deal with directories
+// can use directory.NewReference.
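To illustrate the scope rules enforced above (absolute, canonical, and not the bare "/"), a small sketch exercising ValidatePolicyConfigurationScope through the exported Transport; the sample scopes are made up:

	package main

	import (
		"fmt"

		"github.com/containers/image/v4/directory"
	)

	func main() {
		for _, scope := range []string{
			"/var/lib/images",  // accepted: absolute and canonical
			"relative/path",    // rejected: not absolute
			"/",                // rejected: use the generic default scope "" instead
			"/var/lib/images/", // rejected: trailing slash is non-canonical
		} {
			err := directory.Transport.ValidatePolicyConfigurationScope(scope)
			fmt.Printf("%q -> %v\n", scope, err)
		}
	}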
+ +// NewReference returns a directory reference for a specified path. +// +// We do not expose an API supplying the resolvedPath; we could, but recomputing it +// is generally cheap enough that we prefer being confident about the properties of resolvedPath. +func NewReference(path string) (types.ImageReference, error) { + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path) + if err != nil { + return nil, err + } + return dirReference{path: path, resolvedPath: resolved}, nil +} + +func (ref dirReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref dirReference) StringWithinTransport() string { + return ref.path +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref dirReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref dirReference) PolicyConfigurationIdentity() string { + return ref.resolvedPath +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref dirReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedPath + for { + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 || lastSlash == 0 { + break + } + path = path[:lastSlash] + res = append(res, path) + } + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by dirTransport.ValidatePolicyConfigurationScope above. + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. 
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	src := newImageSource(ref)
+	return image.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ref), nil
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	compress := false
+	if sys != nil {
+		compress = sys.DirForceCompress
+	}
+	return newImageDestination(ref, compress)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for dir: images")
+}
+
+// manifestPath returns a path for the manifest within a directory using our conventions.
+func (ref dirReference) manifestPath() string {
+	return filepath.Join(ref.path, "manifest.json")
+}
+
+// layerPath returns a path for a layer tarball within a directory using our conventions.
+func (ref dirReference) layerPath(digest digest.Digest) string {
+	// FIXME: Should we keep the digest identification?
+	return filepath.Join(ref.path, digest.Hex())
+}
+
+// signaturePath returns a path for a signature within a directory using our conventions.
+func (ref dirReference) signaturePath(index int) string {
+	return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
+}
+
+// versionPath returns a path for the version file within a directory using our conventions.
+func (ref dirReference) versionPath() string {
+	return filepath.Join(ref.path, "version")
+}
diff --git a/vendor/github.com/containers/image/v4/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v4/directory/explicitfilepath/path.go
new file mode 100644
index 000000000..71136b880
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/directory/explicitfilepath/path.go
@@ -0,0 +1,56 @@
+package explicitfilepath
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
+// To do so, all elements of the input path must exist; as a special case, the final component may be
+// a non-existent name (but not a symlink pointing to a non-existent name).
+// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
+func ResolvePathToFullyExplicit(path string) (string, error) {
+	switch _, err := os.Lstat(path); {
+	case err == nil:
+		return resolveExistingPathToFullyExplicit(path)
+	case os.IsNotExist(err):
+		parent, file := filepath.Split(path)
+		resolvedParent, err := resolveExistingPathToFullyExplicit(parent)
+		if err != nil {
+			return "", err
+		}
+		if file == "." || file == ".."
{ + // Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well. + // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed. + // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components + // in the resulting path, and especially not at the end. + return "", errors.Errorf("Unexpectedly missing special filename component in %s", path) + } + resolvedPath := filepath.Join(resolvedParent, file) + // As a sanity check, ensure that there are no "." or ".." components. + cleanedResolvedPath := filepath.Clean(resolvedPath) + if cleanedResolvedPath != resolvedPath { + // Coverage: This should never happen. + return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath) + } + return resolvedPath, nil + default: // err != nil, unrecognized + return "", err + } +} + +// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit, +// but without the special case for missing final component. +func resolveExistingPathToFullyExplicit(path string) (string, error) { + resolved, err := filepath.Abs(path) + if err != nil { + return "", err // Coverage: This can fail only if os.Getwd() fails. + } + resolved, err = filepath.EvalSymlinks(resolved) + if err != nil { + return "", err + } + return filepath.Clean(resolved), nil +} diff --git a/vendor/github.com/containers/image/v4/docker/archive/dest.go b/vendor/github.com/containers/image/v4/docker/archive/dest.go new file mode 100644 index 000000000..9e06e7c96 --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/archive/dest.go @@ -0,0 +1,72 @@ +package archive + +import ( + "context" + "io" + "os" + + "github.com/containers/image/v4/docker/tarfile" + "github.com/containers/image/v4/types" + "github.com/pkg/errors" +) + +type archiveImageDestination struct { + *tarfile.Destination // Implements most of types.ImageDestination + ref archiveReference + writer io.Closer +} + +func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) { + // ref.path can be either a pipe or a regular file + // in the case of a pipe, we require that we can open it for write + // in the case of a regular file, we don't want to overwrite any pre-existing file + // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy, + // only in a different way. Either way, it’s up to the user to not have two writers to the same path.) 
+ fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return nil, errors.Wrapf(err, "error opening file %q", ref.path) + } + + fhStat, err := fh.Stat() + if err != nil { + return nil, errors.Wrapf(err, "error statting file %q", ref.path) + } + + if fhStat.Mode().IsRegular() && fhStat.Size() != 0 { + return nil, errors.New("docker-archive doesn't support modifying existing images") + } + + tarDest := tarfile.NewDestination(fh, ref.destinationRef) + if sys != nil && sys.DockerArchiveAdditionalTags != nil { + tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags) + } + return &archiveImageDestination{ + Destination: tarDest, + ref: ref, + writer: fh, + }, nil +} + +// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved +func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.Decompress +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *archiveImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *archiveImageDestination) Close() error { + return d.writer.Close() +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (d *archiveImageDestination) Commit(ctx context.Context) error { + return d.Destination.Commit(ctx) +} diff --git a/vendor/github.com/containers/image/v4/docker/archive/src.go b/vendor/github.com/containers/image/v4/docker/archive/src.go new file mode 100644 index 000000000..feea0decd --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/archive/src.go @@ -0,0 +1,40 @@ +package archive + +import ( + "context" + "github.com/containers/image/v4/docker/tarfile" + "github.com/containers/image/v4/types" + "github.com/sirupsen/logrus" +) + +type archiveImageSource struct { + *tarfile.Source // Implements most of types.ImageSource + ref archiveReference +} + +// newImageSource returns a types.ImageSource for the specified image reference. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(ctx context.Context, ref archiveReference) (types.ImageSource, error) { + if ref.destinationRef != nil { + logrus.Warnf("docker-archive: references are not supported for sources (ignoring)") + } + src, err := tarfile.NewSourceFromFile(ref.path) + if err != nil { + return nil, err + } + return &archiveImageSource{ + Source: src, + ref: ref, + }, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *archiveImageSource) Reference() types.ImageReference { + return s.ref +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
+func (s *archiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v4/docker/archive/transport.go b/vendor/github.com/containers/image/v4/docker/archive/transport.go
new file mode 100644
index 000000000..347fdbd6e
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/docker/archive/transport.go
@@ -0,0 +1,160 @@
+package archive
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/v4/docker/reference"
+	ctrImage "github.com/containers/image/v4/image"
+	"github.com/containers/image/v4/transports"
+	"github.com/containers/image/v4/types"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for local Docker archives.
+var Transport = archiveTransport{}
+
+type archiveTransport struct{}
+
+func (t archiveTransport) Name() string {
+	return "docker-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// The scope passed to this function will not be ""; that value is always allowed.
+func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// See the explanation in archiveReference.PolicyConfigurationIdentity.
+	return errors.New(`docker-archive: does not support any scopes except the default "" one`)
+}
+
+// archiveReference is an ImageReference for Docker images.
+type archiveReference struct {
+	// destinationRef is only used for destinations; it is optional and can be nil even for destinations.
+	destinationRef reference.NamedTagged
+	path           string
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+	if refString == "" {
+		return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
+	}
+
+	parts := strings.SplitN(refString, ":", 2)
+	path := parts[0]
+	var destinationRef reference.NamedTagged
+
+	// A :tag was specified, which is only necessary for destinations.
+	if len(parts) == 2 {
+		ref, err := reference.ParseNormalizedNamed(parts[1])
+		if err != nil {
+			return nil, errors.Wrapf(err, "docker-archive parsing reference")
+		}
+		ref = reference.TagNameOnly(ref)
+
+		if _, isDigest := ref.(reference.Canonical); isDigest {
+			return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString)
+		}
+
+		refTagged, isTagged := ref.(reference.NamedTagged)
+		if !isTagged {
+			// Really shouldn't be hit...
+ return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString) + } + destinationRef = refTagged + } + + return archiveReference{ + destinationRef: destinationRef, + path: path, + }, nil +} + +func (ref archiveReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref archiveReference) StringWithinTransport() string { + if ref.destinationRef == nil { + return ref.path + } + return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String()) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref archiveReference) DockerReference() reference.Named { + return ref.destinationRef +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref archiveReference) PolicyConfigurationIdentity() string { + // Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity. + return "" +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref archiveReference) PolicyConfigurationNamespaces() []string { + // TODO + return []string{} +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
+func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, ref) + if err != nil { + return nil, err + } + return ctrImage.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref archiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref archiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + // Not really supported, for safety reasons. + return errors.New("Deleting images not implemented for docker-archive: images") +} diff --git a/vendor/github.com/containers/image/v4/docker/cache.go b/vendor/github.com/containers/image/v4/docker/cache.go new file mode 100644 index 000000000..51bf5b0d3 --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/cache.go @@ -0,0 +1,23 @@ +package docker + +import ( + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/types" +) + +// bicTransportScope returns a BICTransportScope appropriate for ref. +func bicTransportScope(ref dockerReference) types.BICTransportScope { + // Blobs can be reused across the whole registry. + return types.BICTransportScope{Opaque: reference.Domain(ref.ref)} +} + +// newBICLocationReference returns a BICLocationReference appropriate for ref. +func newBICLocationReference(ref dockerReference) types.BICLocationReference { + // Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob). + return types.BICLocationReference{Opaque: ref.ref.Name()} +} + +// parseBICLocationReference returns a repository for encoded lr. +func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) { + return reference.ParseNormalizedNamed(lr.Opaque) +} diff --git a/vendor/github.com/containers/image/v4/docker/daemon/client.go b/vendor/github.com/containers/image/v4/docker/daemon/client.go new file mode 100644 index 000000000..94c4970f2 --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/daemon/client.go @@ -0,0 +1,85 @@ +package daemon + +import ( + "net/http" + "path/filepath" + + "github.com/containers/image/v4/types" + dockerclient "github.com/docker/docker/client" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + // The default API version to be used in case none is explicitly specified + defaultAPIVersion = "1.22" +) + +// NewDockerClient initializes a new API client based on the passed SystemContext. +func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { + host := dockerclient.DefaultDockerHost + if sys != nil && sys.DockerDaemonHost != "" { + host = sys.DockerDaemonHost + } + + // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient. 
+ // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s + // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket + // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. + // + // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client. + // + // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set + // TLSClientConfig to nil. This can be achieved by using the form `http://` + url, err := dockerclient.ParseHostURL(host) + if err != nil { + return nil, err + } + var httpClient *http.Client + if url.Scheme != "unix" { + if url.Scheme == "http" { + httpClient = httpConfig() + } else { + hc, err := tlsConfig(sys) + if err != nil { + return nil, err + } + httpClient = hc + } + } + + return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) +} + +func tlsConfig(sys *types.SystemContext) (*http.Client, error) { + options := tlsconfig.Options{} + if sys != nil && sys.DockerDaemonInsecureSkipTLSVerify { + options.InsecureSkipVerify = true + } + + if sys != nil && sys.DockerDaemonCertPath != "" { + options.CAFile = filepath.Join(sys.DockerDaemonCertPath, "ca.pem") + options.CertFile = filepath.Join(sys.DockerDaemonCertPath, "cert.pem") + options.KeyFile = filepath.Join(sys.DockerDaemonCertPath, "key.pem") + } + + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + CheckRedirect: dockerclient.CheckRedirect, + }, nil +} + +func httpConfig() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: nil, + }, + CheckRedirect: dockerclient.CheckRedirect, + } +} diff --git a/vendor/github.com/containers/image/v4/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v4/docker/daemon/daemon_dest.go new file mode 100644 index 000000000..2c56ab934 --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/daemon/daemon_dest.go @@ -0,0 +1,144 @@ +package daemon + +import ( + "context" + "io" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/docker/tarfile" + "github.com/containers/image/v4/types" + "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type daemonImageDestination struct { + ref daemonReference + mustMatchRuntimeOS bool + *tarfile.Destination // Implements most of types.ImageDestination + // For talking to imageLoadGoroutine + goroutineCancel context.CancelFunc + statusChannel <-chan error + writer *io.PipeWriter + // Other state + committed bool // writer has been closed +} + +// newImageDestination returns a types.ImageDestination for the specified image reference. 
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { + if ref.ref == nil { + return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + namedTaggedRef, ok := ref.ref.(reference.NamedTagged) + if !ok { + return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + + var mustMatchRuntimeOS = true + if sys != nil && sys.DockerDaemonHost != client.DefaultDockerHost { + mustMatchRuntimeOS = false + } + + c, err := newDockerClient(sys) + if err != nil { + return nil, errors.Wrap(err, "Error initializing docker engine client") + } + + reader, writer := io.Pipe() + // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. + statusChannel := make(chan error, 1) + + goroutineContext, goroutineCancel := context.WithCancel(ctx) + go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) + + return &daemonImageDestination{ + ref: ref, + mustMatchRuntimeOS: mustMatchRuntimeOS, + Destination: tarfile.NewDestination(writer, namedTaggedRef), + goroutineCancel: goroutineCancel, + statusChannel: statusChannel, + writer: writer, + committed: false, + }, nil +} + +// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel +func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) { + err := errors.New("Internal error: unexpected panic in imageLoadGoroutine") + defer func() { + logrus.Debugf("docker-daemon: sending done, status %v", err) + statusChannel <- err + }() + defer func() { + if err == nil { + reader.Close() + } else { + reader.CloseWithError(err) + } + }() + + resp, err := c.ImageLoad(ctx, reader, true) + if err != nil { + err = errors.Wrap(err, "Error saving image to docker engine") + return + } + defer resp.Body.Close() +} + +// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved +func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.PreserveOriginal +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *daemonImageDestination) MustMatchRuntimeOS() bool { + return d.mustMatchRuntimeOS +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *daemonImageDestination) Close() error { + if !d.committed { + logrus.Debugf("docker-daemon: Closing tar stream to abort loading") + // In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing. + // In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including + // https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the + // net/http version with native Context support in Go 1.7) do not always actually immediately cancel + // the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and + // return early if the context is canceled without terminating the goroutine at all. 
+		// So we need this CloseWithError to terminate sending the HTTP request Body
+		// immediately, and hopefully, by terminating the send (which uses "Transfer-Encoding: chunked") without sending
+		// the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
+		// Whether that works or not, closing the PipeWriter seems desirable in any case.
+		d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()"))
+	}
+	d.goroutineCancel()
+
+	return nil
+}
+
+func (d *daemonImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *daemonImageDestination) Commit(ctx context.Context) error {
+	logrus.Debugf("docker-daemon: Closing tar stream")
+	if err := d.Destination.Commit(ctx); err != nil {
+		return err
+	}
+	if err := d.writer.Close(); err != nil {
+		return err
+	}
+	d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine.
+
+	logrus.Debugf("docker-daemon: Waiting for status")
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case err := <-d.statusChannel:
+		return err
+	}
+}
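The Close()/Commit() protocol above exists because the tar stream is written into an io.Pipe while a separate goroutine drives the upload, and the buffered status channel lets that goroutine finish even if the result is never read. A stripped-down sketch of the same pattern, with io.Copy to ioutil.Discard standing in for the c.ImageLoad() call:

	package main

	import (
		"fmt"
		"io"
		"io/ioutil"
	)

	func main() {
		reader, writer := io.Pipe()
		status := make(chan error, 1) // buffered, as in daemon_dest.go above

		go func() {
			_, err := io.Copy(ioutil.Discard, reader) // stand-in for the HTTP upload
			reader.CloseWithError(err)
			status <- err
		}()

		if _, err := writer.Write([]byte("tar stream payload")); err != nil {
			panic(err)
		}
		// "Commit": close the writer so the consumer sees a clean EOF, then wait.
		if err := writer.Close(); err != nil {
			panic(err)
		}
		fmt.Println("load status:", <-status)
	}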
diff --git a/vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go
new file mode 100644
index 000000000..f6f60aaf9
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go
@@ -0,0 +1,62 @@
+package daemon
+
+import (
+	"context"
+
+	"github.com/containers/image/v4/docker/tarfile"
+	"github.com/containers/image/v4/types"
+	"github.com/pkg/errors"
+)
+
+type daemonImageSource struct {
+	ref             daemonReference
+	*tarfile.Source // Implements most of types.ImageSource
+}
+
+type layerInfo struct {
+	path string
+	size int64
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+//
+// It would be great if we were able to stream the input tar as it is being
+// sent; but Docker sends the top-level manifest, which determines which paths
+// to look for, at the end, so we will need to seek back and re-read, several times.
+// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
+// is the config, and that the following len(RootFS) files are the layers, but that feels
+// way too brittle.)
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
+	c, err := newDockerClient(sys)
+	if err != nil {
+		return nil, errors.Wrap(err, "Error initializing docker engine client")
+	}
+	// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
+	// Either way ImageSave should create a tarball with exactly one image.
+	inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
+	if err != nil {
+		return nil, errors.Wrap(err, "Error loading image from docker engine")
+	}
+	defer inputStream.Close()
+
+	src, err := tarfile.NewSourceFromStream(inputStream)
+	if err != nil {
+		return nil, err
+	}
+	return &daemonImageSource{
+		ref:    ref,
+		Source: src,
+	}, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *daemonImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *daemonImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v4/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v4/docker/daemon/daemon_transport.go
new file mode 100644
index 000000000..4c6986ba0
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/docker/daemon/daemon_transport.go
@@ -0,0 +1,223 @@
+package daemon
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/image/v4/docker/policyconfiguration"
+	"github.com/containers/image/v4/docker/reference"
+	"github.com/containers/image/v4/image"
+	"github.com/containers/image/v4/transports"
+	"github.com/containers/image/v4/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for images managed by a local Docker daemon.
+var Transport = daemonTransport{}
+
+type daemonTransport struct{}
+
+// Name returns the name of the transport, which must be unique among other transports.
+func (t daemonTransport) Name() string {
+	return "docker-daemon"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// The scope passed to this function will not be ""; that value is always allowed.
+func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// ID values cannot be effectively namespaced, and are clearly invalid host:port values.
+	if _, err := digest.Parse(scope); err == nil {
+		return errors.Errorf(`docker-daemon: cannot use algo:digest value %s as a namespace`, scope)
+	}
+
+	// FIXME? We could be verifying the various character set and length restrictions
+	// from docker/distribution/reference.regexp.go, but other than that there
+	// are few semantically invalid strings.
+	return nil
+}
+
+// daemonReference is an ImageReference for images managed by a local Docker daemon.
+// Exactly one of id and ref can be set.
+// For daemonImageSource, both id and ref are acceptable, but ref must not be a NameOnly reference (the daemon interprets those as all tags in that repository).
+// For daemonImageDestination, it must be a ref, which is NamedTagged.
+// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest. +// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) +type daemonReference struct { + id digest.Digest + ref reference.Named // !reference.IsNameOnly +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func ParseReference(refString string) (types.ImageReference, error) { + // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases. + // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars). + + // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag). + // reference.ParseAnyReference interprets such strings as digests. + if dgst, err := digest.Parse(refString); err == nil { + // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. + // Other digest references are ambiguous, so refuse them. + if dgst.Algorithm() != digest.Canonical { + return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) + } + return NewReference(dgst, nil) + } + + ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values + if err != nil { + return nil, err + } + if reference.FamiliarName(ref) == digest.Canonical.String() { + return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) + } + return NewReference("", ref) +} + +// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) +func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { + if id != "" && ref != nil { + return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") + } + if ref != nil { + if reference.IsNameOnly(ref) { + return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) + } + // A github.com/distribution/reference value can have a tag and a digest at the same time! + // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input. + // This MAY be accepted in the future. + // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop + // the tag or the digest first?) + _, isTagged := ref.(reference.NamedTagged) + _, isDigested := ref.(reference.Canonical) + if isTagged && isDigested { + return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported") + } + } + return daemonReference{ + id: id, + ref: ref, + }, nil +} + +func (ref daemonReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. 
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; +// instead, see transports.ImageName(). +func (ref daemonReference) StringWithinTransport() string { + switch { + case ref.id != "": + return ref.id.String() + case ref.ref != nil: + return reference.FamiliarString(ref.ref) + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref daemonReference) DockerReference() reference.Named { + return ref.ref // May be nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref daemonReference) PolicyConfigurationIdentity() string { + // We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible. + // But the existence of image IDs means that we can’t truly well namespace the input: + // a single image can be namespaced either using the name or the ID depending on how it is named. + // + // That’s fairly unexpected, but we have to cope somehow. + // + // So, use the ordinary docker/policyconfiguration namespacing for named images. + // image IDs all fall into the root namespace. + // Users can set up the root namespace to be either untrusted or rejected, + // and to set up specific trust for named namespaces. This allows verifying image + // identity when a name is known, and unnamed images would be untrusted or rejected. + switch { + case ref.id != "": + return "" // This still allows using the default "" scope to define a global policy for ID-identified images. + case ref.ref != nil: + res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) + if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. 
The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref daemonReference) PolicyConfigurationNamespaces() []string { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + switch { + case ref.id != "": + return []string{} + case ref.ref != nil: + return policyconfiguration.DockerReferenceNamespaces(ref.ref) + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref daemonReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref daemonReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(ctx, sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + // Should this just untag the image? Should this stop running containers? + // The semantics is not quite as clear as for remote repositories. + // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant. 
+ return errors.Errorf("Deleting images not implemented for docker-daemon: images") +} diff --git a/vendor/github.com/containers/image/v4/docker/docker_client.go b/vendor/github.com/containers/image/v4/docker/docker_client.go new file mode 100644 index 000000000..d5662a030 --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/docker_client.go @@ -0,0 +1,645 @@ +package docker + +import ( + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/pkg/docker/config" + "github.com/containers/image/v4/pkg/sysregistriesv2" + "github.com/containers/image/v4/pkg/tlsclientconfig" + "github.com/containers/image/v4/types" + "github.com/docker/distribution/registry/client" + "github.com/docker/go-connections/tlsconfig" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + dockerHostname = "docker.io" + dockerV1Hostname = "index.docker.io" + dockerRegistry = "registry-1.docker.io" + + resolvedPingV2URL = "%s://%s/v2/" + resolvedPingV1URL = "%s://%s/v1/_ping" + tagsPath = "/v2/%s/tags/list" + manifestPath = "/v2/%s/manifests/%s" + blobsPath = "/v2/%s/blobs/%s" + blobUploadPath = "/v2/%s/blobs/uploads/" + extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" + + minimumTokenLifetimeSeconds = 60 + + extensionSignatureSchemaVersion = 2 // extensionSignature.Version + extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type +) + +var ( + // ErrV1NotSupported is returned when we're trying to talk to a + // docker V1 registry. + ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") + // ErrUnauthorizedForCredentials is returned when the status code returned is 401 + ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password") + systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"} +) + +// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: +// signature represents a Docker image signature. +type extensionSignature struct { + Version int `json:"schemaVersion"` // Version specifies the schema version + Name string `json:"name"` // Name must be in "sha256:@signatureName" format + Type string `json:"type"` // Type is optional, of not set it will be defaulted to "AtomicImageV1" + Content []byte `json:"content"` // Content contains the signature +} + +// signatureList represents list of Docker image signatures. +type extensionSignatureList struct { + Signatures []extensionSignature `json:"signatures"` +} + +type bearerToken struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + expirationTime time.Time +} + +// dockerClient is configuration for dealing with a single Docker registry. +type dockerClient struct { + // The following members are set by newDockerClient and do not change afterwards. + sys *types.SystemContext + registry string + + // tlsClientConfig is setup by newDockerClient and will be used and updated + // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime. + tlsClientConfig *tls.Config + // The following members are not set by newDockerClient and must be set by callers if needed. 
+ username string + password string + signatureBase signatureStorageBase + scope authScope + + // The following members are detected registry properties: + // They are set after a successful detectProperties(), and never change afterwards. + client *http.Client + scheme string + challenges []challenge + supportsSignatures bool + + // Private state for setupRequestAuth (key: string, value: bearerToken) + tokenCache sync.Map + // Private state for detectProperties: + detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once. + detectPropertiesError error // detectPropertiesError caches the initial error. +} + +type authScope struct { + remoteName string + actions string +} + +// sendAuth determines whether we need authentication for v2 or v1 endpoint. +type sendAuth int + +const ( + // v2 endpoint with authentication. + v2Auth sendAuth = iota + // v1 endpoint with authentication. + // TODO: Get v1Auth working + // v1Auth + // no authentication, works for both v1 and v2. + noAuth +) + +func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { + token := new(bearerToken) + if err := json.Unmarshal(blob, &token); err != nil { + return nil, err + } + if token.Token == "" { + token.Token = token.AccessToken + } + if token.ExpiresIn < minimumTokenLifetimeSeconds { + token.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn) + } + if token.IssuedAt.IsZero() { + token.IssuedAt = time.Now().UTC() + } + token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) + return token, nil +} + +// this is cloned from docker/go-connections because upstream docker has changed +// it and make deps here fails otherwise. +// We'll drop this once we upgrade to docker 1.13.x deps. +func serverDefault() *tls.Config { + return &tls.Config{ + // Avoid fallback to SSL protocols < TLS1.0 + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, + } +} + +// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort. 
+func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
+	if sys != nil && sys.DockerCertPath != "" {
+		return sys.DockerCertPath, nil
+	}
+	if sys != nil && sys.DockerPerHostCertDirPath != "" {
+		return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil
+	}
+
+	var (
+		hostCertDir     string
+		fullCertDirPath string
+	)
+	for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths {
+		if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+			hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
+		} else {
+			hostCertDir = systemPerHostCertDirPath
+		}
+
+		fullCertDirPath = filepath.Join(hostCertDir, hostPort)
+		_, err := os.Stat(fullCertDirPath)
+		if err == nil {
+			break
+		}
+		if os.IsNotExist(err) {
+			continue
+		}
+		if os.IsPermission(err) {
+			logrus.Debugf("error accessing certs directory due to permissions: %v", err)
+			continue
+		}
+		if err != nil {
+			return "", err
+		}
+	}
+	return fullCertDirPath, nil
+}
+
+// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
+// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
+func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
+	registry := reference.Domain(ref.ref)
+	username, password, err := config.GetAuthentication(sys, registry)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error getting username and password")
+	}
+	sigBase, err := configuredSignatureStorageBase(sys, ref, write)
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := newDockerClient(sys, registry, ref.ref.Name())
+	if err != nil {
+		return nil, err
+	}
+	client.username = username
+	client.password = password
+	client.signatureBase = sigBase
+	client.scope.actions = actions
+	client.scope.remoteName = reference.Path(ref.ref)
+	return client, nil
+}
+
+// newDockerClient returns a new dockerClient instance for the given registry
+// and reference. The reference is used to query the registry configuration
+// and can be either a registry (e.g., "registry.com[:5000]") or a repository
+// (e.g., "registry.com[:5000][/some/namespace]/repo").
+// Please note that newDockerClient does not set all members of dockerClient
+// (e.g., username and password); those must be set by callers if necessary.
+func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
+	hostName := registry
+	if registry == dockerHostname {
+		registry = dockerRegistry
+	}
+	tlsClientConfig := serverDefault()
+
+	// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
+	// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
+	// dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
+	// generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
+	// undocumented and may change if docker/docker changes.
+	certDir, err := dockerCertDir(sys, hostName)
+	if err != nil {
+		return nil, err
+	}
+	if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil {
+		return nil, err
+	}
+
+	// Check if TLS verification shall be skipped (default=false), which can
+	// be specified in the sysregistriesv2 configuration.
+	skipVerify := false
+	reg, err := sysregistriesv2.FindRegistry(sys, reference)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error loading registries")
+	}
+	if reg != nil {
+		if reg.Blocked {
+			return nil, fmt.Errorf("registry %s is blocked in %s", reg.Prefix, sysregistriesv2.ConfigPath(sys))
+		}
+		skipVerify = reg.Insecure
+	}
+	tlsClientConfig.InsecureSkipVerify = skipVerify
+
+	return &dockerClient{
+		sys:             sys,
+		registry:        registry,
+		tlsClientConfig: tlsClientConfig,
+	}, nil
+}
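CheckAuth below is one of the few exported entry points in this file. A hedged sketch of probing registry credentials with it; the registry host and credentials are placeholders:

	package main

	import (
		"context"
		"fmt"

		"github.com/containers/image/v4/docker"
	)

	func main() {
		// A nil *types.SystemContext falls back to the default configuration.
		err := docker.CheckAuth(context.Background(), nil, "someuser", "somepassword", "registry.example.com")
		switch err {
		case nil:
			fmt.Println("credentials accepted")
		case docker.ErrUnauthorizedForCredentials:
			fmt.Println("invalid username/password")
		default:
			fmt.Println("probe failed:", err)
		}
	}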
+	skipVerify := false
+	reg, err := sysregistriesv2.FindRegistry(sys, reference)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error loading registries")
+	}
+	if reg != nil {
+		if reg.Blocked {
+			return nil, fmt.Errorf("registry %s is blocked in %s", reg.Prefix, sysregistriesv2.ConfigPath(sys))
+		}
+		skipVerify = reg.Insecure
+	}
+	tlsClientConfig.InsecureSkipVerify = skipVerify
+
+	return &dockerClient{
+		sys:             sys,
+		registry:        registry,
+		tlsClientConfig: tlsClientConfig,
+	}, nil
+}
+
+// CheckAuth validates the credentials by attempting to log into the registry
+// returns an error if an error occurred while making the http request or the status code received was 401
+func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
+	client, err := newDockerClient(sys, registry, registry)
+	if err != nil {
+		return errors.Wrapf(err, "error creating new docker client")
+	}
+	client.username = username
+	client.password = password
+
+	resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusOK:
+		return nil
+	case http.StatusUnauthorized:
+		return ErrUnauthorizedForCredentials
+	default:
+		return errors.Errorf("error occurred with status code %d (%s)", resp.StatusCode, http.StatusText(resp.StatusCode))
+	}
+}
+
+// SearchResult holds the information of each matching image
+// It matches the output returned by the v1 endpoint
+type SearchResult struct {
+	Name        string `json:"name"`
+	Description string `json:"description"`
+	// StarCount states the number of stars the image has
+	StarCount int  `json:"star_count"`
+	IsTrusted bool `json:"is_trusted"`
+	// IsAutomated states whether the image is an automated build
+	IsAutomated bool `json:"is_automated"`
+	// IsOfficial states whether the image is an official build
+	IsOfficial bool `json:"is_official"`
+}
+
+// SearchRegistry queries a registry for images that contain "image" in their name
+// The limit is the max number of results desired
+// Note: The limit value doesn't work with all registries;
+// for example, registry.access.redhat.com returns all the results without limiting it to the limit value
+func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) {
+	type V2Results struct {
+		// Repositories holds the results returned by the /v2/_catalog endpoint
+		Repositories []string `json:"repositories"`
+	}
+	type V1Results struct {
+		// Results holds the results returned by the /v1/search endpoint
+		Results []SearchResult `json:"results"`
+	}
+	v2Res := &V2Results{}
+	v1Res := &V1Results{}
+
+	// Get credentials from authfile for the underlying hostname
+	username, password, err := config.GetAuthentication(sys, registry)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error getting username and password")
+	}
+
+	// The /v2/_catalog endpoint has been disabled for docker.io, therefore
+	// the call made to that endpoint will fail. So using the v1 hostname
+	// for docker.io for simplicity of implementation and the fact that it
+	// returns search results.
+ hostname := registry + if registry == dockerHostname { + hostname = dockerV1Hostname + } + + client, err := newDockerClient(sys, hostname, registry) + if err != nil { + return nil, errors.Wrapf(err, "error creating new docker client") + } + client.username = username + client.password = password + + // Only try the v1 search endpoint if the search query is not empty. If it is + // empty skip to the v2 endpoint. + if image != "" { + // set up the query values for the v1 endpoint + u := url.URL{ + Path: "/v1/search", + } + q := u.Query() + q.Set("q", image) + q.Set("n", strconv.Itoa(limit)) + u.RawQuery = q.Encode() + + logrus.Debugf("trying to talk to v1 search endpoint") + resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil) + if err != nil { + logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err) + } else { + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + logrus.Debugf("error getting search results from v1 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode)) + } else { + if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil { + return nil, err + } + return v1Res.Results, nil + } + } + } + + logrus.Debugf("trying to talk to v2 search endpoint") + resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth, nil) + if err != nil { + logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err) + } else { + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + logrus.Errorf("error getting search results from v2 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode)) + } else { + if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil { + return nil, err + } + searchRes := []SearchResult{} + for _, repo := range v2Res.Repositories { + if strings.Contains(repo, image) { + res := SearchResult{ + Name: repo, + } + searchRes = append(searchRes, res) + } + } + return searchRes, nil + } + } + + return nil, errors.Wrapf(err, "couldn't search registry %q", registry) +} + +// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. +// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/. +func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) { + if err := c.detectProperties(ctx); err != nil { + return nil, err + } + + url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) + return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope) +} + +// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. +// streamLen, if not -1, specifies the length of the data expected on stream. +// makeRequest should generally be preferred. 
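+// For example (a sketch), the registry ping in detectPropertiesHelper below issues:
+//
+//	resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)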
+// TODO(runcom): too many arguments here, use a struct
+func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+	req, err := http.NewRequest(method, url, stream)
+	if err != nil {
+		return nil, err
+	}
+	req = req.WithContext(ctx)
+	if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
+		req.ContentLength = streamLen
+	}
+	req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
+	for n, h := range headers {
+		for _, hh := range h {
+			req.Header.Add(n, hh)
+		}
+	}
+	if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
+		req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
+	}
+	if auth == v2Auth {
+		if err := c.setupRequestAuth(req, extraScope); err != nil {
+			return nil, err
+		}
+	}
+	logrus.Debugf("%s %s", method, url)
+	res, err := c.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+// we're using the challenges from the /v2/ ping response and not the one from the destination
+// URL in this request because:
+//
+// 1) docker does that as well
+// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
+//
+// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follow-ups
+func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error {
+	if len(c.challenges) == 0 {
+		return nil
+	}
+	schemeNames := make([]string, 0, len(c.challenges))
+	for _, challenge := range c.challenges {
+		schemeNames = append(schemeNames, challenge.Scheme)
+		switch challenge.Scheme {
+		case "basic":
+			req.SetBasicAuth(c.username, c.password)
+			return nil
+		case "bearer":
+			cacheKey := ""
+			scopes := []authScope{c.scope}
+			if extraScope != nil {
+				// Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
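+				// For illustration, an extraScope of {remoteName: "library/busybox", actions: "pull"}
+				// (assumed example values) yields the cache key "library/busybox:pull".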
+ cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions) + scopes = append(scopes, *extraScope) + } + var token bearerToken + t, inCache := c.tokenCache.Load(cacheKey) + if inCache { + token = t.(bearerToken) + } + if !inCache || time.Now().After(token.expirationTime) { + t, err := c.getBearerToken(req.Context(), challenge, scopes) + if err != nil { + return err + } + token = *t + c.tokenCache.Store(cacheKey, token) + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token)) + return nil + default: + logrus.Debugf("no handler for %s authentication", challenge.Scheme) + } + } + logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) + return nil +} + +func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) { + realm, ok := challenge.Parameters["realm"] + if !ok { + return nil, errors.Errorf("missing realm in bearer auth challenge") + } + + authReq, err := http.NewRequest("GET", realm, nil) + if err != nil { + return nil, err + } + authReq = authReq.WithContext(ctx) + getParams := authReq.URL.Query() + if c.username != "" { + getParams.Add("account", c.username) + } + if service, ok := challenge.Parameters["service"]; ok && service != "" { + getParams.Add("service", service) + } + for _, scope := range scopes { + if scope.remoteName != "" && scope.actions != "" { + getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions)) + } + } + authReq.URL.RawQuery = getParams.Encode() + if c.username != "" && c.password != "" { + authReq.SetBasicAuth(c.username, c.password) + } + logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + res, err := c.client.Do(authReq) + if err != nil { + return nil, err + } + defer res.Body.Close() + switch res.StatusCode { + case http.StatusUnauthorized: + err := client.HandleErrorResponse(res) + logrus.Debugf("Server response when trying to obtain an access token: \n%q", err.Error()) + return nil, ErrUnauthorizedForCredentials + case http.StatusOK: + break + default: + return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL) + } + tokenBlob, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + + return newBearerTokenFromJSONBlob(tokenBlob) +} + +// detectPropertiesHelper performs the work of detectProperties which executes +// it at most once. 
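+// Callers therefore go through detectProperties; a sketch of the usual pattern:
+//
+//	if err := c.detectProperties(ctx); err != nil {
+//		return nil, err
+//	}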
+func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
+	// We overwrite the TLS client's `InsecureSkipVerify` only if explicitly
+	// specified by the system context
+	if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
+		c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
+	}
+	tr := tlsclientconfig.NewTransport()
+	tr.TLSClientConfig = c.tlsClientConfig
+	c.client = &http.Client{Transport: tr}
+
+	ping := func(scheme string) error {
+		url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
+		resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+		if err != nil {
+			logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
+			return err
+		}
+		defer resp.Body.Close()
+		logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
+		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
+			return errors.Errorf("error pinging registry %s, response code %d (%s)", c.registry, resp.StatusCode, http.StatusText(resp.StatusCode))
+		}
+		c.challenges = parseAuthHeader(resp.Header)
+		c.scheme = scheme
+		c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1"
+		return nil
+	}
+	err := ping("https")
+	if err != nil && c.tlsClientConfig.InsecureSkipVerify {
+		err = ping("http")
+	}
+	if err != nil {
+		err = errors.Wrap(err, "pinging docker registry returned")
+		if c.sys != nil && c.sys.DockerDisableV1Ping {
+			return err
+		}
+		// best effort to understand if we're talking to a V1 registry
+		pingV1 := func(scheme string) bool {
+			url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
+			resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+			if err != nil {
+				logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
+				return false
+			}
+			defer resp.Body.Close()
+			logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
+			if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
+				return false
+			}
+			return true
+		}
+		isV1 := pingV1("https")
+		if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
+			isV1 = pingV1("http")
+		}
+		if isV1 {
+			err = ErrV1NotSupported
+		}
+	}
+	return err
+}
+
+// detectProperties detects various properties of the registry.
+// See the dockerClient documentation for members which are affected by this.
+func (c *dockerClient) detectProperties(ctx context.Context) error {
+	c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) })
+	return c.detectPropertiesError
+}
+
+// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
+// using the original data structures.
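+// A sketch of a call, as GetSignatures and putSignaturesToAPIExtension elsewhere in this package do:
+//
+//	list, err := c.getExtensionsSignatures(ctx, ref, manifestDigest)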
+func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
+	path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
+	res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	if res.StatusCode != http.StatusOK {
+		return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
+	}
+	body, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	var parsedBody extensionSignatureList
+	if err := json.Unmarshal(body, &parsedBody); err != nil {
+		return nil, errors.Wrapf(err, "Error decoding signature list")
+	}
+	return &parsedBody, nil
+}
diff --git a/vendor/github.com/containers/image/v4/docker/docker_image.go b/vendor/github.com/containers/image/v4/docker/docker_image.go
new file mode 100644
index 000000000..4332dc020
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/docker/docker_image.go
@@ -0,0 +1,107 @@
+package docker
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/containers/image/v4/docker/reference"
+	"github.com/containers/image/v4/image"
+	"github.com/containers/image/v4/types"
+	"github.com/pkg/errors"
+)
+
+// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods
+// which are specific to Docker.
+type Image struct {
+	types.ImageCloser
+	src *dockerImageSource
+}
+
+// newImage returns a new Image interface type after setting up
+// a client to the registry hosting the given image.
+// The caller must call .Close() on the returned Image.
+func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) {
+	s, err := newImageSource(ctx, sys, ref)
+	if err != nil {
+		return nil, err
+	}
+	img, err := image.FromSource(ctx, sys, s)
+	if err != nil {
+		return nil, err
+	}
+	return &Image{ImageCloser: img, src: s}, nil
+}
+
+// SourceRefFullName returns a fully expanded name for the repository this image is in.
+func (i *Image) SourceRefFullName() string {
+	return i.src.ref.ref.Name()
+}
+
+// GetRepositoryTags lists all tags available in the repository. The tag
+// provided inside the ImageReference will be ignored. (This is a
+// backward-compatible shim method which calls the module-level
+// GetRepositoryTags)
+func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) {
+	return GetRepositoryTags(ctx, i.src.c.sys, i.src.ref)
+}
+
+// GetRepositoryTags lists all tags available in the repository. The tag
+// provided inside the ImageReference will be ignored.
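+// For example (a sketch; the reference string is an assumed value):
+//
+//	ref, _ := ParseReference("//docker.io/library/busybox")
+//	tags, err := GetRepositoryTags(ctx, sys, ref)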
+func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) { + dr, ok := ref.(dockerReference) + if !ok { + return nil, errors.Errorf("ref must be a dockerReference") + } + + path := fmt.Sprintf(tagsPath, reference.Path(dr.ref)) + client, err := newDockerClientFromRef(sys, dr, false, "pull") + if err != nil { + return nil, errors.Wrap(err, "failed to create client") + } + + tags := make([]string, 0) + + for { + res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + // print url also + return nil, errors.Errorf("Invalid status code returned when fetching tags list %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) + } + + var tagsHolder struct { + Tags []string + } + if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil { + return nil, err + } + tags = append(tags, tagsHolder.Tags...) + + link := res.Header.Get("Link") + if link == "" { + break + } + + linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") + linkURL, err := url.Parse(linkURLStr) + if err != nil { + return tags, err + } + + // can be relative or absolute, but we only want the path (and I + // guess we're in trouble if it forwards to a new place...) + path = linkURL.Path + if linkURL.RawQuery != "" { + path += "?" + path += linkURL.RawQuery + } + } + return tags, nil +} diff --git a/vendor/github.com/containers/image/v4/docker/docker_image_dest.go b/vendor/github.com/containers/image/v4/docker/docker_image_dest.go new file mode 100644 index 000000000..0f351ab59 --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/docker_image_dest.go @@ -0,0 +1,611 @@ +package docker + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/pkg/blobinfocache/none" + "github.com/containers/image/v4/types" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type dockerImageDestination struct { + ref dockerReference + c *dockerClient + // State + manifestDigest digest.Digest // or "" if not yet known. +} + +// newImageDestination creates a new ImageDestination for the specified image reference. +func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) { + c, err := newDockerClientFromRef(sys, ref, true, "pull,push") + if err != nil { + return nil, err + } + return &dockerImageDestination{ + ref: ref, + c: c, + }, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *dockerImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. 
+func (d *dockerImageDestination) Close() error { + return nil +} + +func (d *dockerImageDestination) SupportedManifestMIMETypes() []string { + return []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + manifest.DockerV2Schema1SignedMediaType, + manifest.DockerV2Schema1MediaType, + } +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error { + if err := d.c.detectProperties(ctx); err != nil { + return err + } + switch { + case d.c.signatureBase != nil: + return nil + case d.c.supportsSignatures: + return nil + default: + return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured") + } +} + +func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.Compress +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *dockerImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match. +} + +// sizeCounter is an io.Writer which only counts the total size of its input. +type sizeCounter struct{ size int64 } + +func (c *sizeCounter) Write(p []byte) (n int, err error) { + c.size += int64(len(p)) + return len(p), nil +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *dockerImageDestination) HasThreadSafePutBlob() bool { + return true +} + +// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + if inputInfo.Digest.String() != "" { + // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. + // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. 
+ // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_. + haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false) + if err != nil { + return types.BlobInfo{}, err + } + if haveBlob { + return reusedInfo, nil + } + } + + // FIXME? Chunked upload, progress reporting, etc. + uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)) + logrus.Debugf("Uploading %s", uploadPath) + res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil) + if err != nil { + return types.BlobInfo{}, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusAccepted { + logrus.Debugf("Error initiating layer upload, response %#v", *res) + return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry) + } + uploadLocation, err := res.Location() + if err != nil { + return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") + } + + digester := digest.Canonical.Digester() + sizeCounter := &sizeCounter{} + tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter)) + res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil) + if err != nil { + logrus.Debugf("Error uploading layer chunked, response %#v", res) + return types.BlobInfo{}, err + } + defer res.Body.Close() + computedDigest := digester.Digest() + + uploadLocation, err = res.Location() + if err != nil { + return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") + } + + // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope) + + locationQuery := uploadLocation.Query() + // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717 + locationQuery.Set("digest", computedDigest.String()) + uploadLocation.RawQuery = locationQuery.Encode() + res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) + if err != nil { + return types.BlobInfo{}, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { + logrus.Debugf("Error uploading layer, response %#v", *res) + return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation) + } + + logrus.Debugf("Upload of layer %s complete", computedDigest) + cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref)) + return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil +} + +// blobExists returns true iff repo contains a blob with digest, and if so, also its size. +// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil); +// it returns a non-nil error only on an unexpected failure. 
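+// A sketch of a call, as TryReusingBlob below does for the primary repository:
+//
+//	exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)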
+func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
+	checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
+	logrus.Debugf("Checking %s", checkPath)
+	res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope)
+	if err != nil {
+		return false, -1, err
+	}
+	defer res.Body.Close()
+	switch res.StatusCode {
+	case http.StatusOK:
+		logrus.Debugf("... already exists")
+		return true, getBlobSize(res), nil
+	case http.StatusUnauthorized:
+		logrus.Debugf("... not authorized")
+		return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
+	case http.StatusNotFound:
+		logrus.Debugf("... not present")
+		return false, -1, nil
+	default:
+		return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
+	}
+}
+
+// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
+	u := url.URL{
+		Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
+		RawQuery: url.Values{
+			"mount": {srcDigest.String()},
+			"from":  {reference.Path(srcRepo)},
+		}.Encode(),
+	}
+	mountPath := u.String()
+	logrus.Debugf("Trying to mount %s", mountPath)
+	res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope)
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+	switch res.StatusCode {
+	case http.StatusCreated:
+		logrus.Debugf("... mount OK")
+		return nil
+	case http.StatusAccepted:
+		// Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
+		// Abort, and let the ultimate caller do an upload when it's ready, instead.
+		// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
+		uploadLocation, err := res.Location()
+		if err != nil {
+			return errors.Wrap(err, "Error determining upload URL after a mount attempt")
+		}
+		logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
+		res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
+		if err != nil {
+			logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
+		} else {
+			defer res2.Body.Close()
+			if res2.StatusCode != http.StatusNoContent {
+				logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res2.StatusCode))
+			}
+		}
+		// Anyway, if canceling the upload fails, ignore it and return the more important error:
+		return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+	default:
+		logrus.Debugf("Error mounting, response %#v", *res)
+		return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+	}
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
+	}
+
+	// First, check whether the blob happens to already exist at the destination.
+	exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	if exists {
+		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
+		return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+	}
+
+	// Then try reusing blobs from other locations.
+	for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
+		candidateRepo, err := parseBICLocationReference(candidate.Location)
+		if err != nil {
+			logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
+			continue
+		}
+		logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
+
+		// Sanity checks:
+		if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
+			logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
+			continue
+		}
+		if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+			logrus.Debug("... Already tried the primary destination")
+			continue
+		}
+
+		// Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
+
+		// Checking candidateRepo, and mounting from it, requires an
+		// expanded token scope.
+		extraScope := &authScope{
+			remoteName: reference.Path(candidateRepo),
+			actions:    "pull",
+		}
+		// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
+		// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
+		// So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
+		// On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
+		// Even worse, docker/distribution does not actually reasonably implement canceling uploads
+		// (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
+		// so, be a nice client and don't create unnecessary upload sessions on the server.
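+		// (For illustration: with extraScope above, the bearer-token request gains
+		// e.g. scope=repository:library/busybox:pull for the candidate repository; assumed name.)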
+		exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
+		if err != nil {
+			logrus.Debugf("... Failed: %v", err)
+			continue
+		}
+		if !exists {
+			// FIXME? Should we drop the blob from cache here (and elsewhere?)?
+			continue // logrus.Debug() already happened in blobExists
+		}
+		if candidateRepo.Name() != d.ref.ref.Name() {
+			if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil {
+				logrus.Debugf("... Mount failed: %v", err)
+				continue
+			}
+		}
+		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
+		return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
+	}
+
+	return false, types.BlobInfo{}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) error {
+	digest, err := manifest.Digest(m)
+	if err != nil {
+		return err
+	}
+	d.manifestDigest = digest
+
+	refTail, err := d.ref.tagOrDigest()
+	if err != nil {
+		return err
+	}
+	path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
+
+	headers := map[string][]string{}
+	mimeType := manifest.GuessMIMEType(m)
+	if mimeType != "" {
+		headers["Content-Type"] = []string{mimeType}
+	}
+	res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil)
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+	if !successStatus(res.StatusCode) {
+		err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
+		if isManifestInvalidError(errors.Cause(err)) {
+			err = types.ManifestTypeRejectedError{Err: err}
+		}
+		return err
+	}
+	return nil
+}
+
+// successStatus returns true if the argument is a successful HTTP response
+// code (in the range 200 - 399 inclusive).
+func successStatus(status int) bool {
+	return status >= 200 && status <= 399
+}
+
+// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
+func isManifestInvalidError(err error) bool {
+	errors, ok := err.(errcode.Errors)
+	if !ok || len(errors) == 0 {
+		return false
+	}
+	err = errors[0]
+	ec, ok := err.(errcode.ErrorCoder)
+	if !ok {
+		return false
+	}
+
+	switch ec.ErrorCode() {
+	// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
+	case v2.ErrorCodeManifestInvalid:
+		return true
+	// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
+	// when uploading to a tag (because it can’t find a matching tag inside the manifest)
+	case v2.ErrorCodeTagInvalid:
+		return true
+	// ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when
+	// uploading an OCI manifest that is (correctly, according to the spec) missing
+	// a top-level media type. See libpod issue #1719
+	// FIXME: remove this case when ECR behavior is fixed
+	case errcode.ErrorCodeUnsupported:
+		return strings.Contains(err.Error(), "Invalid JSON syntax")
+	default:
+		return false
+	}
+}
+
+func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+	// Do not fail if we don’t really need to support signatures.
+	if len(signatures) == 0 {
+		return nil
+	}
+	if err := d.c.detectProperties(ctx); err != nil {
+		return err
+	}
+	switch {
+	case d.c.signatureBase != nil:
+		return d.putSignaturesToLookaside(signatures)
+	case d.c.supportsSignatures:
+		return d.putSignaturesToAPIExtension(ctx, signatures)
+	default:
+		return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
+	}
+}
+
+// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in d.c.signatureBase,
+// which is not nil.
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error {
+	// FIXME? This overwrites files one at a time, definitely not atomic.
+	// A failure when updating signatures with a reordered copy could lose some of them.
+
+	// Skip dealing with the manifest digest if not necessary.
+	if len(signatures) == 0 {
+		return nil
+	}
+
+	if d.manifestDigest.String() == "" {
+		// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
+		return errors.Errorf("Unknown manifest digest, can't add signatures")
+	}
+
+	// NOTE: Keep this in sync with docs/signature-protocols.md!
+	for i, signature := range signatures {
+		url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
+		if url == nil {
+			return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+		}
+		err := d.putOneSignature(url, signature)
+		if err != nil {
+			return err
+		}
+	}
+	// Remove any other signatures, if present.
+	// We stop at the first missing signature; if a previous deleting loop aborted
+	// prematurely, this may not clean up all of them, but one missing signature
+	// is enough for dockerImageSource to stop looking for other signatures, so that
+	// is sufficient.
+	for i := len(signatures); ; i++ {
+		url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
+		if url == nil {
+			return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+		}
+		missing, err := d.c.deleteOneSignature(url)
+		if err != nil {
+			return err
+		}
+		if missing {
+			break
+		}
+	}
+
+	return nil
+}
+
+// putOneSignature stores one signature to url.
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
+	switch url.Scheme {
+	case "file":
+		logrus.Debugf("Writing to %s", url.Path)
+		err := os.MkdirAll(filepath.Dir(url.Path), 0755)
+		if err != nil {
+			return err
+		}
+		err = ioutil.WriteFile(url.Path, signature, 0644)
+		if err != nil {
+			return err
+		}
+		return nil
+
+	case "http", "https":
+		return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+	default:
+		return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
+	}
+}
+
+// deleteOneSignature deletes a signature from url, if it exists.
+// If it successfully determines that the signature does not exist, returns (true, nil).
+// NOTE: Keep this in sync with docs/signature-protocols.md!
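+// A sketch of a call, as the cleanup loop in putSignaturesToLookaside above does:
+//
+//	missing, err := d.c.deleteOneSignature(url)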
+func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
+	switch url.Scheme {
+	case "file":
+		logrus.Debugf("Deleting %s", url.Path)
+		err := os.Remove(url.Path)
+		if err != nil && os.IsNotExist(err) {
+			return true, nil
+		}
+		return false, err
+
+	case "http", "https":
+		return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+	default:
+		return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
+	}
+}
+
+// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
+func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte) error {
+	// Skip dealing with the manifest digest, or reading the old state, if not necessary.
+	if len(signatures) == 0 {
+		return nil
+	}
+
+	if d.manifestDigest.String() == "" {
+		// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
+		return errors.Errorf("Unknown manifest digest, can't add signatures")
+	}
+
+	// Because image signatures are a shared resource in Atomic Registry, the default upload
+	// always adds signatures. Eventually we should also allow removing signatures,
+	// but the X-Registry-Supports-Signatures API extension does not support that yet.
+
+	existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, d.manifestDigest)
+	if err != nil {
+		return err
+	}
+	existingSigNames := map[string]struct{}{}
+	for _, sig := range existingSignatures.Signatures {
+		existingSigNames[sig.Name] = struct{}{}
+	}
+
+sigExists:
+	for _, newSig := range signatures {
+		for _, existingSig := range existingSignatures.Signatures {
+			if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
+				continue sigExists
+			}
+		}
+
+		// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+		var signatureName string
+		for {
+			randBytes := make([]byte, 16)
+			n, err := rand.Read(randBytes)
+			if err != nil || n != 16 {
+				return errors.Wrapf(err, "Error generating random signature len %d", n)
+			}
+			signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes)
+			if _, ok := existingSigNames[signatureName]; !ok {
+				break
+			}
+		}
+		sig := extensionSignature{
+			Version: extensionSignatureSchemaVersion,
+			Name:    signatureName,
+			Type:    extensionSignatureTypeAtomic,
+			Content: newSig,
+		}
+		body, err := json.Marshal(sig)
+		if err != nil {
+			return err
+		}
+
+		path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
+		res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
+		if err != nil {
+			return err
+		}
+		defer res.Body.Close()
+		if res.StatusCode != http.StatusCreated {
+			body, err := ioutil.ReadAll(res.Body)
+			if err == nil {
+				logrus.Debugf("Error body %s", string(body))
+			}
+			logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
+			return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
+		}
+	}
+
+	return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *dockerImageDestination) Commit(ctx context.Context) error {
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v4/docker/docker_image_src.go b/vendor/github.com/containers/image/v4/docker/docker_image_src.go
new file mode 100644
index 000000000..353b1a6c5
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/docker/docker_image_src.go
@@ -0,0 +1,451 @@
+package docker
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+
+	"github.com/containers/image/v4/docker/reference"
+	"github.com/containers/image/v4/manifest"
+	"github.com/containers/image/v4/pkg/sysregistriesv2"
+	"github.com/containers/image/v4/types"
+	"github.com/docker/distribution/registry/client"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type dockerImageSource struct {
+	ref dockerReference
+	c   *dockerClient
+	// State
+	cachedManifest         []byte // nil if not loaded yet
+	cachedManifestMIMEType string // Only valid if cachedManifest != nil
+}
+
+// newImageSource creates a new ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
+	registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
+	if err != nil {
+		return nil, errors.Wrapf(err, "error loading registries configuration")
+	}
+	if registry == nil {
+		// No configuration was found for the provided reference, so use the
+		// equivalent of a default configuration.
+		registry = &sysregistriesv2.Registry{
+			Endpoint: sysregistriesv2.Endpoint{
+				Location: ref.ref.String(),
+			},
+			Prefix: ref.ref.String(),
+		}
+	}
+
+	primaryDomain := reference.Domain(ref.ref)
+	// Check all endpoints for the manifest availability. If we find one that does
+	// contain the image, it will be used for all future pull actions. Always try the
+	// non-mirror original location last; this both transparently handles the case
+	// of no mirrors configured, and ensures we return the error encountered when
+	// accessing the upstream location if all endpoints fail.
+	manifestLoadErr := errors.New("Internal error: newImageSource returned without trying any endpoint")
+	pullSources, err := registry.PullSourcesFromReference(ref.ref)
+	if err != nil {
+		return nil, err
+	}
+	for _, pullSource := range pullSources {
+		logrus.Debugf("Trying to pull %q", pullSource.Reference)
+		dockerRef, err := newReference(pullSource.Reference)
+		if err != nil {
+			return nil, err
+		}
+
+		endpointSys := sys
+		// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
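+		// For illustration: credentials meant for docker.io must not be sent to a
+		// configured mirror such as mirror.example.com (an assumed hostname).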
+		if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(dockerRef.ref) != primaryDomain {
+			copy := *endpointSys
+			copy.DockerAuthConfig = nil
+			endpointSys = &copy
+		}
+
+		client, err := newDockerClientFromRef(endpointSys, dockerRef, false, "pull")
+		if err != nil {
+			return nil, err
+		}
+		client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
+
+		testImageSource := &dockerImageSource{
+			ref: dockerRef,
+			c:   client,
+		}
+
+		manifestLoadErr = testImageSource.ensureManifestIsLoaded(ctx)
+		if manifestLoadErr == nil {
+			return testImageSource, nil
+		}
+	}
+	return nil, manifestLoadErr
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *dockerImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *dockerImageSource) Close() error {
+	return nil
+}
+
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *dockerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return nil, nil
+}
+
+// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
+// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
+func simplifyContentType(contentType string) string {
+	if contentType == "" {
+		return contentType
+	}
+	mimeType, _, err := mime.ParseMediaType(contentType)
+	if err != nil {
+		return ""
+	}
+	return mimeType
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
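+// A sketch of the two modes:
+//
+//	manblob, mt, err := s.GetManifest(ctx, nil)       // the primary manifest
+//	manblob, mt, err = s.GetManifest(ctx, &instance)  // one instance of a manifest list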
+func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return s.fetchManifest(ctx, instanceDigest.String()) + } + err := s.ensureManifestIsLoaded(ctx) + if err != nil { + return nil, "", err + } + return s.cachedManifest, s.cachedManifestMIMEType, nil +} + +func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) { + path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest) + headers := map[string][]string{ + "Accept": manifest.DefaultRequestedManifestMIMETypes, + } + res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil) + if err != nil { + return nil, "", err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name()) + } + manblob, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, "", err + } + return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil +} + +// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType +// +// ImageSource implementations are not required or expected to do any caching, +// but because our signatures are “attached” to the manifest digest, +// we need to ensure that the digest of the manifest returned by GetManifest(ctx, nil) +// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious +// signature verification failures when pulling while a tag is being updated. +func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { + if s.cachedManifest != nil { + return nil + } + + reference, err := s.ref.tagOrDigest() + if err != nil { + return err + } + + manblob, mt, err := s.fetchManifest(ctx, reference) + if err != nil { + return err + } + // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors. + s.cachedManifest = manblob + s.cachedManifestMIMEType = mt + return nil +} + +func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { + var ( + resp *http.Response + err error + ) + for _, url := range urls { + resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) + if err == nil { + if resp.StatusCode != http.StatusOK { + err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode)) + logrus.Debug(err) + continue + } + break + } + } + if err != nil { + return nil, 0, err + } + return resp.Body, getBlobSize(resp), nil +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *dockerImageSource) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
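+// A sketch of a call (the caller owns closing the returned stream):
+//
+//	rc, size, err := s.GetBlob(ctx, info, cache)
+//	if err == nil {
+//		defer rc.Close()
+//	}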
+func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if len(info.URLs) != 0 { + return s.getExternalBlob(ctx, info.URLs) + } + + path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String()) + logrus.Debugf("Downloading %s", path) + res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) + if err != nil { + return nil, 0, err + } + if res.StatusCode != http.StatusOK { + // print url also + return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) + } + cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref)) + return res.Body, getBlobSize(res), nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if err := s.c.detectProperties(ctx); err != nil { + return nil, err + } + switch { + case s.c.signatureBase != nil: + return s.getSignaturesFromLookaside(ctx, instanceDigest) + case s.c.supportsSignatures: + return s.getSignaturesFromAPIExtension(ctx, instanceDigest) + default: + return [][]byte{}, nil + } +} + +// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, +// or finally, from a fetched manifest. +func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) { + if instanceDigest != nil { + return *instanceDigest, nil + } + if digested, ok := s.ref.ref.(reference.Digested); ok { + d := digested.Digest() + if d.Algorithm() == digest.Canonical { + return d, nil + } + } + if err := s.ensureManifestIsLoaded(ctx); err != nil { + return "", err + } + return manifest.Digest(s.cachedManifest) +} + +// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase, +// which is not nil. +func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) + if err != nil { + return nil, err + } + + // NOTE: Keep this in sync with docs/signature-protocols.md! + signatures := [][]byte{} + for i := 0; ; i++ { + url := signatureStorageURL(s.c.signatureBase, manifestDigest, i) + if url == nil { + return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") + } + signature, missing, err := s.getOneSignature(ctx, url) + if err != nil { + return nil, err + } + if missing { + break + } + signatures = append(signatures, signature) + } + return signatures, nil +} + +// getOneSignature downloads one signature from url. +// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil. +// NOTE: Keep this in sync with docs/signature-protocols.md! 
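+// A sketch of a call, as getSignaturesFromLookaside above does:
+//
+//	sig, missing, err := s.getOneSignature(ctx, url)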
+func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) { + switch url.Scheme { + case "file": + logrus.Debugf("Reading %s", url.Path) + sig, err := ioutil.ReadFile(url.Path) + if err != nil { + if os.IsNotExist(err) { + return nil, true, nil + } + return nil, false, err + } + return sig, false, nil + + case "http", "https": + logrus.Debugf("GET %s", url) + req, err := http.NewRequest("GET", url.String(), nil) + if err != nil { + return nil, false, err + } + req = req.WithContext(ctx) + res, err := s.c.client.Do(req) + if err != nil { + return nil, false, err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return nil, true, nil + } else if res.StatusCode != http.StatusOK { + return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode)) + } + sig, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, false, err + } + return sig, false, nil + + default: + return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String()) + } +} + +// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension. +func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) + if err != nil { + return nil, err + } + + parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest) + if err != nil { + return nil, err + } + + var sigs [][]byte + for _, sig := range parsedBody.Signatures { + if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { + sigs = append(sigs, sig.Content) + } + } + return sigs, nil +} + +// deleteImage deletes the named image from the registry, if supported. +func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error { + // docker/distribution does not document what action should be used for deleting images. + // + // Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it. + // quay.io requires "push" (an explicit "pull" is unnecessary), does not grant any token (fails parsing the request) if "delete" is included. + // OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user). + // + // We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything". + c, err := newDockerClientFromRef(sys, ref, true, "*") + if err != nil { + return err + } + + headers := map[string][]string{ + "Accept": manifest.DefaultRequestedManifestMIMETypes, + } + refTail, err := ref.tagOrDigest() + if err != nil { + return err + } + getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail) + get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil) + if err != nil { + return err + } + defer get.Body.Close() + manifestBody, err := ioutil.ReadAll(get.Body) + if err != nil { + return err + } + switch get.StatusCode { + case http.StatusOK: + case http.StatusNotFound: + return errors.Errorf("Unable to delete %v. 
Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
+	default:
+		return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
+	}
+
+	digest := get.Header.Get("Docker-Content-Digest")
+	deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest)
+
+	// When retrieving the digest from a registry >= 2.3 use the following header:
+	//   "Accept": "application/vnd.docker.distribution.manifest.v2+json"
+	delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil)
+	if err != nil {
+		return err
+	}
+	defer delete.Body.Close()
+
+	body, err := ioutil.ReadAll(delete.Body)
+	if err != nil {
+		return err
+	}
+	if delete.StatusCode != http.StatusAccepted {
+		return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
+	}
+
+	if c.signatureBase != nil {
+		manifestDigest, err := manifest.Digest(manifestBody)
+		if err != nil {
+			return err
+		}
+
+		for i := 0; ; i++ {
+			url := signatureStorageURL(c.signatureBase, manifestDigest, i)
+			if url == nil {
+				return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+			}
+			missing, err := c.deleteOneSignature(url)
+			if err != nil {
+				return err
+			}
+			if missing {
+				break
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v4/docker/docker_transport.go b/vendor/github.com/containers/image/v4/docker/docker_transport.go
new file mode 100644
index 000000000..c9ce75e0d
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/docker/docker_transport.go
@@ -0,0 +1,168 @@
+package docker
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/v4/docker/policyconfiguration"
+	"github.com/containers/image/v4/docker/reference"
+	"github.com/containers/image/v4/transports"
+	"github.com/containers/image/v4/types"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for Docker registry-hosted images.
+var Transport = dockerTransport{}
+
+type dockerTransport struct{}
+
+func (t dockerTransport) Name() string {
+	return "docker"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// FIXME? We could be verifying the various character set and length restrictions
+	// from docker/distribution/reference.regexp.go, but other than that there
+	// are few semantically invalid strings.
+	return nil
+}
+
+// dockerReference is an ImageReference for Docker images.
+type dockerReference struct {
+	ref reference.Named // By construction we know that !reference.IsNameOnly(ref)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) { + if !strings.HasPrefix(refString, "//") { + return nil, errors.Errorf("docker: image reference %s does not start with //", refString) + } + ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//")) + if err != nil { + return nil, err + } + ref = reference.TagNameOnly(ref) + return NewReference(ref) +} + +// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly(). +func NewReference(ref reference.Named) (types.ImageReference, error) { + return newReference(ref) +} + +// newReference returns a dockerReference for a named reference. +func newReference(ref reference.Named) (dockerReference, error) { + if reference.IsNameOnly(ref) { + return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) + } + // A github.com/distribution/reference value can have a tag and a digest at the same time! + // The docker/distribution API does not really support that (we can’t ask for an image with a specific + // tag and digest), so fail. This MAY be accepted in the future. + // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop + // the tag or the digest first?) + _, isTagged := ref.(reference.NamedTagged) + _, isDigested := ref.(reference.Canonical) + if isTagged && isDigested { + return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported") + } + + return dockerReference{ + ref: ref, + }, nil +} + +func (ref dockerReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref dockerReference) StringWithinTransport() string { + return "//" + reference.FamiliarString(ref.ref) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref dockerReference) DockerReference() reference.Named { + return ref.ref +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. 
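[Editorial aside: a usage sketch, not part of the patch. After the transports layer strips the "docker:" prefix, this function sees the leading "//", normalizes the name, and applies the default tag:

    ref, err := docker.ParseReference("//busybox")
    if err != nil {
        panic(err)
    }
    fmt.Println(ref.StringWithinTransport())    // "//busybox:latest"
    fmt.Println(ref.DockerReference().String()) // "docker.io/library/busybox:latest"
]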
+func (ref dockerReference) PolicyConfigurationIdentity() string { + res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) + if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref dockerReference) PolicyConfigurationNamespaces() []string { + return policyconfiguration.DockerReferenceNamespaces(ref.ref) +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + return newImage(ctx, sys, ref) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref dockerReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref dockerReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return deleteImage(ctx, sys, ref) +} + +// tagOrDigest returns a tag or digest from the reference. +func (ref dockerReference) tagOrDigest() (string, error) { + if ref, ok := ref.ref.(reference.Canonical); ok { + return ref.Digest().String(), nil + } + if ref, ok := ref.ref.(reference.NamedTagged); ok { + return ref.Tag(), nil + } + // This should not happen, NewReference above refuses reference.IsNameOnly values. 
+ return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) +} diff --git a/vendor/github.com/containers/image/v4/docker/lookaside.go b/vendor/github.com/containers/image/v4/docker/lookaside.go new file mode 100644 index 000000000..c43160f72 --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/lookaside.go @@ -0,0 +1,202 @@ +package docker + +import ( + "fmt" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "strings" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/types" + "github.com/ghodss/yaml" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. +// You can override this at build time with +// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path' +var systemRegistriesDirPath = builtinRegistriesDirPath + +// builtinRegistriesDirPath is the path to registries.d. +// DO NOT change this, instead see systemRegistriesDirPath above. +const builtinRegistriesDirPath = "/etc/containers/registries.d" + +// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. +// NOTE: Keep this in sync with docs/registries.d.md! +type registryConfiguration struct { + DefaultDocker *registryNamespace `json:"default-docker"` + // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*), + Docker map[string]registryNamespace `json:"docker"` +} + +// registryNamespace defines lookaside locations for a single namespace. +type registryNamespace struct { + SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing. + SigStoreStaging string `json:"sigstore-staging"` // For writing only. +} + +// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage. +// Users outside of this file should use configuredSignatureStorageBase and signatureStorageURL below. +type signatureStorageBase *url.URL // The only documented value is nil, meaning storage is not supported. + +// configuredSignatureStorageBase reads configuration to find an appropriate signature storage URL for ref, for write access if “write”. +func configuredSignatureStorageBase(sys *types.SystemContext, ref dockerReference, write bool) (signatureStorageBase, error) { + // FIXME? Loading and parsing the config could be cached across calls. + dirPath := registriesDirPath(sys) + logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath) + config, err := loadAndMergeConfig(dirPath) + if err != nil { + return nil, err + } + + topLevel := config.signatureTopLevel(ref, write) + if topLevel == "" { + return nil, nil + } + + url, err := url.Parse(topLevel) + if err != nil { + return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel) + } + // NOTE: Keep this in sync with docs/signature-protocols.md! + // FIXME? Restrict to explicitly supported schemes? + repo := reference.Path(ref.ref) // Note that this is without a tag or digest. 
+ if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references + return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String()) + } + url.Path = url.Path + "/" + repo + return url, nil +} + +// registriesDirPath returns a path to registries.d +func registriesDirPath(sys *types.SystemContext) string { + if sys != nil { + if sys.RegistriesDirPath != "" { + return sys.RegistriesDirPath + } + if sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath) + } + } + return systemRegistriesDirPath +} + +// loadAndMergeConfig loads configuration files in dirPath +func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { + mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}} + dockerDefaultMergedFrom := "" + nsMergedFrom := map[string]string{} + + dir, err := os.Open(dirPath) + if err != nil { + if os.IsNotExist(err) { + return &mergedConfig, nil + } + return nil, err + } + configNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + for _, configName := range configNames { + if !strings.HasSuffix(configName, ".yaml") { + continue + } + configPath := filepath.Join(dirPath, configName) + configBytes, err := ioutil.ReadFile(configPath) + if err != nil { + return nil, err + } + + var config registryConfiguration + err = yaml.Unmarshal(configBytes, &config) + if err != nil { + return nil, errors.Wrapf(err, "Error parsing %s", configPath) + } + + if config.DefaultDocker != nil { + if mergedConfig.DefaultDocker != nil { + return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, + dockerDefaultMergedFrom, configPath) + } + mergedConfig.DefaultDocker = config.DefaultDocker + dockerDefaultMergedFrom = configPath + } + + for nsName, nsConfig := range config.Docker { // includes config.Docker == nil + if _, ok := mergedConfig.Docker[nsName]; ok { + return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, + nsName, nsMergedFrom[nsName], configPath) + } + mergedConfig.Docker[nsName] = nsConfig + nsMergedFrom[nsName] = configPath + } + } + + return &mergedConfig, nil +} + +// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”. +// (the top level of the storage, namespaced by repo.FullName etc.), or "" if no signature storage should be used. +func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string { + if config.Docker != nil { + // Look for a full match. + identity := ref.PolicyConfigurationIdentity() + if ns, ok := config.Docker[identity]; ok { + logrus.Debugf(` Using "docker" namespace %s`, identity) + if url := ns.signatureTopLevel(write); url != "" { + return url + } + } + + // Look for a match of the possible parent namespaces. 
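[Editorial aside: for orientation, a hypothetical registries.d fragment of the shape loadAndMergeConfig above merges; host names and paths are invented, and within this package it would unmarshal into registryConfiguration as sketched:

    var cfg registryConfiguration
    fragment := []byte(`
    default-docker:
      sigstore: https://sigstore.example.com
    docker:
      registry.example.com/project:
        sigstore: https://sigstore.example.com/project
        sigstore-staging: file:///var/lib/containers/sigstore
    `)
    if err := yaml.Unmarshal(fragment, &cfg); err != nil {
        panic(err) // uniform leading indentation is valid YAML
    }
]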
+		for _, name := range ref.PolicyConfigurationNamespaces() {
+			if ns, ok := config.Docker[name]; ok {
+				logrus.Debugf(` Using "docker" namespace %s`, name)
+				if url := ns.signatureTopLevel(write); url != "" {
+					return url
+				}
+			}
+		}
+	}
+	// Look for a default location
+	if config.DefaultDocker != nil {
+		logrus.Debugf(` Using "default-docker" configuration`)
+		if url := config.DefaultDocker.signatureTopLevel(write); url != "" {
+			return url
+		}
+	}
+	logrus.Debugf(" No signature storage configuration found for %s", ref.PolicyConfigurationIdentity())
+	return ""
+}
+
+// ns.signatureTopLevel returns a URL string configured in ns, for write access if “write”,
+// or "" if nothing has been configured.
+func (ns registryNamespace) signatureTopLevel(write bool) string {
+	if write && ns.SigStoreStaging != "" {
+		logrus.Debugf(` Using %s`, ns.SigStoreStaging)
+		return ns.SigStoreStaging
+	}
+	if ns.SigStore != "" {
+		logrus.Debugf(` Using %s`, ns.SigStore)
+		return ns.SigStore
+	}
+	return ""
+}
+
+// signatureStorageURL returns a URL usable for accessing the signature index in base with known manifestDigest, or nil if not applicable.
+// Returns nil iff base == nil.
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
+	if base == nil {
+		return nil
+	}
+	url := *base
+	url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
+	return &url
+}
diff --git a/vendor/github.com/containers/image/v4/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v4/docker/policyconfiguration/naming.go
new file mode 100644
index 000000000..e2ed631c5
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/docker/policyconfiguration/naming.go
@@ -0,0 +1,56 @@
+package policyconfiguration
+
+import (
+	"strings"
+
+	"github.com/containers/image/v4/docker/reference"
+	"github.com/pkg/errors"
+)
+
+// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
+// as a backend for ImageReference.PolicyConfigurationIdentity.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceIdentity(ref reference.Named) (string, error) {
+	res := ref.Name()
+	tagged, isTagged := ref.(reference.NamedTagged)
+	digested, isDigested := ref.(reference.Canonical)
+	switch {
+	case isTagged && isDigested: // Note that this CAN actually happen.
+		return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
+	case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
+		return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
+	case isTagged:
+		res = res + ":" + tagged.Tag()
+	case isDigested:
+		res = res + "@" + digested.Digest().String()
+	default: // Coverage: The above was supposed to be exhaustive.
+		return "", errors.New("Internal inconsistency, unexpected default branch")
+	}
+	return res, nil
+}
+
+// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search,
+// as a backend for ImageReference.PolicyConfigurationNamespaces.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceNamespaces(ref reference.Named) []string {
+	// Look for a match of the repository, and then of the possible parent
Note that this only happens on the expanded host names + // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox", + // then in its parent "docker.io/library"; in none of "busybox", + // un-namespaced "library" nor in "" supposedly implicitly representing "library/". + // + // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last + // iteration matches the host name (for any namespace). + res := []string{} + name := ref.Name() + for { + res = append(res, name) + + lastSlash := strings.LastIndex(name, "/") + if lastSlash == -1 { + break + } + name = name[:lastSlash] + } + return res +} diff --git a/vendor/github.com/containers/image/v4/docker/reference/README.md b/vendor/github.com/containers/image/v4/docker/reference/README.md new file mode 100644 index 000000000..3c4d74eb4 --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/reference/README.md @@ -0,0 +1,2 @@ +This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8, +except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file diff --git a/vendor/github.com/containers/image/v4/docker/reference/helpers.go b/vendor/github.com/containers/image/v4/docker/reference/helpers.go new file mode 100644 index 000000000..978df7eab --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/containers/image/v4/docker/reference/normalize.go b/vendor/github.com/containers/image/v4/docker/reference/normalize.go new file mode 100644 index 000000000..6a86ec64f --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/reference/normalize.go @@ -0,0 +1,181 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". 
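[Editorial aside: a quick illustration of the search order this produces; sketch only, not part of the patch:

    ref, _ := reference.ParseNormalizedNamed("busybox")
    for _, ns := range policyconfiguration.DockerReferenceNamespaces(reference.TagNameOnly(ref)) {
        fmt.Println(ns)
    }
    // Output:
    // docker.io/library/busybox
    // docker.io/library
    // docker.io
]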
+type normalizedNamed interface {
+	Named
+	Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+	}
+	domain, remainder := splitDockerDomain(s)
+	var remoteName string
+	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+		remoteName = remainder[:tagSep]
+	} else {
+		remoteName = remainder
+	}
+	if strings.ToLower(remoteName) != remoteName {
+		return nil, errors.New("invalid reference format: repository name must be lowercase")
+	}
+
+	ref, err := Parse(domain + "/" + remainder)
+	if err != nil {
+		return nil, err
+	}
+	named, isNamed := ref.(Named)
+	if !isNamed {
+		return nil, fmt.Errorf("reference %s has no name", ref.String())
+	}
+	return named, nil
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The reference returned can only be either tagged or digested. If the reference contains both a tag
+// and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+	named, err := ParseNormalizedNamed(ref)
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := named.(NamedTagged); ok {
+		if canonical, ok := named.(Canonical); ok {
+			// The reference is both tagged and digested, only
+			// return digested.
+			newNamed, err := WithName(canonical.Name())
+			if err != nil {
+				return nil, err
+			}
+			newCanonical, err := WithDigest(newNamed, canonical.Digest())
+			if err != nil {
+				return nil, err
+			}
+			return newCanonical, nil
+		}
+	}
+	return TagNameOnly(named), nil
+}
+
+// splitDockerDomain splits a repository name to domain and remotename string.
+// If no valid domain is found, the default domain is used. Repository name
+// needs to be already validated before.
+func splitDockerDomain(name string) (domain, remainder string) {
+	i := strings.IndexRune(name, '/')
+	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+		domain, remainder = defaultDomain, name
+	} else {
+		domain, remainder = name[:i], name[i+1:]
+	}
+	if domain == legacyDefaultDomain {
+		domain = defaultDomain
+	}
+	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+		remainder = officialRepoName + "/" + remainder
+	}
+	return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference.
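[Editorial aside: hedged examples of these normalization rules, not part of the patch:

    n1, _ := reference.ParseNormalizedNamed("ubuntu")
    fmt.Println(n1.String()) // "docker.io/library/ubuntu"
    n2, _ := reference.ParseNormalizedNamed("quay.io/podman/stable:v1")
    fmt.Println(n2.String()) // "quay.io/podman/stable:v1"
    // ParseDockerRef keeps only the digest when both tag and digest are present:
    n3, _ := reference.ParseDockerRef("busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
    fmt.Println(n3.String()) // "docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa"
]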
+func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/containers/image/v4/docker/reference/reference.go b/vendor/github.com/containers/image/v4/docker/reference/reference.go new file mode 100644 index 000000000..8c0c23b2f --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. 
+	ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+	ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+	ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+	ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+	// ErrNameEmpty is returned for empty, invalid repository names.
+	ErrNameEmpty = errors.New("repository name must have at least one component")
+
+	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+	// ErrNameNotCanonical is returned when a name is not canonical.
+	ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+	// String returns the full reference
+	String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+	reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+	return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+	return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+	return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+	r, err := Parse(string(p))
+	if err != nil {
+		return err
+	}
+
+	f.reference = r
+	return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+	Reference
+	Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+	Reference
+	Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+	Named
+	Tag() string
+}
+
+// Digested is an object which has a digest
+// by which it can be referenced.
+type Digested interface {
+	Reference
+	Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name including a name with domain and digest
+type Canonical interface {
+	Named
+	Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
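[Editorial aside: callers typically discover what a parsed Reference carries through type assertions against these interfaces; a minimal sketch with an invented input, not part of the patch:

    ref, err := reference.Parse("docker.io/library/busybox:musl")
    if err != nil {
        panic(err)
    }
    if tagged, ok := ref.(reference.NamedTagged); ok {
        fmt.Println(tagged.Tag()) // "musl"
    }
    if canonical, ok := ref.(reference.Canonical); ok {
        fmt.Println(canonical.Digest()) // not reached here: the input has no digest
    }
]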
+type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. 
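[Editorial aside: an example of the Domain/Path split above; sketch only:

    named, _ := reference.ParseNormalizedNamed("quay.io/podman/stable")
    fmt.Println(reference.Domain(named)) // "quay.io"
    fmt.Println(reference.Path(named))   // "podman/stable"
]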
+func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. +func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/containers/image/v4/docker/reference/regexp.go b/vendor/github.com/containers/image/v4/docker/reference/regexp.go new file mode 100644 index 000000000..786034932 --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/reference/regexp.go @@ -0,0 +1,143 @@ +package 
reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. 
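[Editorial aside: the three capture groups of ReferenceRegexp line up with name, tag and digest; a sketch of what a match yields, with an invented input:

    m := reference.ReferenceRegexp.FindStringSubmatch("quay.io/podman/stable:v1")
    // m[1] == "quay.io/podman/stable" (name)
    // m[2] == "v1"                    (tag)
    // m[3] == ""                      (no digest present)
]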
+ anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/containers/image/v4/docker/tarfile/dest.go b/vendor/github.com/containers/image/v4/docker/tarfile/dest.go new file mode 100644 index 000000000..aec8404b6 --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/tarfile/dest.go @@ -0,0 +1,407 @@ +package tarfile + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/internal/tmpdir" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer. +type Destination struct { + writer io.Writer + tar *tar.Writer + repoTags []reference.NamedTagged + // Other state. + blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs + config []byte +} + +// NewDestination returns a tarfile.Destination for the specified io.Writer. +func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination { + repoTags := []reference.NamedTagged{} + if ref != nil { + repoTags = append(repoTags, ref) + } + return &Destination{ + writer: dest, + tar: tar.NewWriter(dest), + repoTags: repoTags, + blobs: make(map[digest.Digest]types.BlobInfo), + } +} + +// AddRepoTags adds the specified tags to the destination's repoTags. +func (d *Destination) AddRepoTags(tags []reference.NamedTagged) { + d.repoTags = append(d.repoTags, tags...) 
+}
+
+// SupportedManifestMIMETypes tells which manifest mime types the destination supports.
+// If an empty slice or nil is returned, then any mime type may be tried for upload.
+func (d *Destination) SupportedManifestMIMETypes() []string {
+	return []string{
+		manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+	}
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *Destination) SupportsSignatures(ctx context.Context) error {
+	return errors.Errorf("Storing signatures for docker tar files is not supported")
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *Destination) AcceptsForeignLayerURLs() bool {
+	return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *Destination) MustMatchRuntimeOS() bool {
+	return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *Destination) IgnoresEmbeddedDockerReference() bool {
+	return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *Destination) HasThreadSafePutBlob() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	// Ouch, we need to stream the blob into a temporary file just to determine the size.
+	// When the layer is decompressed, we also have to generate the digest on uncompressed data.
+	if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
+		logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
+		streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob")
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		defer os.Remove(streamCopy.Name())
+		defer streamCopy.Close()
+
+		digester := digest.Canonical.Digester()
+		tee := io.TeeReader(stream, digester.Hash())
+		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+		size, err := io.Copy(streamCopy, tee)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		_, err = streamCopy.Seek(0, os.SEEK_SET)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
+		if inputInfo.Digest == "" {
+			inputInfo.Digest = digester.Digest()
+		}
+		stream = streamCopy
+		logrus.Debugf("... streaming done")
+	}
+
+	// Maybe the blob has already been sent
+	ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	if ok {
+		return reusedInfo, nil
+	}
+
+	if isConfig {
+		buf, err := ioutil.ReadAll(stream)
+		if err != nil {
+			return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
+		}
+		d.config = buf
+		if err := d.sendFile(inputInfo.Digest.Hex()+".json", inputInfo.Size, bytes.NewReader(buf)); err != nil {
+			return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
+		}
+	} else {
+		// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
+		// writeLegacyLayerMetadata constructs layer IDs differently from inputinfo.Digest values (as described
+		// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
+		// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
+		// in the root of the tarball.
+		if err := d.sendFile(inputInfo.Digest.Hex()+".tar", inputInfo.Size, stream); err != nil {
+			return types.BlobInfo{}, err
+		}
+	}
+	d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}
+	return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
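[Editorial aside: the core of PutBlob's size/digest recovery above, reduced to a sketch; stream and tmpFile are assumed to exist, and this is not part of the patch:

    digester := digest.Canonical.Digester()
    tee := io.TeeReader(stream, digester.Hash()) // hash everything read from stream
    size, err := io.Copy(tmpFile, tee)           // single pass fills tmpFile
    if err != nil {
        return types.BlobInfo{}, err
    }
    info := types.BlobInfo{Digest: digester.Digest(), Size: size} // digest of exactly the bytes copied
]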
+func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + if info.Digest == "" { + return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest") + } + if blob, ok := d.blobs[info.Digest]; ok { + return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil + } + return false, types.BlobInfo{}, nil +} + +func (d *Destination) createRepositoriesFile(rootLayerID string) error { + repositories := map[string]map[string]string{} + for _, repoTag := range d.repoTags { + if val, ok := repositories[repoTag.Name()]; ok { + val[repoTag.Tag()] = rootLayerID + } else { + repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): rootLayerID} + } + } + + b, err := json.Marshal(repositories) + if err != nil { + return errors.Wrap(err, "Error marshaling repositories") + } + if err := d.sendBytes(legacyRepositoriesFileName, b); err != nil { + return errors.Wrap(err, "Error writing config json file") + } + return nil +} + +// PutManifest writes manifest to the destination. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +func (d *Destination) PutManifest(ctx context.Context, m []byte) error { + // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative, + // so the caller trying a different manifest kind would be pointless. + var man manifest.Schema2 + if err := json.Unmarshal(m, &man); err != nil { + return errors.Wrap(err, "Error parsing manifest") + } + if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType { + return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest") + } + + layerPaths, lastLayerID, err := d.writeLegacyLayerMetadata(man.LayersDescriptors) + if err != nil { + return err + } + + if len(man.LayersDescriptors) > 0 { + if err := d.createRepositoriesFile(lastLayerID); err != nil { + return err + } + } + + repoTags := []string{} + for _, tag := range d.repoTags { + // For github.com/docker/docker consumers, this works just as well as + // refString := ref.String() + // because when reading the RepoTags strings, github.com/docker/docker/reference + // normalizes both of them to the same value. + // + // Doing it this way to include the normalized-out `docker.io[/library]` does make + // a difference for github.com/projectatomic/docker consumers, with the + // “Add --add-registry and --block-registry options to docker daemon” patch. + // These consumers treat reference strings which include a hostname and reference + // strings without a hostname differently. + // + // Using the host name here is more explicit about the intent, and it has the same + // effect as (docker pull) in projectatomic/docker, which tags the result using + // a hostname-qualified reference. + // See https://github.com/containers/image/issues/72 for a more detailed + // analysis and explanation. 
+ refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag()) + repoTags = append(repoTags, refString) + } + + items := []ManifestItem{{ + Config: man.ConfigDescriptor.Digest.Hex() + ".json", + RepoTags: repoTags, + Layers: layerPaths, + Parent: "", + LayerSources: nil, + }} + itemsBytes, err := json.Marshal(&items) + if err != nil { + return err + } + + // FIXME? Do we also need to support the legacy format? + return d.sendBytes(manifestFileName, itemsBytes) +} + +// writeLegacyLayerMetadata writes legacy VERSION and configuration files for all layers +func (d *Destination) writeLegacyLayerMetadata(layerDescriptors []manifest.Schema2Descriptor) (layerPaths []string, lastLayerID string, err error) { + var chainID digest.Digest + lastLayerID = "" + for i, l := range layerDescriptors { + // This chainID value matches the computation in docker/docker/layer.CreateChainID … + if chainID == "" { + chainID = l.Digest + } else { + chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String()) + } + // … but note that this image ID does not match docker/docker/image/v1.CreateID. At least recent + // versions allocate new IDs on load, as long as the IDs we use are unique / cannot loop. + // + // Overall, the goal of computing a digest dependent on the full history is to avoid reusing an image ID + // (and possibly creating a loop in the "parent" links) if a layer with the same DiffID appears two or more + // times in layersDescriptors. The ChainID values are sufficient for this, the v1.CreateID computation + // which also mixes in the full image configuration seems unnecessary, at least as long as we are storing + // only a single image per tarball, i.e. all DiffID prefixes are unique (can’t differ only with + // configuration). + layerID := chainID.Hex() + + physicalLayerPath := l.Digest.Hex() + ".tar" + // The layer itself has been stored into physicalLayerPath in PutManifest. + // So, use that path for layerPaths used in the non-legacy manifest + layerPaths = append(layerPaths, physicalLayerPath) + // ... 
and create a symlink for the legacy format;
+		if err := d.sendSymlink(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
+			return nil, "", errors.Wrap(err, "Error creating layer symbolic link")
+		}
+
+		b := []byte("1.0")
+		if err := d.sendBytes(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
+			return nil, "", errors.Wrap(err, "Error writing VERSION file")
+		}
+
+		// The legacy format requires a config file per layer
+		layerConfig := make(map[string]interface{})
+		layerConfig["id"] = layerID
+
+		// The root layer doesn't have any parent
+		if lastLayerID != "" {
+			layerConfig["parent"] = lastLayerID
+		}
+		// The top layer's configuration file is generated from a subset of the image configuration
+		if i == len(layerDescriptors)-1 {
+			var config map[string]*json.RawMessage
+			err := json.Unmarshal(d.config, &config)
+			if err != nil {
+				return nil, "", errors.Wrap(err, "Error unmarshaling config")
+			}
+			for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
+				layerConfig[attr] = config[attr]
+			}
+		}
+		b, err := json.Marshal(layerConfig)
+		if err != nil {
+			return nil, "", errors.Wrap(err, "Error marshaling layer config")
+		}
+		if err := d.sendBytes(filepath.Join(layerID, legacyConfigFileName), b); err != nil {
+			return nil, "", errors.Wrap(err, "Error writing config json file")
+		}
+
+		lastLayerID = layerID
+	}
+	return layerPaths, lastLayerID, nil
+}
+
+type tarFI struct {
+	path      string
+	size      int64
+	isSymlink bool
+}
+
+func (t *tarFI) Name() string {
+	return t.path
+}
+func (t *tarFI) Size() int64 {
+	return t.size
+}
+func (t *tarFI) Mode() os.FileMode {
+	if t.isSymlink {
+		return os.ModeSymlink
+	}
+	return 0444
+}
+func (t *tarFI) ModTime() time.Time {
+	return time.Unix(0, 0)
+}
+func (t *tarFI) IsDir() bool {
+	return false
+}
+func (t *tarFI) Sys() interface{} {
+	return nil
+}
+
+// sendSymlink sends a symlink into the tar stream.
+func (d *Destination) sendSymlink(path string, target string) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Sending as tar link %s -> %s", path, target)
+	return d.tar.WriteHeader(hdr)
+}
+
+// sendBytes sends the given in-memory contents as a file at path into the tar stream.
+func (d *Destination) sendBytes(path string, b []byte) error {
+	return d.sendFile(path, int64(len(b)), bytes.NewReader(b))
+}
+
+// sendFile sends a file into the tar stream.
+func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Sending as tar file %s", path)
+	if err := d.tar.WriteHeader(hdr); err != nil {
+		return err
+	}
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+	size, err := io.Copy(d.tar, stream)
+	if err != nil {
+		return err
+	}
+	if size != expectedSize {
+		return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+	}
+	return nil
+}
+
+// PutSignatures adds the given signatures to the docker tarfile (currently not
+// supported).
MUST be called after PutManifest (signatures reference manifest +// contents) +func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte) error { + if len(signatures) != 0 { + return errors.Errorf("Storing signatures for docker tar files is not supported") + } + return nil +} + +// Commit finishes writing data to the underlying io.Writer. +// It is the caller's responsibility to close it, if necessary. +func (d *Destination) Commit(ctx context.Context) error { + return d.tar.Close() +} diff --git a/vendor/github.com/containers/image/v4/docker/tarfile/doc.go b/vendor/github.com/containers/image/v4/docker/tarfile/doc.go new file mode 100644 index 000000000..4ea5369c0 --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/tarfile/doc.go @@ -0,0 +1,3 @@ +// Package tarfile is an internal implementation detail of some transports. +// Do not use outside of the github.com/containers/image repo! +package tarfile diff --git a/vendor/github.com/containers/image/v4/docker/tarfile/src.go b/vendor/github.com/containers/image/v4/docker/tarfile/src.go new file mode 100644 index 000000000..78e4d6f65 --- /dev/null +++ b/vendor/github.com/containers/image/v4/docker/tarfile/src.go @@ -0,0 +1,478 @@ +package tarfile + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "os" + "path" + "sync" + + "github.com/containers/image/v4/internal/tmpdir" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/pkg/compression" + "github.com/containers/image/v4/types" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Source is a partial implementation of types.ImageSource for reading from tarPath. +type Source struct { + tarPath string + removeTarPathOnClose bool // Remove temp file on close if true + // The following data is only available after ensureCachedDataIsPresent() succeeds + tarManifest *ManifestItem // nil if not available yet. + configBytes []byte + configDigest digest.Digest + orderedDiffIDList []digest.Digest + knownLayers map[digest.Digest]*layerInfo + // Other state + generatedManifest []byte // Private cache for GetManifest(), nil if not set yet. + cacheDataLock sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe + cacheDataResult error // Private state for ensureCachedDataIsPresent +} + +type layerInfo struct { + path string + size int64 +} + +// TODO: We could add support for multiple images in a single archive, so +// that people could use docker-archive:opensuse.tar:opensuse:leap as +// the source of an image. +// To do for both the NewSourceFromFile and NewSourceFromStream functions + +// NewSourceFromFile returns a tarfile.Source for the specified path. +func NewSourceFromFile(path string) (*Source, error) { + file, err := os.Open(path) + if err != nil { + return nil, errors.Wrapf(err, "error opening file %q", path) + } + defer file.Close() + + // If the file is already not compressed we can just return the file itself + // as a source. Otherwise we pass the stream to NewSourceFromStream. + stream, isCompressed, err := compression.AutoDecompress(file) + if err != nil { + return nil, errors.Wrapf(err, "Error detecting compression for file %q", path) + } + defer stream.Close() + if !isCompressed { + return &Source{ + tarPath: path, + }, nil + } + return NewSourceFromStream(stream) +} + +// NewSourceFromStream returns a tarfile.Source for the specified inputStream, +// which can be either compressed or uncompressed. 
The caller can close the +// inputStream immediately after NewSourceFromFile returns. +func NewSourceFromStream(inputStream io.Reader) (*Source, error) { + // FIXME: use SystemContext here. + // Save inputStream to a temporary file + tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tar") + if err != nil { + return nil, errors.Wrap(err, "error creating temporary file") + } + defer tarCopyFile.Close() + + succeeded := false + defer func() { + if !succeeded { + os.Remove(tarCopyFile.Name()) + } + }() + + // In order to be compatible with docker-load, we need to support + // auto-decompression (it's also a nice quality-of-life thing to avoid + // giving users really confusing "invalid tar header" errors). + uncompressedStream, _, err := compression.AutoDecompress(inputStream) + if err != nil { + return nil, errors.Wrap(err, "Error auto-decompressing input") + } + defer uncompressedStream.Close() + + // Copy the plain archive to the temporary file. + // + // TODO: This can take quite some time, and should ideally be cancellable + // using a context.Context. + if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil { + return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name()) + } + succeeded = true + + return &Source{ + tarPath: tarCopyFile.Name(), + removeTarPathOnClose: true, + }, nil +} + +// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component. +type tarReadCloser struct { + *tar.Reader + backingFile *os.File +} + +func (t *tarReadCloser) Close() error { + return t.backingFile.Close() +} + +// openTarComponent returns a ReadCloser for the specific file within the archive. +// This is linear scan; we assume that the tar file will have a fairly small amount of files (~layers), +// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough. +// The caller should call .Close() on the returned stream. +func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) { + f, err := os.Open(s.tarPath) + if err != nil { + return nil, err + } + succeeded := false + defer func() { + if !succeeded { + f.Close() + } + }() + + tarReader, header, err := findTarComponent(f, componentPath) + if err != nil { + return nil, err + } + if header == nil { + return nil, os.ErrNotExist + } + if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested + // We follow only one symlink; so no loops are possible. + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive, + // so we don't care. + tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname)) + if err != nil { + return nil, err + } + if header == nil { + return nil, os.ErrNotExist + } + } + + if !header.FileInfo().Mode().IsRegular() { + return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name) + } + succeeded = true + return &tarReadCloser{Reader: tarReader, backingFile: f}, nil +} + +// findTarComponent returns a header and a reader matching path within inputFile, +// or (nil, nil, nil) if not found. 
+func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) { + t := tar.NewReader(inputFile) + for { + h, err := t.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, nil, err + } + if h.Name == path { + return t, h, nil + } + } + return nil, nil, nil +} + +// readTarComponent returns full contents of componentPath. +func (s *Source) readTarComponent(path string) ([]byte, error) { + file, err := s.openTarComponent(path) + if err != nil { + return nil, errors.Wrapf(err, "Error loading tar component %s", path) + } + defer file.Close() + bytes, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + return bytes, nil +} + +// ensureCachedDataIsPresent loads data necessary for any of the public accessors. +// It is safe to call this from multi-threaded code. +func (s *Source) ensureCachedDataIsPresent() error { + s.cacheDataLock.Do(func() { + s.cacheDataResult = s.ensureCachedDataIsPresentPrivate() + }) + return s.cacheDataResult +} + +// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent. +// Call ensureCachedDataIsPresent instead. +func (s *Source) ensureCachedDataIsPresentPrivate() error { + // Read and parse manifest.json + tarManifest, err := s.loadTarManifest() + if err != nil { + return err + } + + // Check to make sure length is 1 + if len(tarManifest) != 1 { + return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest)) + } + + // Read and parse config. + configBytes, err := s.readTarComponent(tarManifest[0].Config) + if err != nil { + return err + } + var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. + if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { + return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config) + } + + knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig) + if err != nil { + return err + } + + // Success; commit. + s.tarManifest = &tarManifest[0] + s.configBytes = configBytes + s.configDigest = digest.FromBytes(configBytes) + s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs + s.knownLayers = knownLayers + return nil +} + +// loadTarManifest loads and decodes the manifest.json. +func (s *Source) loadTarManifest() ([]ManifestItem, error) { + // FIXME? Do we need to deal with the legacy format? + bytes, err := s.readTarComponent(manifestFileName) + if err != nil { + return nil, err + } + var items []ManifestItem + if err := json.Unmarshal(bytes, &items); err != nil { + return nil, errors.Wrap(err, "Error decoding tar manifest.json") + } + return items, nil +} + +// Close removes resources associated with an initialized Source, if any. +func (s *Source) Close() error { + if s.removeTarPathOnClose { + return os.Remove(s.tarPath) + } + return nil +} + +// LoadTarManifest loads and decodes the manifest.json +func (s *Source) LoadTarManifest() ([]ManifestItem, error) { + return s.loadTarManifest() +} + +func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { + // Collect layer data available in manifest and config. 
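+	// tarManifest.Layers and parsedConfig.RootFS.DiffIDs are parallel lists:
+	// Layers[i] is the path of the tarball entry whose uncompressed content
+	// hashes to DiffIDs[i]; the length check below enforces that pairing.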
+ if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { + return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) + } + knownLayers := map[digest.Digest]*layerInfo{} + unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. + for i, diffID := range parsedConfig.RootFS.DiffIDs { + if _, ok := knownLayers[diffID]; ok { + // Apparently it really can happen that a single image contains the same layer diff more than once. + // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter + // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original. + continue + } + layerPath := tarManifest.Layers[i] + if _, ok := unknownLayerSizes[layerPath]; ok { + return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) + } + li := &layerInfo{ // A new element in each iteration + path: layerPath, + size: -1, + } + knownLayers[diffID] = li + unknownLayerSizes[layerPath] = li + } + + // Scan the tar file to collect layer sizes. + file, err := os.Open(s.tarPath) + if err != nil { + return nil, err + } + defer file.Close() + t := tar.NewReader(file) + for { + h, err := t.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + if li, ok := unknownLayerSizes[h.Name]; ok { + // Since GetBlob will decompress layers that are compressed we need + // to do the decompression here as well, otherwise we will + // incorrectly report the size. Pretty critical, since tools like + // umoci always compress layer blobs. Obviously we only bother with + // the slower method of checking if it's compressed. + uncompressedStream, isCompressed, err := compression.AutoDecompress(t) + if err != nil { + return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name) + } + defer uncompressedStream.Close() + + uncompressedSize := h.Size + if isCompressed { + uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream) + if err != nil { + return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name) + } + } + li.size = uncompressedSize + delete(unknownLayerSizes, h.Name) + } + } + if len(unknownLayerSizes) != 0 { + return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. + } + + return knownLayers, nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. 
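+		// A caller passes a non-nil instanceDigest only after GetManifest(ctx, nil)
+		// reported a manifest list; this source always reports a schema2 manifest,
+		// so reaching this branch indicates a bug in the caller.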
+		return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
+	}
+	if s.generatedManifest == nil {
+		if err := s.ensureCachedDataIsPresent(); err != nil {
+			return nil, "", err
+		}
+		m := manifest.Schema2{
+			SchemaVersion: 2,
+			MediaType:     manifest.DockerV2Schema2MediaType,
+			ConfigDescriptor: manifest.Schema2Descriptor{
+				MediaType: manifest.DockerV2Schema2ConfigMediaType,
+				Size:      int64(len(s.configBytes)),
+				Digest:    s.configDigest,
+			},
+			LayersDescriptors: []manifest.Schema2Descriptor{},
+		}
+		for _, diffID := range s.orderedDiffIDList {
+			li, ok := s.knownLayers[diffID]
+			if !ok {
+				return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
+			}
+			m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
+				Digest:    diffID, // diffID is a digest of the uncompressed tarball
+				MediaType: manifest.DockerV2Schema2LayerMediaType,
+				Size:      li.size,
+			})
+		}
+		manifestBytes, err := json.Marshal(&m)
+		if err != nil {
+			return nil, "", err
+		}
+		s.generatedManifest = manifestBytes
+	}
+	return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
+}
+
+// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input.
+type uncompressedReadCloser struct {
+	io.Reader
+	underlyingCloser   func() error
+	uncompressedCloser func() error
+}
+
+func (r uncompressedReadCloser) Close() error {
+	var res error
+	if err := r.uncompressedCloser(); err != nil {
+		res = err
+	}
+	if err := r.underlyingCloser(); err != nil && res == nil {
+		res = err
+	}
+	return res
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *Source) HasThreadSafeGetBlob() bool {
+	return true
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+	if err := s.ensureCachedDataIsPresent(); err != nil {
+		return nil, 0, err
+	}
+
+	if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
+		return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
+	}
+
+	if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
+		underlyingStream, err := s.openTarComponent(li.path)
+		if err != nil {
+			return nil, 0, err
+		}
+		closeUnderlyingStream := true
+		defer func() {
+			if closeUnderlyingStream {
+				underlyingStream.Close()
+			}
+		}()
+
+		// In order to handle the fact that digests != diffIDs (and thus that a
+		// caller which is trying to verify the blob will run into problems),
+		// we need to decompress blobs. This is a bit ugly, but it's a
+		// consequence of making everything addressable by their DiffID rather
+		// than by their digest...
+		//
+		// In particular, because the v2s2 manifest being generated uses
+		// DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
+		// layers not their _actual_ digest. The result is that copy/... will
+		// be verifying a "digest" which is not the actual layer's digest (but
+		// is instead the DiffID).
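+		//
+		// Concretely, if the archived blob is gzip-compressed, decompressing it
+		// here makes the returned bytes hash to the DiffID advertised above
+		// (the digest of the uncompressed tar), so verification succeeds.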
+
+		uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
+		if err != nil {
+			return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest)
+		}
+
+		newStream := uncompressedReadCloser{
+			Reader:             uncompressedStream,
+			underlyingCloser:   underlyingStream.Close,
+			uncompressedCloser: uncompressedStream.Close,
+		}
+		closeUnderlyingStream = false
+
+		return newStream, li.size, nil
+	}
+
+	return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	if instanceDigest != nil {
+		// How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
+		return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
+	}
+	return [][]byte{}, nil
+}
diff --git a/vendor/github.com/containers/image/v4/docker/tarfile/types.go b/vendor/github.com/containers/image/v4/docker/tarfile/types.go
new file mode 100644
index 000000000..c630f5227
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/docker/tarfile/types.go
@@ -0,0 +1,28 @@
+package tarfile
+
+import (
+	"github.com/containers/image/v4/manifest"
+	"github.com/opencontainers/go-digest"
+)
+
+// Various data structures.
+
+// Based on github.com/docker/docker/image/tarexport/tarexport.go
+const (
+	manifestFileName           = "manifest.json"
+	legacyLayerFileName        = "layer.tar"
+	legacyConfigFileName       = "json"
+	legacyVersionFileName      = "VERSION"
+	legacyRepositoriesFileName = "repositories"
+)
+
+// ManifestItem is an element of the array stored in the top-level manifest.json file.
+type ManifestItem struct {
+	Config       string
+	RepoTags     []string
+	Layers       []string
+	Parent       imageID                                      `json:",omitempty"`
+	LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"`
+}
+
+type imageID string
diff --git a/vendor/github.com/containers/image/v4/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v4/docker/wwwauthenticate.go
new file mode 100644
index 000000000..23664a74a
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/docker/wwwauthenticate.go
@@ -0,0 +1,159 @@
+package docker
+
+// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
+
+import (
+	"net/http"
+	"strings"
+)
+
+// challenge carries information from a WWW-Authenticate response header.
+// See RFC 7235.
+type challenge struct {
+	// Scheme is the auth-scheme according to RFC 7235
+	Scheme string
+
+	// Parameters are the auth-params according to RFC 7235
+	Parameters map[string]string
+}
+
+// Octet types from RFC 7230.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+func parseAuthHeader(header http.Header) []challenge {
+	challenges := []challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		if v != "" {
+			challenges = append(challenges, challenge{Scheme: v, Parameters: p})
+		}
+	}
+	return challenges
+}
+
+// NOTE: This is not a fully compliant parser per RFC 7235:
+// Most notably it does not support more than one challenge within a single header.
+// Some of the whitespace parsing also seems noncompliant.
+// But it is clearly better than what we used to have…
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	s = "," + skipSpace(s)
+	for strings.HasPrefix(s, ",") {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s[1:]))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
}
diff --git a/vendor/github.com/containers/image/v4/image/docker_list.go b/vendor/github.com/containers/image/v4/image/docker_list.go
new file mode 100644
index 000000000..a11cd06b9
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/image/docker_list.go
@@ -0,0 +1,94 @@
+package image
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"runtime"
+
+	"github.com/containers/image/v4/manifest"
+	"github.com/containers/image/v4/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+type platformSpec struct {
+	Architecture string   `json:"architecture"`
+	OS           string   `json:"os"`
+	OSVersion    string   `json:"os.version,omitempty"`
+	OSFeatures   []string `json:"os.features,omitempty"`
+	Variant      string   `json:"variant,omitempty"`
+	Features     []string `json:"features,omitempty"` // removed in OCI
+}
+
+// A manifestDescriptor references a platform-specific manifest.
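+// A typical list entry, for illustration, looks like
+//   {"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+//    "size": 7143, "digest": "sha256:…",
+//    "platform": {"architecture": "amd64", "os": "linux"}}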
+type manifestDescriptor struct { + manifest.Schema2Descriptor + Platform platformSpec `json:"platform"` +} + +type manifestList struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Manifests []manifestDescriptor `json:"manifests"` +} + +// chooseDigestFromManifestList parses blob as a schema2 manifest list, +// and returns the digest of the image appropriate for the current environment. +func chooseDigestFromManifestList(sys *types.SystemContext, blob []byte) (digest.Digest, error) { + wantedArch := runtime.GOARCH + if sys != nil && sys.ArchitectureChoice != "" { + wantedArch = sys.ArchitectureChoice + } + wantedOS := runtime.GOOS + if sys != nil && sys.OSChoice != "" { + wantedOS = sys.OSChoice + } + + list := manifestList{} + if err := json.Unmarshal(blob, &list); err != nil { + return "", err + } + for _, d := range list.Manifests { + if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { + return d.Digest, nil + } + } + return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS) +} + +func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { + targetManifestDigest, err := chooseDigestFromManifestList(sys, manblob) + if err != nil { + return nil, err + } + manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) + if err != nil { + return nil, err + } + + matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) + if err != nil { + return nil, errors.Wrap(err, "Error computing manifest digest") + } + if !matches { + return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest) + } + + return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) +} + +// ChooseManifestInstanceFromManifestList returns a digest of a manifest appropriate +// for the current system from the manifest available from src. +func ChooseManifestInstanceFromManifestList(ctx context.Context, sys *types.SystemContext, src types.UnparsedImage) (digest.Digest, error) { + // For now this only handles manifest.DockerV2ListMediaType; we can generalize it later, + // probably along with manifest list editing. + blob, mt, err := src.Manifest(ctx) + if err != nil { + return "", err + } + if mt != manifest.DockerV2ListMediaType { + return "", fmt.Errorf("Internal error: Trying to select an image from a non-manifest-list manifest type %s", mt) + } + return chooseDigestFromManifestList(sys, blob) +} diff --git a/vendor/github.com/containers/image/v4/image/docker_schema1.go b/vendor/github.com/containers/image/v4/image/docker_schema1.go new file mode 100644 index 000000000..97ebeac06 --- /dev/null +++ b/vendor/github.com/containers/image/v4/image/docker_schema1.go @@ -0,0 +1,202 @@ +package image + +import ( + "context" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type manifestSchema1 struct { + m *manifest.Schema1 +} + +func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema1FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestSchema1{m: m}, nil +} + +// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. 
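+// It is used e.g. by convertToManifestSchema1 in docker_schema2.go below, which passes
+// the destination's Docker reference plus newest-first fsLayers/history lists.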
+func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) { + m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture) + if err != nil { + return nil, err + } + return &manifestSchema1{m: m}, nil +} + +func (m *manifestSchema1) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestSchema1) manifestMIMEType() string { + return manifest.DockerV2Schema1SignedMediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema1) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) { + return nil, nil +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about +// layers in the resulting configuration isn't guaranteed to be returned to due how +// old image manifests work (docker v2s1 especially). +func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + v2s2, err := m.convertToManifestSchema2(nil, nil) + if err != nil { + return nil, err + } + return v2s2.OCIConfig(ctx) +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema1) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference. +// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) +func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { + // This is a bit convoluted: We can’t just have a "get embedded docker reference" method + // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually + // embed a full docker/distribution reference, but only the repo name and tag (without the host name). + // So we would have to provide a “return repo without host name, and tag” getter for the generic code, + // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the + // generic copy code needs to know about is reference.Named and that a manifest may need updating + // for some destinations. + name := reference.Path(ref) + var tag string + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } else { + tag = "" + } + return m.m.Name != name || m.m.Tag != tag +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
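+// The nil config getter is safe here: a schema1 manifest embeds its configuration
+// in the V1Compatibility history entries, so Inspect never needs to fetch a
+// separate config blob.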
+func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) {
+	return m.m.Inspect(nil)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+	return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest)
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+	copy := manifestSchema1{m: manifest.Schema1Clone(m.m)}
+	if options.LayerInfos != nil {
+		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+			return nil, err
+		}
+	}
+	if options.EmbeddedDockerReference != nil {
+		copy.m.Name = reference.Path(options.EmbeddedDockerReference)
+		if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
+			copy.m.Tag = tagged.Tag()
+		} else {
+			copy.m.Tag = ""
+		}
+	}
+
+	switch options.ManifestMIMEType {
+	case "": // No conversion, OK
+	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+		// We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature);
+		// so, handle conversions between them by doing nothing.
+	case manifest.DockerV2Schema2MediaType:
+		m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
+		if err != nil {
+			return nil, err
+		}
+		return memoryImageFromManifest(m2), nil
+	case imgspecv1.MediaTypeImageManifest:
+		// We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest
+		m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
+		if err != nil {
+			return nil, err
+		}
+		return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{
+			ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+			InformationOnly:  options.InformationOnly,
+		})
+	default:
+		return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType)
+	}
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+// Based on github.com/docker/docker/distribution/pull_v2.go
+func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) {
+	if len(m.m.ExtractedV1Compatibility) == 0 {
+		// What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing.
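+		// (Schema1 lists FSLayers/ExtractedV1Compatibility newest-first, while the
+		// schema2 layer list below is built oldest-first; that is why the loop walks
+		// the v1 indices backwards.)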
+ return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) + } + if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { + return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) + } + if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) + } + if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) + } + + // Build a list of the diffIDs for the non-empty layers. + diffIDs := []digest.Digest{} + var layers []manifest.Schema2Descriptor + for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { + v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index + + if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { + var size int64 + if uploadedLayerInfos != nil { + size = uploadedLayerInfos[v2Index].Size + } + var d digest.Digest + if layerDiffIDs != nil { + d = layerDiffIDs[v2Index] + } + layers = append(layers, manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Size: size, + Digest: m.m.FSLayers[v1Index].BlobSum, + }) + diffIDs = append(diffIDs, d) + } + } + configJSON, err := m.m.ToSchema2Config(diffIDs) + if err != nil { + return nil, err + } + configDescriptor := manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.container.image.v1+json", + Size: int64(len(configJSON)), + Digest: digest.FromBytes(configJSON), + } + + return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil +} diff --git a/vendor/github.com/containers/image/v4/image/docker_schema2.go b/vendor/github.com/containers/image/v4/image/docker_schema2.go new file mode 100644 index 000000000..9841bbd42 --- /dev/null +++ b/vendor/github.com/containers/image/v4/image/docker_schema2.go @@ -0,0 +1,357 @@ +package image + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/pkg/blobinfocache/none" + "github.com/containers/image/v4/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) +// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is +// a non-zero embedded timestamp; we could zero that, but that would just waste storage space +// in registries, so let’s use the same values. +var GzippedEmptyLayer = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer +const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +type manifestSchema2 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. 
+ m *manifest.Schema2 +} + +func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema2FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestSchema2{ + src: src, + m: m, + }, nil +} + +// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: +func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest { + return &manifestSchema2{ + src: src, + configBlob: configBlob, + m: manifest.Schema2FromComponents(config, layers), + } +} + +func (m *manifestSchema2) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestSchema2) manifestMIMEType() string { + return m.m.MediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema2) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about +// layers in the resulting configuration isn't guaranteed to be returned to due how +// old image manifests work (docker v2s1 especially). +func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + configBlob, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields + // than OCI v1. This unmarshal makes sure we drop docker v2s2 + // fields that aren't needed in OCI v1. + configOCI := &imgspecv1.Image{} + if err := json.Unmarshal(configBlob, configOCI); err != nil { + return nil, err + } + return configOCI, nil +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { + if m.configBlob == nil { + if m.src == nil { + return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") + } + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache) + if err != nil { + return nil, err + } + defer stream.Close() + blob, err := ioutil.ReadAll(stream) + if err != nil { + return nil, err + } + computedDigest := digest.FromBytes(blob) + if computedDigest != m.m.ConfigDescriptor.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) + } + m.configBlob = blob + } + return m.configBlob, nil +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema2) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference. 
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) +func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { + return false +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { + getter := func(info types.BlobInfo) ([]byte, error) { + if info.Digest != m.ConfigInfo().Digest { + // Shouldn't ever happen + return nil, errors.New("asked for a different config blob") + } + config, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + return config, nil + } + return m.m.Inspect(getter) +} + +// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. +// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute +// (most importantly it forces us to download the full layers even if they are already present at the destination). +func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { + return false +} + +// UpdatedImage returns a types.Image modified according to options. +// This does not change the state of the original Image object. +func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { + copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. + src: m.src, + configBlob: m.configBlob, + m: manifest.Schema2Clone(m.m), + } + if options.LayerInfos != nil { + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err + } + } + // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. 
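+	//
+	// From schema2 the only supported conversions are to schema1 (via
+	// convertToManifestSchema1, e.g. for registries that predate schema2) and to
+	// OCI; any other requested MIME type fails below as not implemented.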
+
+	switch options.ManifestMIMEType {
+	case "": // No conversion, OK
+	case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType:
+		return copy.convertToManifestSchema1(ctx, options.InformationOnly.Destination)
+	case imgspecv1.MediaTypeImageManifest:
+		return copy.convertToManifestOCI1(ctx)
+	default:
+		return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType)
+	}
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
+	return imgspecv1.Descriptor{
+		MediaType: d.MediaType,
+		Size:      d.Size,
+		Digest:    d.Digest,
+		URLs:      d.URLs,
+	}
+}
+
+func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context) (types.Image, error) {
+	configOCI, err := m.OCIConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	configOCIBytes, err := json.Marshal(configOCI)
+	if err != nil {
+		return nil, err
+	}
+
+	config := imgspecv1.Descriptor{
+		MediaType: imgspecv1.MediaTypeImageConfig,
+		Size:      int64(len(configOCIBytes)),
+		Digest:    digest.FromBytes(configOCIBytes),
+	}
+
+	layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
+	for idx := range layers {
+		layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
+		switch m.m.LayersDescriptors[idx].MediaType {
+		case manifest.DockerV2Schema2ForeignLayerMediaType:
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
+		case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
+		case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
+		case manifest.DockerV2Schema2LayerMediaType:
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
+		default:
+			return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
+		}
+	}
+
+	m1 := manifestOCI1FromComponents(config, m.src, configOCIBytes, layers)
+	return memoryImageFromManifest(m1), nil
+}
+
+// Based on docker/distribution/manifest/schema1/config_builder.go
+func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest types.ImageDestination) (types.Image, error) {
+	configBytes, err := m.ConfigBlob(ctx)
+	if err != nil {
+		return nil, err
+	}
+	imageConfig := &manifest.Schema2Image{}
+	if err := json.Unmarshal(configBytes, imageConfig); err != nil {
+		return nil, err
+	}
+
+	// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
+	fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
+	history := make([]manifest.Schema1History, len(imageConfig.History))
+	nonemptyLayerIndex := 0
+	var parentV1ID string // Set in the loop
+	v1ID := ""
+	haveGzippedEmptyLayer := false
+	if len(imageConfig.History) == 0 {
+		// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
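+		// (imageConfig.History is oldest-first and includes empty-layer entries;
+		// each empty entry is backed below by the fixed GzippedEmptyLayer blob, and
+		// the v1Index arithmetic reverses the order into schema1's newest-first form.)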
+ return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) + } + for v2Index, historyEntry := range imageConfig.History { + parentV1ID = v1ID + v1Index := len(imageConfig.History) - 1 - v2Index + + var blobDigest digest.Digest + if historyEntry.EmptyLayer { + if !haveGzippedEmptyLayer { + logrus.Debugf("Uploading empty layer during conversion to schema 1") + // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, + // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. + info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false) + if err != nil { + return nil, errors.Wrap(err, "Error uploading empty layer") + } + if info.Digest != GzippedEmptyLayerDigest { + return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, GzippedEmptyLayerDigest) + } + haveGzippedEmptyLayer = true + } + blobDigest = GzippedEmptyLayerDigest + } else { + if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { + return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) + } + blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest + nonemptyLayerIndex++ + } + + // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. + v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) + if err != nil { + return nil, err + } + v1ID = v + + fakeImage := manifest.Schema1V1Compatibility{ + ID: v1ID, + Parent: parentV1ID, + Comment: historyEntry.Comment, + Created: historyEntry.Created, + Author: historyEntry.Author, + ThrowAway: historyEntry.EmptyLayer, + } + fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} + v1CompatibilityBytes, err := json.Marshal(&fakeImage) + if err != nil { + return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) + } + + fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} + history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} + // Note that parentV1ID of the top layer is preserved when exiting this loop + } + + // Now patch in real configuration for the top layer (v1Index == 0) + v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. + if err != nil { + return nil, err + } + v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) + if err != nil { + return nil, err + } + history[0].V1Compatibility = string(v1Config) + + m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) + if err != nil { + return nil, err // This should never happen, we should have created all the components correctly. + } + return memoryImageFromManifest(m1), nil +} + +func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { + if err := blobDigest.Validate(); err != nil { + return "", err + } + parts := append([]string{blobDigest.Hex()}, others...) 
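+	// The resulting ID is hex(sha256("<blob digest hex> <parent v1 ID> [<config JSON>]")),
+	// i.e. a digest over the space-joined parts, matching what Docker computes.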
+ v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) + return hex.EncodeToString(v1IDHash[:]), nil +} + +func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Preserve everything we don't specifically know about. + // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) + rawContents := map[string]*json.RawMessage{} + if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! + return nil, err + } + delete(rawContents, "rootfs") + delete(rawContents, "history") + + updates := map[string]interface{}{"id": v1ID} + if parentV1ID != "" { + updates["parent"] = parentV1ID + } + if throwaway { + updates["throwaway"] = throwaway + } + for field, value := range updates { + encoded, err := json.Marshal(value) + if err != nil { + return nil, err + } + rawContents[field] = (*json.RawMessage)(&encoded) + } + return json.Marshal(rawContents) +} diff --git a/vendor/github.com/containers/image/v4/image/manifest.go b/vendor/github.com/containers/image/v4/image/manifest.go new file mode 100644 index 000000000..f384d2fb8 --- /dev/null +++ b/vendor/github.com/containers/image/v4/image/manifest.go @@ -0,0 +1,73 @@ +package image + +import ( + "context" + "fmt" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/types" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// genericManifest is an interface for parsing, modifying image manifests and related data. +// Note that the public methods are intended to be a subset of types.Image +// so that embedding a genericManifest into structs works. +// will support v1 one day... +type genericManifest interface { + serialize() ([]byte, error) + manifestMIMEType() string + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. + ConfigInfo() types.BlobInfo + // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. + // The result is cached; it is OK to call this however often you need. + ConfigBlob(context.Context) ([]byte, error) + // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about + // layers in the resulting configuration isn't guaranteed to be returned to due how + // old image manifests work (docker v2s1 especially). + OCIConfig(context.Context) (*imgspecv1.Image, error) + // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []types.BlobInfo + // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. + // It returns false if the manifest does not embed a Docker reference. + // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) 
+ EmbeddedDockerReferenceConflicts(ref reference.Named) bool + // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. + Inspect(context.Context) (*types.ImageInspectInfo, error) + // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. + // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute + // (most importantly it forces us to download the full layers even if they are already present at the destination). + UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool + // UpdatedImage returns a types.Image modified according to options. + // This does not change the state of the original Image object. + UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) +} + +// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. +// If manblob is a manifest list, it implicitly chooses an appropriate image from the list. +func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { + switch manifest.NormalizedMIMEType(mt) { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: + return manifestSchema1FromManifest(manblob) + case imgspecv1.MediaTypeImageManifest: + return manifestOCI1FromManifest(src, manblob) + case manifest.DockerV2Schema2MediaType: + return manifestSchema2FromManifest(src, manblob) + case manifest.DockerV2ListMediaType: + return manifestSchema2FromManifestList(ctx, sys, src, manblob) + default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) + } +} + +// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo. +func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo { + blobs := make([]types.BlobInfo, len(layers)) + for i, layer := range layers { + blobs[i] = layer.BlobInfo + } + return blobs +} diff --git a/vendor/github.com/containers/image/v4/image/memory.go b/vendor/github.com/containers/image/v4/image/memory.go new file mode 100644 index 000000000..255965e14 --- /dev/null +++ b/vendor/github.com/containers/image/v4/image/memory.go @@ -0,0 +1,65 @@ +package image + +import ( + "context" + + "github.com/pkg/errors" + + "github.com/containers/image/v4/types" +) + +// memoryImage is a mostly-implementation of types.Image assembled from data +// created in memory, used primarily as a return value of types.Image.UpdatedImage +// as a way to carry various structured information in a type-safe and easy-to-use way. +// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone +// collection of all related information, e.g. there is no way to get layer blobs +// from a memoryImage. +type memoryImage struct { + genericManifest + serializedManifest []byte // A private cache for Manifest() +} + +func memoryImageFromManifest(m genericManifest) types.Image { + return &memoryImage{ + genericManifest: m, + serializedManifest: nil, + } +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. 
+func (i *memoryImage) Reference() types.ImageReference { + // It would really be inappropriate to return the ImageReference of the image this was based on. + return nil +} + +// Size returns the size of the image as stored, if known, or -1 if not. +func (i *memoryImage) Size() (int64, error) { + return -1, nil +} + +// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) { + if i.serializedManifest == nil { + m, err := i.genericManifest.serialize() + if err != nil { + return nil, "", err + } + i.serializedManifest = m + } + return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil +} + +// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { + // Modifying an image invalidates signatures; a caller asking the updated image for signatures + // is probably confused. + return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") +} + +// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v4/image/oci.go b/vendor/github.com/containers/image/v4/image/oci.go new file mode 100644 index 000000000..142b0f28f --- /dev/null +++ b/vendor/github.com/containers/image/v4/image/oci.go @@ -0,0 +1,214 @@ +package image + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/pkg/blobinfocache/none" + "github.com/containers/image/v4/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type manifestOCI1 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of m.Config. + m *manifest.OCI1 +} + +func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.OCI1FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestOCI1{ + src: src, + m: m, + }, nil +} + +// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: +func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { + return &manifestOCI1{ + src: src, + configBlob: configBlob, + m: manifest.OCI1FromComponents(config, layers), + } +} + +func (m *manifestOCI1) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestOCI1) manifestMIMEType() string { + return imgspecv1.MediaTypeImageManifest +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. 
+func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
+ return m.m.ConfigInfo()
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
+ if m.configBlob == nil {
+ if m.src == nil {
+ return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
+ }
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
+ if err != nil {
+ return nil, err
+ }
+ defer stream.Close()
+ blob, err := ioutil.ReadAll(stream)
+ if err != nil {
+ return nil, err
+ }
+ computedDigest := digest.FromBytes(blob)
+ if computedDigest != m.m.Config.Digest {
+ return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
+ }
+ m.configBlob = blob
+ }
+ return m.configBlob, nil
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+ cb, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ configOCI := &imgspecv1.Image{}
+ if err := json.Unmarshal(cb, configOCI); err != nil {
+ return nil, err
+ }
+ return configOCI, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
+ return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+ getter := func(info types.BlobInfo) ([]byte, error) {
+ if info.Digest != m.ConfigInfo().Digest {
+ // Shouldn't ever happen
+ return nil, errors.New("asked for a different config blob")
+ }
+ config, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return config, nil
+ }
+ return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
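
As an illustrative sketch (not part of the vendored file): the digest-verified, cached config blob that ConfigBlob above produces is what backs types.Image.OCIConfig, so application code can inspect an image's platform roughly like this, assuming img was obtained elsewhere (e.g. via image.FromSource):

package example

import (
	"context"
	"fmt"

	"github.com/containers/image/v4/types"
)

// PrintPlatform reads the parsed, digest-verified OCI configuration of img.
func PrintPlatform(ctx context.Context, img types.Image) error {
	cfg, err := img.OCIConfig(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("architecture=%s os=%s\n", cfg.Architecture, cfg.OS)
	return nil
}
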
+func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
+ src: m.src,
+ configBlob: m.configBlob,
+ m: manifest.OCI1Clone(m.m),
+ }
+ if options.LayerInfos != nil {
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
+ }
+ }
+ // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
+
+ switch options.ManifestMIMEType {
+ case "": // No conversion, OK
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+ // We can't directly convert to V1, but we can transitively convert via a V2 image
+ m2, err := copy.convertToManifestSchema2()
+ if err != nil {
+ return nil, err
+ }
+ return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{
+ ManifestMIMEType: options.ManifestMIMEType,
+ InformationOnly: options.InformationOnly,
+ })
+ case manifest.DockerV2Schema2MediaType:
+ return copy.convertToManifestSchema2()
+ default:
+ return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType)
+ }
+
+ return memoryImageFromManifest(&copy), nil
+}
+
+func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
+ return manifest.Schema2Descriptor{
+ MediaType: d.MediaType,
+ Size: d.Size,
+ Digest: d.Digest,
+ URLs: d.URLs,
+ }
+}
+
+func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
+ // Create a copy of the descriptor.
+ config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
+
+ // The only difference between OCI and DockerSchema2 is the mediatypes. The
+ // media type of the manifest is handled by manifestSchema2FromComponents.
+ config.MediaType = manifest.DockerV2Schema2ConfigMediaType
+
+ layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
+ for idx := range layers {
+ layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
+ switch layers[idx].MediaType {
+ case imgspecv1.MediaTypeImageLayerNonDistributable:
+ layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType
+ case imgspecv1.MediaTypeImageLayerNonDistributableGzip:
+ layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
+ case imgspecv1.MediaTypeImageLayerNonDistributableZstd:
+ return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+ case imgspecv1.MediaTypeImageLayer:
+ layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
+ case imgspecv1.MediaTypeImageLayerGzip:
+ layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
+ case imgspecv1.MediaTypeImageLayerZstd:
+ return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+ default:
+ return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)
+ }
+ }
+
+ // Rather than copying the ConfigBlob now, we just pass m.src to the
+ // translated manifest, since the only difference is the mediatype of
+ // descriptors; there is no change to any blob stored in m.src.
+ m1 := manifestSchema2FromComponents(config, m.src, nil, layers)
+ return memoryImageFromManifest(m1), nil
+}
diff --git a/vendor/github.com/containers/image/v4/image/sourced.go b/vendor/github.com/containers/image/v4/image/sourced.go
new file mode 100644
index 000000000..d2a3e2ee6
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/image/sourced.go
@@ -0,0 +1,104 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+ "context"
+
+ "github.com/containers/image/v4/types"
+)
+
+// imageCloser implements types.ImageCloser, perhaps allowing simple users
+// to use a single object without having to keep a reference to a types.ImageSource
+// only to call types.ImageSource.Close().
+type imageCloser struct {
+ types.Image
+ src types.ImageSource
+}
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to
+// close the Image.)
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
+ img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
+ if err != nil {
+ return nil, err
+ }
+ return &imageCloser{
+ Image: img,
+ src: src,
+ }, nil
+}
+
+func (ic *imageCloser) Close() error {
+ return ic.src.Close()
+}
+
+// sourcedImage is a general set of utilities for working with container images,
+// whatever is their underlying location (i.e. dockerImageSource-independent).
+// Note the existence of skopeo/docker.Image: some instances of a `types.Image`
+// may not be a `sourcedImage` directly. However, most users of `types.Image`
+// do not care, and those who care about `skopeo/docker.Image` know they do.
+type sourcedImage struct {
+ *UnparsedImage
+ manifestBlob []byte
+ manifestMIMEType string
+ // genericManifest contains data corresponding to manifestBlob.
+ // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
+ // if you want to preserve the original manifest; use manifestBlob directly.
+ genericManifest
+}
+
+// FromUnparsedImage returns a types.Image implementation for unparsed.
+// If unparsed represents a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate single image.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) {
+ // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
+ // we want to be able to use unparsed.src.
We could make that an explicit interface, but, well, + // this is the only UnparsedImage implementation around, anyway. + + // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). + manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) + if err != nil { + return nil, err + } + + parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) + if err != nil { + return nil, err + } + + return &sourcedImage{ + UnparsedImage: unparsed, + manifestBlob: manifestBlob, + manifestMIMEType: manifestMIMEType, + genericManifest: parsedManifest, + }, nil +} + +// Size returns the size of the image as stored, if it's known, or -1 if it isn't. +func (i *sourcedImage) Size() (int64, error) { + return -1, nil +} + +// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. +func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) { + return i.manifestBlob, i.manifestMIMEType, nil +} + +func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return i.UnparsedImage.src.LayerInfosForCopy(ctx) +} diff --git a/vendor/github.com/containers/image/v4/image/unparsed.go b/vendor/github.com/containers/image/v4/image/unparsed.go new file mode 100644 index 000000000..d73107654 --- /dev/null +++ b/vendor/github.com/containers/image/v4/image/unparsed.go @@ -0,0 +1,95 @@ +package image + +import ( + "context" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// UnparsedImage implements types.UnparsedImage . +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +type UnparsedImage struct { + src types.ImageSource + instanceDigest *digest.Digest + cachedManifest []byte // A private cache for Manifest(); nil if not yet known. + // A private cache for Manifest(), may be the empty string if guessing failed. + // Valid iff cachedManifest is not nil. + cachedManifestMIMEType string + cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known. +} + +// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). +// +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. +func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { + return &UnparsedImage{ + src: src, + instanceDigest: instanceDigest, + } +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (i *UnparsedImage) Reference() types.ImageReference { + // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity. + return i.src.Reference() +} + +// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. 
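
The Manifest implementation that follows enforces digest verification via manifest.MatchesDigest. A minimal standalone sketch of that check (not part of the patch; blob and expected are assumed to come from the caller, e.g. from a docker://repo@sha256:... reference):

package example

import (
	"fmt"

	"github.com/containers/image/v4/manifest"
	digest "github.com/opencontainers/go-digest"
)

// Verify rejects a manifest blob that does not match the digest it was requested by.
func Verify(blob []byte, expected digest.Digest) error {
	matches, err := manifest.MatchesDigest(blob, expected)
	if err != nil {
		return err
	}
	if !matches {
		return fmt.Errorf("manifest does not match digest %s", expected)
	}
	return nil
}
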
+func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+ if i.cachedManifest == nil {
+ m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
+ if err != nil {
+ return nil, "", err
+ }
+
+ // ImageSource.GetManifest does not do digest verification, but we do;
+ // this immediately protects also any user of types.Image.
+ if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
+ matches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return nil, "", errors.Wrap(err, "Error computing manifest digest")
+ }
+ if !matches {
+ return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
+ }
+ }
+
+ i.cachedManifest = m
+ i.cachedManifestMIMEType = mt
+ }
+ return i.cachedManifest, i.cachedManifestMIMEType, nil
+}
+
+// expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known.
+// The bool return value seems redundant with digest != ""; it is used explicitly
+// to refuse (unexpected) situations when the digest exists but is "".
+func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
+ if i.instanceDigest != nil {
+ return *i.instanceDigest, true
+ }
+ ref := i.Reference().DockerReference()
+ if ref != nil {
+ if canonical, ok := ref.(reference.Canonical); ok {
+ return canonical.Digest(), true
+ }
+ }
+ return "", false
+}
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
+ if i.cachedSignatures == nil {
+ sigs, err := i.src.GetSignatures(ctx, i.instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ i.cachedSignatures = sigs
+ }
+ return i.cachedSignatures, nil
+}
diff --git a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v4/internal/pkg/keyctl/key.go
new file mode 100644
index 000000000..88e123cdd
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/internal/pkg/keyctl/key.go
@@ -0,0 +1,73 @@
+// Copyright 2015 Jesse Sipprell. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package keyctl
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Key represents a single key linked to one or more kernel keyrings.
+type Key struct {
+ Name string
+
+ id, ring keyID
+ size int
+}
+
+// ID returns the 32-bit kernel identifier for a specific key
+func (k *Key) ID() int32 {
+ return int32(k.id)
+}
+
+// Get the key's value as a byte slice
+func (k *Key) Get() ([]byte, error) {
+ var (
+ b []byte
+ err error
+ sizeRead int
+ )
+
+ if k.size == 0 {
+ k.size = 512
+ }
+
+ size := k.size
+
+ b = make([]byte, int(size))
+ sizeRead = size + 1
+ for sizeRead > size {
+ r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size)
+ if err != nil {
+ return nil, err
+ }
+
+ if sizeRead = int(r1); sizeRead > size {
+ b = make([]byte, sizeRead)
+ size = sizeRead
+ sizeRead = size + 1
+ } else {
+ k.size = sizeRead
+ }
+ }
+ return b[:k.size], err
+}
+
+// Unlink a key from the keyring it was loaded from (or added to). If the key
+// is not linked to any other keyrings, it is destroyed.
+func (k *Key) Unlink() error {
+ _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0)
+ return err
+}
+
+// Describe returns a string describing the attributes of a specified key
+func (k *Key) Describe() (string, error) {
+ keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id))
+ if err != nil {
+ return "", err
+ }
+ return keyAttr, nil
+}
diff --git a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v4/internal/pkg/keyctl/keyring.go
new file mode 100644
index 000000000..4bf170156
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/internal/pkg/keyctl/keyring.go
@@ -0,0 +1,120 @@
+// Copyright 2015 Jesse Sipprell. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+// Package keyctl is a Go interface to Linux kernel keyrings (keyctl interface)
+//
+// Deprecated: Most callers should use either golang.org/x/sys/unix directly,
+// or the original (and more extensive) github.com/jsipprell/keyctl .
+package keyctl
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Keyring is the basic interface to a Linux keyctl keyring.
+type Keyring interface {
+ ID
+ Add(string, []byte) (*Key, error)
+ Search(string) (*Key, error)
+}
+
+type keyring struct {
+ id keyID
+}
+
+// ID is the unique 32-bit serial number identifier that all Keys and Keyrings have.
+type ID interface {
+ ID() int32
+}
+
+// Add a new key to a keyring. The key can be searched for later by name.
+func (kr *keyring) Add(name string, key []byte) (*Key, error) {
+ r, err := unix.AddKey("user", name, key, int(kr.id))
+ if err == nil {
+ key := &Key{Name: name, id: keyID(r), ring: kr.id}
+ return key, nil
+ }
+ return nil, err
+}
+
+// Search for a key by name; this also searches child keyrings linked to this
+// one. The key, if found, is linked to the top keyring that Search() was called
+// from.
+func (kr *keyring) Search(name string) (*Key, error) {
+ id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0)
+ if err == nil {
+ return &Key{Name: name, id: keyID(id), ring: kr.id}, nil
+ }
+ return nil, err
+}
+
+// ID returns the 32-bit kernel identifier of a keyring
+func (kr *keyring) ID() int32 {
+ return int32(kr.id)
+}
+
+// SessionKeyring returns the current login session keyring
+func SessionKeyring() (Keyring, error) {
+ return newKeyring(unix.KEY_SPEC_SESSION_KEYRING)
+}
+
+// UserKeyring returns the keyring specific to the current user.
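
To illustrate the Keyring interface above, a hypothetical sketch written as if inside this package (which is internal to containers/image and therefore cannot be imported by outside code):

// exampleStoreAndFetch stores a "user"-type key in the session keyring and
// reads it back; names and payload here are illustrative only.
func exampleStoreAndFetch() error {
	kr, err := SessionKeyring()
	if err != nil {
		return err
	}
	// Add links a new key into the session keyring.
	if _, err := kr.Add("example-credentials", []byte("s3cr3t")); err != nil {
		return err
	}
	// Search finds it again by name (also searching linked child keyrings).
	key, err := kr.Search("example-credentials")
	if err != nil {
		return err
	}
	payload, err := key.Get()
	if err != nil {
		return err
	}
	_ = payload
	return nil
}
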
+func UserKeyring() (Keyring, error) {
+ return newKeyring(unix.KEY_SPEC_USER_KEYRING)
+}
+
+// Unlink an object from a keyring
+func Unlink(parent Keyring, child ID) error {
+ _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0)
+ return err
+}
+
+// Link a key into a keyring
+func Link(parent Keyring, child ID) error {
+ _, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0)
+ return err
+}
+
+// ReadUserKeyring reads the user keyring and returns a slice of Keys, one for
+// each key ID (key_serial_t) linked to it
+func ReadUserKeyring() ([]*Key, error) {
+ var (
+ b []byte
+ err error
+ sizeRead int
+ )
+ krSize := 4
+ size := krSize
+ b = make([]byte, size)
+ sizeRead = size + 1
+ for sizeRead > size {
+ r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size)
+ if err != nil {
+ return nil, err
+ }
+
+ if sizeRead = int(r1); sizeRead > size {
+ b = make([]byte, sizeRead)
+ size = sizeRead
+ sizeRead = size + 1
+ } else {
+ krSize = sizeRead
+ }
+ }
+ keyIDs := getKeyIDsFromByte(b[:krSize])
+ return keyIDs, err
+}
+
+func getKeyIDsFromByte(byteKeyIDs []byte) []*Key {
+ idSize := 4
+ var keys []*Key
+ for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize {
+ tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx]))
+ keys = append(keys, &Key{id: keyID(tempID)})
+ }
+ return keys
+}
diff --git a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v4/internal/pkg/keyctl/perm.go
new file mode 100644
index 000000000..ae9697149
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/internal/pkg/keyctl/perm.go
@@ -0,0 +1,33 @@
+// Copyright 2015 Jesse Sipprell. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package keyctl
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// KeyPerm represents in-kernel access control permission to keys and keyrings
+// as a 32-bit integer broken up into four permission sets, one per byte.
+// In MSB order, the perms are: Processor, User, Group, Other.
+type KeyPerm uint32
+
+const (
+ // PermOtherAll sets all permission for Other
+ PermOtherAll KeyPerm = 0x3f << (8 * iota)
+ // PermGroupAll sets all permission for Group
+ PermGroupAll
+ // PermUserAll sets all permission for User
+ PermUserAll
+ // PermProcessAll sets all permission for Processor
+ PermProcessAll
+)
+
+// SetPerm sets the permissions on a key or keyring.
+func SetPerm(k ID, p KeyPerm) error {
+ err := unix.KeyctlSetperm(int(k.ID()), uint32(p))
+ return err
+}
diff --git a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v4/internal/pkg/keyctl/sys_linux.go
new file mode 100644
index 000000000..196c82760
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/internal/pkg/keyctl/sys_linux.go
@@ -0,0 +1,25 @@
+// Copyright 2015 Jesse Sipprell. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package keyctl
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+type keyID int32
+
+func newKeyring(id keyID) (*keyring, error) {
+ r1, err := unix.KeyctlGetKeyringID(int(id), true)
+ if err != nil {
+ return nil, err
+ }
+
+ if id < 0 {
+ r1 = int(id)
+ }
+ return &keyring{id: keyID(r1)}, nil
+}
diff --git a/vendor/github.com/containers/image/v4/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v4/internal/tmpdir/tmpdir.go
new file mode 100644
index 000000000..8c776929c
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/internal/tmpdir/tmpdir.go
@@ -0,0 +1,29 @@
+package tmpdir
+
+import (
+ "os"
+ "runtime"
+)
+
+// unixTempDirForBigFiles is the directory path to store big files on non-Windows systems.
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/internal/tmpdir.unixTempDirForBigFiles=$your_path'
+var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles

+// builtinUnixTempDirForBigFiles is the directory path to store big files.
+// Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
+// DO NOT change this, instead see unixTempDirForBigFiles above.
+const builtinUnixTempDirForBigFiles = "/var/tmp"
+
+// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files.
+// On non-Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp
+// which on systemd-based systems could be the unsuitable tmpfs filesystem.
+func TemporaryDirectoryForBigFiles() string {
+ var temporaryDirectoryForBigFiles string
+ if runtime.GOOS == "windows" {
+ temporaryDirectoryForBigFiles = os.TempDir()
+ } else {
+ temporaryDirectoryForBigFiles = unixTempDirForBigFiles
+ }
+ return temporaryDirectoryForBigFiles
+}
diff --git a/vendor/github.com/containers/image/v4/manifest/docker_schema1.go b/vendor/github.com/containers/image/v4/manifest/docker_schema1.go
new file mode 100644
index 000000000..3c172504a
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/manifest/docker_schema1.go
@@ -0,0 +1,316 @@
+package manifest
+
+import (
+ "encoding/json"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/containers/image/v4/docker/reference"
+ "github.com/containers/image/v4/types"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
+type Schema1FSLayers struct {
+ BlobSum digest.Digest `json:"blobSum"`
+}
+
+// Schema1History is an entry of the "history" array in docker/distribution schema 1.
+type Schema1History struct {
+ V1Compatibility string `json:"v1Compatibility"`
+}
+
+// Schema1 is a manifest in docker/distribution schema 1.
+type Schema1 struct {
+ Name string `json:"name"`
+ Tag string `json:"tag"`
+ Architecture string `json:"architecture"`
+ FSLayers []Schema1FSLayers `json:"fsLayers"`
+ History []Schema1History `json:"history"` // Keep this in sync with ExtractedV1Compatibility!
+ ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"` // Keep this in sync with History! Does not contain the full config (Schema2V1Image)
+ SchemaVersion int `json:"schemaVersion"`
+}
+
+type schema1V1CompatibilityContainerConfig struct {
+ Cmd []string
+}
+
+// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
+type Schema1V1Compatibility struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"`
+ Author string `json:"author,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
+}
+
+// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
+// (NOTE: The instance is not necessarily a literal representation of the original blob;
+// layers with duplicate IDs are eliminated.)
+func Schema1FromManifest(manifest []byte) (*Schema1, error) {
+ s1 := Schema1{}
+ if err := json.Unmarshal(manifest, &s1); err != nil {
+ return nil, err
+ }
+ if s1.SchemaVersion != 1 {
+ return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
+ }
+ if err := s1.initialize(); err != nil {
+ return nil, err
+ }
+ if err := s1.fixManifestLayers(); err != nil {
+ return nil, err
+ }
+ return &s1, nil
+}
+
+// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
+func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) {
+ var name, tag string
+ if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+ name = reference.Path(ref)
+ if tagged, ok := ref.(reference.NamedTagged); ok {
+ tag = tagged.Tag()
+ }
+ }
+ s1 := Schema1{
+ Name: name,
+ Tag: tag,
+ Architecture: architecture,
+ FSLayers: fsLayers,
+ History: history,
+ SchemaVersion: 1,
+ }
+ if err := s1.initialize(); err != nil {
+ return nil, err
+ }
+ return &s1, nil
+}
+
+// Schema1Clone creates a copy of the supplied Schema1 manifest.
+func Schema1Clone(src *Schema1) *Schema1 {
+ copy := *src
+ return &copy
+}
+
+// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest.
+func (m *Schema1) initialize() error {
+ if len(m.FSLayers) != len(m.History) {
+ return errors.New("length of history not equal to number of layers")
+ }
+ if len(m.FSLayers) == 0 {
+ return errors.New("no FSLayers in manifest")
+ }
+ m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
+ for i, h := range m.History {
+ if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
+ return errors.Wrapf(err, "Error parsing v2s1 history entry %d", i)
+ }
+ }
+ return nil
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema1) ConfigInfo() types.BlobInfo {
+ return types.BlobInfo{}
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
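
A small usage sketch for the constructors above (not part of the vendored file; blob is assumed to be a raw schema 1 manifest fetched elsewhere):

package example

import (
	"fmt"

	"github.com/containers/image/v4/manifest"
)

// DescribeSchema1 parses a schema 1 manifest blob and lists its layers,
// root layer first, including "throwaway" (empty) layers.
func DescribeSchema1(blob []byte) error {
	s1, err := manifest.Schema1FromManifest(blob)
	if err != nil {
		return err
	}
	for _, layer := range s1.LayerInfos() {
		fmt.Println(layer.Digest, "empty:", layer.EmptyLayer)
	}
	return nil
}
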
+func (m *Schema1) LayerInfos() []LayerInfo {
+ layers := make([]LayerInfo, len(m.FSLayers))
+ for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
+ layers[(len(m.FSLayers)-1)-i] = LayerInfo{
+ BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1},
+ EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway,
+ }
+ }
+ return layers
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+ // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
+ if len(m.FSLayers) != len(layerInfos) {
+ return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
+ }
+ m.FSLayers = make([]Schema1FSLayers, len(layerInfos))
+ for i, info := range layerInfos {
+ // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest,
+ // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
+ // So, we don't bother recomputing the IDs in m.History.V1Compatibility.
+ m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest
+ }
+ return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *Schema1) Serialize() ([]byte, error) {
+ // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
+ unsigned, err := json.Marshal(*m)
+ if err != nil {
+ return nil, err
+ }
+ return AddDummyV2S1Signature(unsigned)
+}
+
+// fixManifestLayers, after validating the supplied manifest
+// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History),
+// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates,
+// both from m.History and m.FSLayers).
+// Note that even after this succeeds, m.FSLayers may contain duplicate entries
+// (for Dockerfile operations which change the configuration but not the filesystem).
+func (m *Schema1) fixManifestLayers() error {
+ // m.initialize() has verified that len(m.FSLayers) == len(m.History)
+ for _, compat := range m.ExtractedV1Compatibility {
+ if err := validateV1ID(compat.ID); err != nil {
+ return err
+ }
+ }
+ if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" {
+ return errors.New("Invalid parent ID in the base layer of the image")
+ }
+ // check for general (non-consecutive) duplicates and error out, instead of deadlocking in the loop below
+ idmap := make(map[string]struct{})
+ var lastID string
+ for _, img := range m.ExtractedV1Compatibility {
+ // skip IDs that appear after each other, we handle those later
+ if _, exists := idmap[img.ID]; img.ID != lastID && exists {
+ return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
+ }
+ lastID = img.ID
+ idmap[lastID] = struct{}{}
+ }
+ // backwards loop so that we keep the remaining indexes after removing items
+ for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- {
+ if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID.
remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...) + } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { + return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) + } + } + return nil +} + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func validateV1ID(id string) error { + if ok := validHex.MatchString(id); !ok { + return errors.Errorf("image ID %q is invalid", id) + } + return nil +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + s1 := &Schema2V1Image{} + if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: m.Tag, + Created: &s1.Created, + DockerVersion: s1.DockerVersion, + Architecture: s1.Architecture, + Os: s1.OS, + Layers: layerInfosToStrings(m.LayerInfos()), + } + if s1.Config != nil { + i.Labels = s1.Config.Labels + i.Env = s1.Config.Env + } + return i, nil +} + +// ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs. +func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { + // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields + // that aren't directly comparable using info from the manifest. + if len(m.History) == 0 { + return nil, errors.New("image has no layers") + } + s1 := Schema2V1Image{} + config := []byte(m.History[0].V1Compatibility) + err := json.Unmarshal(config, &s1) + if err != nil { + return nil, errors.Wrapf(err, "error decoding configuration") + } + // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, + // adding some fields that aren't "omitempty". + if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") { + config, err = json.Marshal(&s1) + if err != nil { + return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s1) + } + } + // Build the history. + convertedHistory := []Schema2History{} + for _, compat := range m.ExtractedV1Compatibility { + hitem := Schema2History{ + Created: compat.Created, + CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), + Author: compat.Author, + Comment: compat.Comment, + EmptyLayer: compat.ThrowAway, + } + convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + } + // Build the rootfs information. We need the decompressed sums that we've been + // calculating to fill in the DiffIDs. It's expected (but not enforced by us) + // that the number of diffIDs corresponds to the number of non-EmptyLayer + // entries in the history. + rootFS := &Schema2RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + // And now for some raw manipulation. + raw := make(map[string]*json.RawMessage) + err = json.Unmarshal(config, &raw) + if err != nil { + return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s1) + } + // Drop some fields. + delete(raw, "id") + delete(raw, "parent") + delete(raw, "parent_id") + delete(raw, "layer_id") + delete(raw, "throwaway") + delete(raw, "Size") + // Add the history and rootfs information. 
+ rootfs, err := json.Marshal(rootFS) + if err != nil { + return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + } + rawRootfs := json.RawMessage(rootfs) + raw["rootfs"] = &rawRootfs + history, err := json.Marshal(convertedHistory) + if err != nil { + return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) + } + rawHistory := json.RawMessage(history) + raw["history"] = &rawHistory + // Encode the result. + config, err = json.Marshal(raw) + if err != nil { + return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err) + } + return config, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { + image, err := m.ToSchema2Config(diffIDs) + if err != nil { + return "", err + } + return digest.FromBytes(image).Hex(), nil +} diff --git a/vendor/github.com/containers/image/v4/manifest/docker_schema2.go b/vendor/github.com/containers/image/v4/manifest/docker_schema2.go new file mode 100644 index 000000000..84b189c8e --- /dev/null +++ b/vendor/github.com/containers/image/v4/manifest/docker_schema2.go @@ -0,0 +1,349 @@ +package manifest + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/containers/image/v4/pkg/compression" + "github.com/containers/image/v4/pkg/strslice" + "github.com/containers/image/v4/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. +type Schema2Descriptor struct { + MediaType string `json:"mediaType"` + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` + URLs []string `json:"urls,omitempty"` +} + +// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor. +func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo { + return types.BlobInfo{ + Digest: desc.Digest, + Size: desc.Size, + URLs: desc.URLs, + MediaType: desc.MediaType, + } +} + +// Schema2 is a manifest in docker/distribution schema 2. +type Schema2 struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + ConfigDescriptor Schema2Descriptor `json:"config"` + LayersDescriptors []Schema2Descriptor `json:"layers"` +} + +// Schema2Port is a Port, a string containing port number and protocol in the +// format "80/tcp", from docker/go-connections/nat. +type Schema2Port string + +// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from +// docker/go-connections/nat. +type Schema2PortSet map[Schema2Port]struct{} + +// Schema2HealthConfig is a HealthConfig, which holds configuration settings +// for the HEALTHCHECK feature, from docker/docker/api/types/container. +type Schema2HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. 
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
+
+// Schema2Config is a Config in docker/docker/api/types/container.
+type Schema2Config struct {
+ Hostname string // Hostname
+ Domainname string // Domainname
+ User string // User that will run the command(s) inside the container, also supports user:group
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStdout bool // Attach the standard output
+ AttachStderr bool // Attach the standard error
+ ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports
+ Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
+ OpenStdin bool // Open stdin
+ StdinOnce bool // If true, close stdin after the first attached client disconnects.
+ Env []string // List of environment variables to set in the container
+ Cmd strslice.StrSlice // Command to run when starting the container
+ Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+ Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
+ Volumes map[string]struct{} // List of volumes (mounts) used for the container
+ WorkingDir string // Current directory (PWD) in which the command will be launched
+ Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+ NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ MacAddress string `json:",omitempty"` // Mac Address of the container
+ OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
+ Labels map[string]string // List of labels set to this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
+
+// Schema2V1Image is a V1Image in docker/docker/image.
+type Schema2V1Image struct {
+ // ID is a unique 64 character identifier of the image
+ ID string `json:"id,omitempty"`
+ // Parent is the ID of the parent image
+ Parent string `json:"parent,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Container is the id of the container used to commit
+ Container string `json:"container,omitempty"`
+ // ContainerConfig is the configuration of the container that is committed into the image
+ ContainerConfig Schema2Config `json:"container_config,omitempty"`
+ // DockerVersion specifies the version of Docker that was used to build the image
+ DockerVersion string `json:"docker_version,omitempty"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // Config is the configuration of the container received from the client
+ Config *Schema2Config `json:"config,omitempty"`
+ // Architecture is the hardware that the image is built and runs on
+ Architecture string `json:"architecture,omitempty"`
+ // OS is the operating system used to build and run the image
+ OS string `json:"os,omitempty"`
+ // Size is the total size of the image including all layers it is composed of
+ Size int64 `json:",omitempty"`
+}
+
+// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image.
+type Schema2RootFS struct {
+ Type string `json:"type"`
+ DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
+}
+
+// Schema2History stores build commands that were used to create an image, from docker/docker/image.
+type Schema2History struct {
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // CreatedBy keeps the Dockerfile command used while building the image
+ CreatedBy string `json:"created_by,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // EmptyLayer is set to true if this history item did not generate a
+ // layer. Otherwise, the history item is associated with the next
+ // layer in the RootFS section.
+ EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Schema2Image is an Image in docker/docker/image.
+type Schema2Image struct {
+ Schema2V1Image
+ Parent digest.Digest `json:"parent,omitempty"`
+ RootFS *Schema2RootFS `json:"rootfs,omitempty"`
+ History []Schema2History `json:"history,omitempty"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+}
+
+// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
+func Schema2FromManifest(manifest []byte) (*Schema2, error) {
+ s2 := Schema2{}
+ if err := json.Unmarshal(manifest, &s2); err != nil {
+ return nil, err
+ }
+ // Check manifest's and layers' media types.
+ if err := SupportedSchema2MediaType(s2.MediaType); err != nil {
+ return nil, err
+ }
+ for _, layer := range s2.LayersDescriptors {
+ if err := SupportedSchema2MediaType(layer.MediaType); err != nil {
+ return nil, err
+ }
+ }
+ return &s2, nil
+}
+
+// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
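
For illustration (a sketch, not part of the patch): assembling and serializing a minimal schema 2 manifest from descriptors via the constructor defined below; the sizes and digests here are placeholders supplied by the caller, not real blobs.

package example

import (
	"github.com/containers/image/v4/manifest"
	digest "github.com/opencontainers/go-digest"
)

// BuildSchema2 assembles a one-layer schema 2 manifest and returns it as a blob.
func BuildSchema2(configDigest, layerDigest digest.Digest) ([]byte, error) {
	config := manifest.Schema2Descriptor{
		MediaType: manifest.DockerV2Schema2ConfigMediaType,
		Size:      1234, // placeholder
		Digest:    configDigest,
	}
	layers := []manifest.Schema2Descriptor{{
		MediaType: manifest.DockerV2Schema2LayerMediaType,
		Size:      56789, // placeholder
		Digest:    layerDigest,
	}}
	return manifest.Schema2FromComponents(config, layers).Serialize()
}
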
+func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
+ return &Schema2{
+ SchemaVersion: 2,
+ MediaType: DockerV2Schema2MediaType,
+ ConfigDescriptor: config,
+ LayersDescriptors: layers,
+ }
+}
+
+// Schema2Clone creates a copy of the supplied Schema2 manifest.
+func Schema2Clone(src *Schema2) *Schema2 {
+ copy := *src
+ return &copy
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema2) ConfigInfo() types.BlobInfo {
+ return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor)
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema2) LayerInfos() []LayerInfo {
+ blobs := []LayerInfo{}
+ for _, layer := range m.LayersDescriptors {
+ blobs = append(blobs, LayerInfo{
+ BlobInfo: BlobInfoFromSchema2Descriptor(layer),
+ EmptyLayer: false,
+ })
+ }
+ return blobs
+}
+
+// isSchema2ForeignLayer is a convenience wrapper to check if a given mime type
+// is a compressed or decompressed schema 2 foreign layer.
+func isSchema2ForeignLayer(mimeType string) bool {
+ switch mimeType {
+ case DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip:
+ return true
+ default:
+ return false
+ }
+}
+
+// isSchema2Layer is a convenience wrapper to check if a given mime type is a
+// compressed or decompressed schema 2 layer.
+func isSchema2Layer(mimeType string) bool {
+ switch mimeType {
+ case DockerV2SchemaLayerMediaTypeUncompressed, DockerV2Schema2LayerMediaType:
+ return true
+ default:
+ return false
+ }
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+ if len(m.LayersDescriptors) != len(layerInfos) {
+ return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
+ }
+ original := m.LayersDescriptors
+ m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
+ for i, info := range layerInfos {
+ // First make sure we support the media type of the original layer.
+ if err := SupportedSchema2MediaType(original[i].MediaType); err != nil {
+ return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType)
+ }
+
+ // Set the correct media types based on the specified compression
+ // operation, the desired compression algorithm AND the original media
+ // type.
+ //
+ // Note that manifests in containers-storage might be reporting the
+ // wrong media type since the original manifests are stored while layers
+ // are decompressed in storage. Hence, we need to consider the case
+ // that an already {de}compressed layer should be {de}compressed, which
+ // is being addressed in `isSchema2{Foreign}Layer`.
+ switch info.CompressionOperation {
+ case types.PreserveOriginal:
+ // Keep the original media type.
+ m.LayersDescriptors[i].MediaType = original[i].MediaType
+
+ case types.Decompress:
+ // Decompress the original media type and check if it was
+ // non-distributable one or not.
+ mimeType := original[i].MediaType + switch { + case isSchema2ForeignLayer(mimeType): + m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaType + case isSchema2Layer(mimeType): + m.LayersDescriptors[i].MediaType = DockerV2SchemaLayerMediaTypeUncompressed + default: + return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType) + } + + case types.Compress: + if info.CompressionAlgorithm == nil { + logrus.Debugf("Preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest) + m.LayersDescriptors[i].MediaType = original[i].MediaType + break + } + // Compress the original media type and set the new one based on + // that type (distributable or not) and the specified compression + // algorithm. Throw an error if the algorithm is not supported. + switch info.CompressionAlgorithm.Name() { + case compression.Gzip.Name(): + mimeType := original[i].MediaType + switch { + case isSchema2ForeignLayer(mimeType): + m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaTypeGzip + case isSchema2Layer(mimeType): + m.LayersDescriptors[i].MediaType = DockerV2Schema2LayerMediaType + default: + return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType) + } + case compression.Zstd.Name(): + return fmt.Errorf("Error preparing updated manifest: zstd compression is not supported for docker images") + default: + return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest) + } + + default: + return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest) + } + m.LayersDescriptors[i].Digest = info.Digest + m.LayersDescriptors[i].Size = info.Size + m.LayersDescriptors[i].URLs = info.URLs + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema2) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + s2 := &Schema2Image{} + if err := json.Unmarshal(config, s2); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: "", + Created: &s2.Created, + DockerVersion: s2.DockerVersion, + Architecture: s2.Architecture, + Os: s2.OS, + Layers: layerInfosToStrings(m.LayerInfos()), + } + if s2.Config != nil { + i.Labels = s2.Config.Labels + i.Env = s2.Config.Env + } + return i, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. 
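
To illustrate the CompressionOperation handling in UpdateLayerInfos above (a sketch, not part of the vendored file): a caller recompressing every layer with gzip would prepare the BlobInfos roughly like this, where a real caller would also substitute the digests and sizes of the recompressed blobs.

package example

import (
	"github.com/containers/image/v4/manifest"
	"github.com/containers/image/v4/pkg/compression"
	"github.com/containers/image/v4/types"
)

// Recompress marks every layer of a parsed schema 2 manifest as
// gzip-compressed; digests and sizes are kept as-is for brevity.
func Recompress(m *manifest.Schema2) error {
	gzip := compression.Gzip
	updated := []types.BlobInfo{}
	for _, layer := range m.LayerInfos() {
		info := layer.BlobInfo
		info.CompressionOperation = types.Compress
		info.CompressionAlgorithm = &gzip
		updated = append(updated, info)
	}
	return m.UpdateLayerInfos(updated)
}
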
+func (m *Schema2) ImageID([]digest.Digest) (string, error) {
+ if err := m.ConfigDescriptor.Digest.Validate(); err != nil {
+ return "", err
+ }
+ return m.ConfigDescriptor.Digest.Hex(), nil
+}
diff --git a/vendor/github.com/containers/image/v4/manifest/manifest.go b/vendor/github.com/containers/image/v4/manifest/manifest.go
new file mode 100644
index 000000000..32af97ea8
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/manifest/manifest.go
@@ -0,0 +1,257 @@
+package manifest
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/containers/image/v4/types"
+ "github.com/containers/libtrust"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
+
+// FIXME(runcom, mitr): should we have a mediatype pkg??
+const (
+ // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
+ DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
+ // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
+ DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+ // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
+ DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+ // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
+ DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+ // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
+ DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+ // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
+ DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
+ // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
+ DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+ // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+ DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
+ // DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
+ DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+)
+
+// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
+func SupportedSchema2MediaType(m string) error {
+ switch m {
+ case DockerV2ListMediaType, DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema2ConfigMediaType, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2LayerMediaType, DockerV2Schema2MediaType, DockerV2SchemaLayerMediaTypeUncompressed:
+ return nil
+ default:
+ return fmt.Errorf("unsupported docker v2s2 media type: %q", m)
+ }
+}
+
+// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource
+// should request from the backend unless directed otherwise.
+var DefaultRequestedManifestMIMETypes = []string{
+ imgspecv1.MediaTypeImageManifest,
+ DockerV2Schema2MediaType,
+ DockerV2Schema1SignedMediaType,
+ DockerV2Schema1MediaType,
+ DockerV2ListMediaType,
+}
+
+// Manifest is an interface for parsing, modifying image manifests in isolation.
+// Callers can either use this abstract interface without understanding the details of the formats, +// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members +// directly. +// +// See types.Image for functionality not limited to manifests, including format conversions and config parsing. +// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image. +type Manifest interface { + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + ConfigInfo() types.BlobInfo + // LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []LayerInfo + // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) + UpdateLayerInfos(layerInfos []types.BlobInfo) error + + // ImageID computes an ID which can uniquely identify this image by its contents, irrespective + // of which (of possibly more than one simultaneously valid) reference was used to locate the + // image, and unchanged by whether or how the layers are compressed. The result takes the form + // of the hexadecimal portion of a digest.Digest. + ImageID(diffIDs []digest.Digest) (string, error) + + // Inspect returns various information for (skopeo inspect) parsed from the manifest, + // incorporating information from a configuration blob returned by configGetter, if + // the underlying image format is expected to include a configuration blob. + Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) + + // Serialize returns the manifest in a blob format. + // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! + Serialize() ([]byte, error) +} + +// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos. +type LayerInfo struct { + types.BlobInfo + EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. +} + +// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. +// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, +// but we may not have such metadata available (e.g. when the manifest is a local file). +func GuessMIMEType(manifest []byte) string { + // A subset of manifest fields; the rest is silently ignored by json.Unmarshal. + // Also docker/distribution/manifest.Versioned. + meta := struct { + MediaType string `json:"mediaType"` + SchemaVersion int `json:"schemaVersion"` + Signatures interface{} `json:"signatures"` + }{} + if err := json.Unmarshal(manifest, &meta); err != nil { + return "" + } + + switch meta.MediaType { + case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type. + return meta.MediaType + } + // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest. 
+	switch meta.SchemaVersion {
+	case 1:
+		if meta.Signatures != nil {
+			return DockerV2Schema1SignedMediaType
+		}
+		return DockerV2Schema1MediaType
+	case 2:
+		// best effort to understand if this is an OCI image since mediaType
+		// isn't in the manifest for OCI anymore
+		// for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
+		ociMan := struct {
+			Config struct {
+				MediaType string `json:"mediaType"`
+			} `json:"config"`
+		}{}
+		if err := json.Unmarshal(manifest, &ociMan); err != nil {
+			return ""
+		}
+		if ociMan.Config.MediaType == imgspecv1.MediaTypeImageConfig {
+			return imgspecv1.MediaTypeImageManifest
+		}
+		ociIndex := struct {
+			Manifests []imgspecv1.Descriptor `json:"manifests"`
+		}{}
+		if err := json.Unmarshal(manifest, &ociIndex); err != nil {
+			return ""
+		}
+		if len(ociIndex.Manifests) != 0 && ociIndex.Manifests[0].MediaType == imgspecv1.MediaTypeImageManifest {
+			return imgspecv1.MediaTypeImageIndex
+		}
+		return DockerV2Schema2MediaType
+	}
+	return ""
+}
+
+// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
+func Digest(manifest []byte) (digest.Digest, error) {
+	if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
+		sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
+		if err != nil {
+			return "", err
+		}
+		manifest, err = sig.Payload()
+		if err != nil {
+			// Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
+			// that libtrust itself has josebase64UrlEncode()d
+			return "", err
+		}
+	}
+
+	return digest.FromBytes(manifest), nil
+}
+
+// MatchesDigest returns true iff the manifest matches expectedDigest.
+// Error may be set if this returns false.
+// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
+// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
+func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
+	// This should eventually support various digest types.
+	actualDigest, err := Digest(manifest)
+	if err != nil {
+		return false, err
+	}
+	return expectedDigest == actualDigest, nil
+}
+
+// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
+// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature).
+func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		return nil, err // Coverage: This can fail only if rand.Reader fails.
+	}
+
+	js, err := libtrust.NewJSONSignature(manifest)
+	if err != nil {
+		return nil, err
+	}
+	if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails.
+		return nil, err
+	}
+	return js.PrettySignature("signatures")
+}
+
+// MIMETypeIsMultiImage returns true if mimeType is a list of images
+func MIMETypeIsMultiImage(mimeType string) bool {
+	return mimeType == DockerV2ListMediaType
+}
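Aside (editor's sketch, not part of this patch): the typical pairing of Digest and MatchesDigest when verifying a fetched manifest; `blob` and `expected` are assumed inputs. Because Digest strips v1s1 JWS signatures first, the comparison is stable across signed and unsigned renderings of the same manifest.

	ok, err := manifest.MatchesDigest(blob, expected)
	if err != nil {
		return err
	}
	if !ok {
		actual, _ := manifest.Digest(blob) // best effort, for the error message only
		return fmt.Errorf("manifest digest mismatch: expected %s, got %s", expected, actual)
	}

+// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
+// centralizing various workarounds.
+func NormalizedMIMEType(input string) string {
+	switch input {
+	// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .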
+	// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
+	// need to happen within the ImageSource.
+	case "application/json":
+		return DockerV2Schema1SignedMediaType
+	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
+		imgspecv1.MediaTypeImageManifest,
+		DockerV2Schema2MediaType,
+		DockerV2ListMediaType:
+		return input
+	default:
+		// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
+		// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
+		// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
+		//
+		// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
+		// This makes no real sense, but it happens because requests for manifests are redirected to a content
+		// distribution network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
+		return DockerV2Schema1SignedMediaType
+	}
+}
+
+// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
+func FromBlob(manblob []byte, mt string) (Manifest, error) {
+	switch NormalizedMIMEType(mt) {
+	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType:
+		return Schema1FromManifest(manblob)
+	case imgspecv1.MediaTypeImageManifest:
+		return OCI1FromManifest(manblob)
+	case DockerV2Schema2MediaType:
+		return Schema2FromManifest(manblob)
+	case DockerV2ListMediaType:
+		return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
+	default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+		return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
+	}
+}
+
+// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
+func layerInfosToStrings(infos []LayerInfo) []string {
+	layers := make([]string, len(infos))
+	for i, info := range infos {
+		layers[i] = info.Digest.String()
+	}
+	return layers
+}
diff --git a/vendor/github.com/containers/image/v4/manifest/oci.go b/vendor/github.com/containers/image/v4/manifest/oci.go
new file mode 100644
index 000000000..e483bbb19
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/manifest/oci.go
@@ -0,0 +1,243 @@
+package manifest
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/containers/image/v4/pkg/compression"
+	"github.com/containers/image/v4/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-spec/specs-go"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
+func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo {
+	return types.BlobInfo{
+		Digest:      desc.Digest,
+		Size:        desc.Size,
+		URLs:        desc.URLs,
+		Annotations: desc.Annotations,
+		MediaType:   desc.MediaType,
+	}
+}
+
+// OCI1 is a manifest.Manifest implementation for OCI images.
+// The underlying data from imgspecv1.Manifest is also available.
+type OCI1 struct {
+	imgspecv1.Manifest
+}
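Aside (editor's sketch, not part of this patch): parsing an OCI manifest blob and walking its layers with the helpers in this file; `blob` is an assumed input, and OCI1FromManifest/LayerInfos are defined just below.

	m, err := manifest.OCI1FromManifest(blob)
	if err != nil {
		return err
	}
	for _, layer := range m.LayerInfos() {
		fmt.Println(layer.Digest, layer.MediaType)
	}

+// SupportedOCI1MediaType checks if the specified string is a supported OCI1 media type.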
+func SupportedOCI1MediaType(m string) error { + switch m { + case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader: + return nil + default: + return fmt.Errorf("unsupported OCIv1 media type: %q", m) + } +} + +// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. +func OCI1FromManifest(manifest []byte) (*OCI1, error) { + oci1 := OCI1{} + if err := json.Unmarshal(manifest, &oci1); err != nil { + return nil, err + } + // Check manifest's and layers' media types. + if err := SupportedOCI1MediaType(oci1.Config.MediaType); err != nil { + return nil, err + } + for _, layer := range oci1.Layers { + if err := SupportedOCI1MediaType(layer.MediaType); err != nil { + return nil, err + } + } + return &oci1, nil +} + +// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. +func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { + return &OCI1{ + imgspecv1.Manifest{ + Versioned: specs.Versioned{SchemaVersion: 2}, + Config: config, + Layers: layers, + }, + } +} + +// OCI1Clone creates a copy of the supplied OCI1 manifest. +func OCI1Clone(src *OCI1) *OCI1 { + return &OCI1{ + Manifest: src.Manifest, + } +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *OCI1) ConfigInfo() types.BlobInfo { + return BlobInfoFromOCI1Descriptor(m.Config) +} + +// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *OCI1) LayerInfos() []LayerInfo { + blobs := []LayerInfo{} + for _, layer := range m.Layers { + blobs = append(blobs, LayerInfo{ + BlobInfo: BlobInfoFromOCI1Descriptor(layer), + EmptyLayer: false, + }) + } + return blobs +} + +// isOCI1NonDistributableLayer is a convenience wrapper to check if a given mime +// type is a compressed or decompressed OCI v1 non-distributable layer. +func isOCI1NonDistributableLayer(mimeType string) bool { + switch mimeType { + case imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd: + return true + default: + return false + } +} + +// isOCI1Layer is a convenience wrapper to check if a given mime type is a +// compressed or decompressed OCI v1 layer. 
+func isOCI1Layer(mimeType string) bool {
+	switch mimeType {
+	case imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd:
+		return true
+	default:
+		return false
+	}
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+	if len(m.Layers) != len(layerInfos) {
+		return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
+	}
+	original := m.Layers
+	m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
+	for i, info := range layerInfos {
+		// First make sure we support the media type of the original layer.
+		if err := SupportedOCI1MediaType(original[i].MediaType); err != nil {
+			return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType)
+		}
+
+		// Set the correct media types based on the specified compression
+		// operation, the desired compression algorithm AND the original media
+		// type.
+		//
+		// Note that manifests in containers-storage might be reporting the
+		// wrong media type since the original manifests are stored while layers
+		// are decompressed in storage. Hence, we need to consider the case
+		// that an already {de}compressed layer should be {de}compressed, which
+		// is being addressed in `isOCI1{NonDistributable}Layer`.
+		switch info.CompressionOperation {
+		case types.PreserveOriginal:
+			// Keep the original media type.
+			m.Layers[i].MediaType = original[i].MediaType
+
+		case types.Decompress:
+			// Decompress the original media type and check if it was
+			// non-distributable one or not.
+			mimeType := original[i].MediaType
+			switch {
+			case isOCI1NonDistributableLayer(mimeType):
+				m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
+			case isOCI1Layer(mimeType):
+				m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayer
+			default:
+				return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType)
+			}
+
+		case types.Compress:
+			if info.CompressionAlgorithm == nil {
+				logrus.Debugf("Preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest)
+				m.Layers[i].MediaType = original[i].MediaType
+				break
+			}
+			// Compress the original media type and set the new one based on
+			// that type (distributable or not) and the specified compression
+			// algorithm. Throw an error if the algorithm is not supported.
+			mimeType := original[i].MediaType
+			switch info.CompressionAlgorithm.Name() {
+			case compression.Gzip.Name():
+				switch {
+				case isOCI1NonDistributableLayer(mimeType):
+					m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
+				case isOCI1Layer(mimeType):
+					m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerGzip
+				default:
+					return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType)
+				}
+
+			case compression.Zstd.Name():
+				switch {
+				case isOCI1NonDistributableLayer(mimeType):
+					m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableZstd
+				case isOCI1Layer(mimeType):
+					m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerZstd
+				default:
+					return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType)
+				}
+
+			default:
+				return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest)
+			}
+
+		default:
+			return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest)
+		}
+		m.Layers[i].Digest = info.Digest
+		m.Layers[i].Size = info.Size
+		m.Layers[i].Annotations = info.Annotations
+		m.Layers[i].URLs = info.URLs
+	}
+	return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *OCI1) Serialize() ([]byte, error) {
+	return json.Marshal(*m)
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+	config, err := configGetter(m.ConfigInfo())
+	if err != nil {
+		return nil, err
+	}
+	v1 := &imgspecv1.Image{}
+	if err := json.Unmarshal(config, v1); err != nil {
+		return nil, err
+	}
+	d1 := &Schema2V1Image{}
+	if err := json.Unmarshal(config, d1); err != nil {
+		return nil, err
+	}
+	i := &types.ImageInspectInfo{
+		Tag:           "",
+		Created:       v1.Created,
+		DockerVersion: d1.DockerVersion,
+		Labels:        v1.Config.Labels,
+		Architecture:  v1.Architecture,
+		Os:            v1.OS,
+		Layers:        layerInfosToStrings(m.LayerInfos()),
+		Env:           d1.Config.Env,
+	}
+	return i, nil
+}
+
+// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *OCI1) ImageID([]digest.Digest) (string, error) {
+	if err := m.Config.Digest.Validate(); err != nil {
+		return "", err
+	}
+	return m.Config.Digest.Hex(), nil
+}
diff --git a/vendor/github.com/containers/image/v4/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v4/oci/archive/oci_dest.go
new file mode 100644
index 000000000..2455ed575
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/oci/archive/oci_dest.go
@@ -0,0 +1,151 @@
+package archive
+
+import (
+	"context"
+	"io"
+	"os"
+
+	"github.com/containers/image/v4/types"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/pkg/errors"
+)
+
+type ociArchiveImageDestination struct {
+	ref          ociArchiveReference
+	unpackedDest types.ImageDestination
+	tempDirRef   tempDirOCIRef
+}
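Aside (editor's note, not part of this patch): the archive destination below is a thin wrapper that writes through an oci/layout destination rooted in a temporary directory, then tars that directory up on Commit. A hypothetical caller-side sketch, assuming ctx and sys are in scope:

	ref, err := archive.ParseReference("/tmp/app.tar:latest")
	if err != nil {
		return err
	}
	dest, err := ref.NewImageDestination(ctx, sys)
	if err != nil {
		return err
	}
	defer dest.Close() // Close also removes the temporary directory

+// newImageDestination returns an ImageDestination for writing to an existing directory.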
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) { + tempDirRef, err := createOCIRef(ref.image) + if err != nil { + return nil, errors.Wrapf(err, "error creating oci reference") + } + unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys) + if err != nil { + if err := tempDirRef.deleteTempDir(); err != nil { + return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory) + } + return nil, err + } + return &ociArchiveImageDestination{ref: ref, + unpackedDest: unpackedDest, + tempDirRef: tempDirRef}, nil +} + +// Reference returns the reference used to set up this destination. +func (d *ociArchiveImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any +// Close deletes the temp directory of the oci-archive image +func (d *ociArchiveImageDestination) Close() error { + defer d.tempDirRef.deleteTempDir() + return d.unpackedDest.Close() +} + +func (d *ociArchiveImageDestination) SupportedManifestMIMETypes() []string { + return d.unpackedDest.SupportedManifestMIMETypes() +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures +func (d *ociArchiveImageDestination) SupportsSignatures(ctx context.Context) error { + return d.unpackedDest.SupportsSignatures(ctx) +} + +func (d *ociArchiveImageDestination) DesiredLayerCompression() types.LayerCompression { + return d.unpackedDest.DesiredLayerCompression() +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool { + return d.unpackedDest.AcceptsForeignLayerURLs() +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise +func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool { + return d.unpackedDest.MustMatchRuntimeOS() +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool { + return d.unpackedDest.IgnoresEmbeddedDockerReference() +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
+func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
+}
+
+// PutManifest writes manifest to the destination
+func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte) error {
+	return d.unpackedDest.PutManifest(ctx, m)
+}
+
+func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+	return d.unpackedDest.PutSignatures(ctx, signatures)
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted;
+// after the directory is made, it is tarred up into a file and the directory is deleted
+func (d *ociArchiveImageDestination) Commit(ctx context.Context) error {
+	if err := d.unpackedDest.Commit(ctx); err != nil {
+		return errors.Wrapf(err, "error storing image %q", d.ref.image)
+	}
+
+	// path of directory to tar up
+	src := d.tempDirRef.tempDirectory
+	// path to save tarred up file
+	dst := d.ref.resolvedFile
+	return tarDirectory(src, dst)
+}
+
+// tarDirectory tars up the directory at src and saves it to dst
+func tarDirectory(src, dst string) error {
+	// input is a stream of bytes from the archive of the directory at path
+	input, err := archive.Tar(src, archive.Uncompressed)
+	if err != nil {
+		return errors.Wrapf(err, "error retrieving stream of bytes from %q", src)
+	}
+
+	// creates the tar file
+	outFile, err := os.Create(dst)
+	if err != nil {
+		return errors.Wrapf(err, "error creating tar file %q", dst)
+	}
+	defer outFile.Close()
+
+	// copies the contents of the directory to the tar file
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+ _, err = io.Copy(outFile, input) + + return err +} diff --git a/vendor/github.com/containers/image/v4/oci/archive/oci_src.go b/vendor/github.com/containers/image/v4/oci/archive/oci_src.go new file mode 100644 index 000000000..8a479883f --- /dev/null +++ b/vendor/github.com/containers/image/v4/oci/archive/oci_src.go @@ -0,0 +1,102 @@ +package archive + +import ( + "context" + "io" + + ocilayout "github.com/containers/image/v4/oci/layout" + "github.com/containers/image/v4/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type ociArchiveImageSource struct { + ref ociArchiveReference + unpackedSrc types.ImageSource + tempDirRef tempDirOCIRef +} + +// newImageSource returns an ImageSource for reading from an existing directory. +// newImageSource untars the file and saves it in a temp directory +func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) { + tempDirRef, err := createUntarTempDir(ref) + if err != nil { + return nil, errors.Wrap(err, "error creating temp directory") + } + + unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys) + if err != nil { + if err := tempDirRef.deleteTempDir(); err != nil { + return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory) + } + return nil, err + } + return &ociArchiveImageSource{ref: ref, + unpackedSrc: unpackedSrc, + tempDirRef: tempDirRef}, nil +} + +// LoadManifestDescriptor loads the manifest +func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { + ociArchRef, ok := imgRef.(ociArchiveReference) + if !ok { + return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference") + } + tempDirRef, err := createUntarTempDir(ociArchRef) + if err != nil { + return imgspecv1.Descriptor{}, errors.Wrap(err, "error creating temp directory") + } + defer tempDirRef.deleteTempDir() + + descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted) + if err != nil { + return imgspecv1.Descriptor{}, errors.Wrap(err, "error loading index") + } + return descriptor, nil +} + +// Reference returns the reference used to set up this source. +func (s *ociArchiveImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +// Close deletes the temporary directory at dst +func (s *ociArchiveImageSource) Close() error { + defer s.tempDirRef.deleteTempDir() + return s.unpackedSrc.Close() +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + return s.unpackedSrc.GetManifest(ctx, instanceDigest) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+	return s.unpackedSrc.GetBlob(ctx, info, cache)
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	return s.unpackedSrc.GetSignatures(ctx, instanceDigest)
+}
+
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *ociArchiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v4/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v4/oci/archive/oci_transport.go
new file mode 100644
index 000000000..c8808ecb5
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/oci/archive/oci_transport.go
@@ -0,0 +1,192 @@
+package archive
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+
+	"github.com/containers/image/v4/directory/explicitfilepath"
+	"github.com/containers/image/v4/docker/reference"
+	"github.com/containers/image/v4/image"
+	"github.com/containers/image/v4/internal/tmpdir"
+	"github.com/containers/image/v4/oci/internal"
+	ocilayout "github.com/containers/image/v4/oci/layout"
+	"github.com/containers/image/v4/transports"
+	"github.com/containers/image/v4/types"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for OCI archive
+// it creates an oci-archive tar file by calling into the OCI transport
+// tarring the directory created by oci and deleting the directory
+var Transport = ociArchiveTransport{}
+
+type ociArchiveTransport struct{}
+
+// ociArchiveReference is an ImageReference for OCI Archive paths
+type ociArchiveReference struct {
+	file         string
+	resolvedFile string
+	image        string
+}
+
+func (t ociArchiveTransport) Name() string {
+	return "oci-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix,
+// into an ImageReference.
+func (t ociArchiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+	return internal.ValidateScope(scope)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+	file, image := internal.SplitPathAndImage(reference)
+	return NewReference(file, image)
+}
+
+// NewReference returns an OCI reference for a file and an image.
+func NewReference(file, image string) (types.ImageReference, error) {
+	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := internal.ValidateOCIPath(file); err != nil {
+		return nil, err
+	}
+
+	if err := internal.ValidateImageName(image); err != nil {
+		return nil, err
+	}
+
+	return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil
+}
+
+func (ref ociArchiveReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+func (ref ociArchiveReference) StringWithinTransport() string {
+	return fmt.Sprintf("%s:%s", ref.file, ref.image)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+func (ref ociArchiveReference) DockerReference() reference.Named {
+	return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+func (ref ociArchiveReference) PolicyConfigurationIdentity() string {
+	// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+	// same image and the two can’t be statically disambiguated. Using at least the repository directory is
+	// less granular but hopefully still useful.
+	return ref.resolvedFile
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set
+func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string {
+	res := []string{}
+	path := ref.resolvedFile
+	for {
+		lastSlash := strings.LastIndex(path, "/")
+		// Note that we do not include "/"; it is redundant with the default "" global default,
+		// and rejected by ociArchiveTransport.ValidatePolicyConfigurationScope above.
+		if lastSlash == -1 || path == "/" {
+			break
+		}
+		res = append(res, path)
+		path = path[:lastSlash]
+	}
+	return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	src, err := newImageSource(ctx, sys, ref)
+	if err != nil {
+		return nil, err
+	}
+	return image.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ociArchiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ctx, sys, ref)
+}
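Aside (editor's sketch, not part of this patch): what the reference methods above yield for a concrete path, assuming /var/lib/images/app.tar exists so that path resolution succeeds.

	ref, _ := archive.NewReference("/var/lib/images/app.tar", "latest")
	fmt.Println(ref.StringWithinTransport()) // "/var/lib/images/app.tar:latest"
	fmt.Println(ref.PolicyConfigurationNamespaces())
	// ["/var/lib/images/app.tar" "/var/lib/images" "/var/lib" "/var"]
	// i.e. successively shorter path prefixes, never including "/" itself.

+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.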
+func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(ctx, sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for oci: images")
+}
+
+// struct to store the ociReference and temporary directory returned by createOCIRef
+type tempDirOCIRef struct {
+	tempDirectory   string
+	ociRefExtracted types.ImageReference
+}
+
+// deleteTempDir deletes the temporary directory created
+func (t *tempDirOCIRef) deleteTempDir() error {
+	return os.RemoveAll(t.tempDirectory)
+}
+
+// createOCIRef creates the oci reference of the image
+func createOCIRef(image string) (tempDirOCIRef, error) {
+	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci")
+	if err != nil {
+		return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory")
+	}
+	ociRef, err := ocilayout.NewReference(dir, image)
+	if err != nil {
+		return tempDirOCIRef{}, err
+	}
+
+	tempDirRef := tempDirOCIRef{tempDirectory: dir, ociRefExtracted: ociRef}
+	return tempDirRef, nil
+}
+
+// createUntarTempDir creates the temporary directory and copies the tarred content to it
+func createUntarTempDir(ref ociArchiveReference) (tempDirOCIRef, error) {
+	tempDirRef, err := createOCIRef(ref.image)
+	if err != nil {
+		return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference")
+	}
+	src := ref.resolvedFile
+	dst := tempDirRef.tempDirectory
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+	if err := archive.UntarPath(src, dst); err != nil {
+		if err := tempDirRef.deleteTempDir(); err != nil {
+			return tempDirOCIRef{}, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
+		}
+		return tempDirOCIRef{}, errors.Wrapf(err, "error untarring file %q", src)
+	}
+	return tempDirRef, nil
+}
diff --git a/vendor/github.com/containers/image/v4/oci/internal/oci_util.go b/vendor/github.com/containers/image/v4/oci/internal/oci_util.go
new file mode 100644
index 000000000..c2012e50e
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/oci/internal/oci_util.go
@@ -0,0 +1,126 @@
+package internal
+
+import (
+	"github.com/pkg/errors"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strings"
+)
+
+// annotation specs from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+	separator = `(?:[-._:@+]|--)`
+	alphanum  = `(?:[A-Za-z0-9]+)`
+	component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
+var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`)
+
+// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs.
+// In any other case an error is returned.
+func ValidateImageName(image string) error {
+	if len(image) == 0 {
+		return nil
+	}
+
+	var err error
+	if !refRegexp.MatchString(image) {
+		err = errors.Errorf("Invalid image %s", image)
+	}
+	return err
+}
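Aside (editor's sketch, not part of this patch): names accepted and rejected by ValidateImageName under the annotation-key-style grammar above, as if called from within this package; note that the empty name is explicitly allowed.

	fmt.Println(ValidateImageName(""))         // <nil>, empty names are allowed
	fmt.Println(ValidateImageName("app-v1.2")) // <nil>
	fmt.Println(ValidateImageName("bad name")) // Invalid image bad name

+// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image.
+// Neither path nor image parts are validated at this stage.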
+func SplitPathAndImage(reference string) (string, string) { + if runtime.GOOS == "windows" { + return splitPathAndImageWindows(reference) + } + return splitPathAndImageNonWindows(reference) +} + +func splitPathAndImageWindows(reference string) (string, string) { + groups := windowsRefRegexp.FindStringSubmatch(reference) + // nil group means no match + if groups == nil { + return reference, "" + } + + // we expect three elements. First one full match, second the capture group for the path and + // the third the capture group for the image + if len(groups) != 3 { + return reference, "" + } + return groups[1], groups[2] +} + +func splitPathAndImageNonWindows(reference string) (string, string) { + sep := strings.SplitN(reference, ":", 2) + path := sep[0] + + var image string + if len(sep) == 2 { + image = sep[1] + } + return path, image +} + +// ValidateOCIPath takes the OCI path and validates it. +func ValidateOCIPath(path string) error { + if runtime.GOOS == "windows" { + // On Windows we must allow for a ':' as part of the path + if strings.Count(path, ":") > 1 { + return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path) + } + } else { + if strings.Contains(path, ":") { + return errors.Errorf("Invalid OCI reference: path %s contains a colon", path) + } + } + return nil +} + +// ValidateScope validates a policy configuration scope for an OCI transport. +func ValidateScope(scope string) error { + var err error + if runtime.GOOS == "windows" { + err = validateScopeWindows(scope) + } else { + err = validateScopeNonWindows(scope) + } + if err != nil { + return err + } + + cleaned := filepath.Clean(scope) + if cleaned != scope { + return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + } + + return nil +} + +func validateScopeWindows(scope string) error { + matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) + if !matched { + return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope) + } + + return nil +} + +func validateScopeNonWindows(scope string) error { + if !strings.HasPrefix(scope, "/") { + return errors.Errorf("Invalid scope %s: must be an absolute path", scope) + } + + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + + return nil +} diff --git a/vendor/github.com/containers/image/v4/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v4/oci/layout/oci_dest.go new file mode 100644 index 000000000..20925d3dc --- /dev/null +++ b/vendor/github.com/containers/image/v4/oci/layout/oci_dest.go @@ -0,0 +1,306 @@ +package layout + +import ( + "context" + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/types" + digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type ociImageDestination struct { + ref ociReference + index imgspecv1.Index + sharedBlobDir string + acceptUncompressedLayers bool +} + +// newImageDestination returns an ImageDestination for writing to an existing directory. 
+func newImageDestination(sys *types.SystemContext, ref ociReference) (types.ImageDestination, error) { + var index *imgspecv1.Index + if indexExists(ref) { + var err error + index, err = ref.getIndex() + if err != nil { + return nil, err + } + } else { + index = &imgspecv1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + } + } + + d := &ociImageDestination{ref: ref, index: *index} + if sys != nil { + d.sharedBlobDir = sys.OCISharedBlobDirPath + d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers + } + + if err := ensureDirectoryExists(d.ref.dir); err != nil { + return nil, err + } + // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, + // but it MAY be empty (e.g. if we never end up calling PutBlob) + // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { + return nil, err + } + return d, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *ociImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *ociImageDestination) Close() error { + return nil +} + +func (d *ociImageDestination) SupportedManifestMIMETypes() []string { + return []string{ + imgspecv1.MediaTypeImageManifest, + } +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error { + return errors.Errorf("Pushing signatures for OCI images is not supported") +} + +func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression { + if d.acceptUncompressedLayers { + return types.PreserveOriginal + } + return types.Compress +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ociImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *ociImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ociImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. 
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded := false
+	explicitClosed := false
+	defer func() {
+		if !explicitClosed {
+			blobFile.Close()
+		}
+		if !succeeded {
+			os.Remove(blobFile.Name())
+		}
+	}()
+
+	digester := digest.Canonical.Digester()
+	tee := io.TeeReader(stream, digester.Hash())
+
+	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+	size, err := io.Copy(blobFile, tee)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	computedDigest := digester.Digest()
+	if inputInfo.Size != -1 && size != inputInfo.Size {
+		return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+	}
+	if err := blobFile.Sync(); err != nil {
+		return types.BlobInfo{}, err
+	}
+
+	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
+	// On Windows, the “permissions of newly created files” argument to syscall.Open is
+	// ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod,
+	// always fails on Windows.
+	if runtime.GOOS != "windows" {
+		if err := blobFile.Chmod(0644); err != nil {
+			return types.BlobInfo{}, err
+		}
+	}
+
+	blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	if err := ensureParentDirectoryExists(blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+
+	// need to explicitly close the file, since a rename won't otherwise work on Windows
+	blobFile.Close()
+	explicitClosed = true
+	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded = true
+	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+}
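Aside (editor's note, not part of this patch): the core of PutBlob above is the digest-while-copying pattern; the TeeReader feeds every byte read from stream into the digester, so the blob's digest and size are computed in the same single pass that writes the data. A minimal standalone sketch, with stream and dst as assumed inputs:

	digester := digest.Canonical.Digester()
	tee := io.TeeReader(stream, digester.Hash())
	n, err := io.Copy(dst, tee)
	if err != nil {
		return err
	}
	fmt.Println(n, digester.Digest()) // size and digest of everything copied

+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.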
+func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
+	}
+	blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, types.BlobInfo{}, nil
+	}
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+// and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte) error {
+	digest, err := manifest.Digest(m)
+	if err != nil {
+		return err
+	}
+	desc := imgspecv1.Descriptor{}
+	desc.Digest = digest
+	// TODO(runcom): be aware and add support for OCI manifest list
+	desc.MediaType = imgspecv1.MediaTypeImageManifest
+	desc.Size = int64(len(m))
+
+	blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir)
+	if err != nil {
+		return err
+	}
+	if err := ensureParentDirectoryExists(blobPath); err != nil {
+		return err
+	}
+	if err := ioutil.WriteFile(blobPath, m, 0644); err != nil {
+		return err
+	}
+
+	if d.ref.image != "" {
+		annotations := make(map[string]string)
+		annotations["org.opencontainers.image.ref.name"] = d.ref.image
+		desc.Annotations = annotations
+	}
+	desc.Platform = &imgspecv1.Platform{
+		Architecture: runtime.GOARCH,
+		OS:           runtime.GOOS,
+	}
+	d.addManifest(&desc)
+
+	return nil
+}
+
+func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
+	for i, manifest := range d.index.Manifests {
+		if manifest.Annotations["org.opencontainers.image.ref.name"] == desc.Annotations["org.opencontainers.image.ref.name"] {
+			// TODO Should there first be a cleanup based on the descriptor we are going to replace?
+			d.index.Manifests[i] = *desc
+			return
+		}
+	}
+	d.index.Manifests = append(d.index.Manifests, *desc)
+}
+
+func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+	if len(signatures) != 0 {
+		return errors.Errorf("Pushing signatures for OCI images is not supported")
+	}
+	return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *ociImageDestination) Commit(ctx context.Context) error {
+	if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
+		return err
+	}
+	indexJSON, err := json.Marshal(d.index)
+	if err != nil {
+		return err
+	}
+	return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
+}
+
+func ensureDirectoryExists(path string) error {
+	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+		if err := os.MkdirAll(path, 0755); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ensureParentDirectoryExists ensures the parent of the supplied path exists.
+func ensureParentDirectoryExists(path string) error {
+	return ensureDirectoryExists(filepath.Dir(path))
+}
+
+// indexExists checks whether the index location specified in the OCI reference exists.
+// The implementation is opinionated, since in case of unexpected errors true is returned.
+func indexExists(ref ociReference) bool {
+	_, err := os.Stat(ref.indexPath())
+	if err == nil {
+		return true
+	}
+	if os.IsNotExist(err) {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/containers/image/v4/oci/layout/oci_src.go b/vendor/github.com/containers/image/v4/oci/layout/oci_src.go
new file mode 100644
index 000000000..dd6c6c4a6
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/oci/layout/oci_src.go
@@ -0,0 +1,171 @@
+package layout
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strconv"
+
+	"github.com/containers/image/v4/pkg/tlsclientconfig"
+	"github.com/containers/image/v4/types"
+	"github.com/docker/go-connections/tlsconfig"
+	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type ociImageSource struct {
+	ref           ociReference
+	descriptor    imgspecv1.Descriptor
+	client        *http.Client
+	sharedBlobDir string
+}
+
+// newImageSource returns an ImageSource for reading from an existing directory.
+func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSource, error) {
+	tr := tlsclientconfig.NewTransport()
+	tr.TLSClientConfig = tlsconfig.ServerDefault()
+
+	if sys != nil && sys.OCICertPath != "" {
+		if err := tlsclientconfig.SetupCertificates(sys.OCICertPath, tr.TLSClientConfig); err != nil {
+			return nil, err
+		}
+		tr.TLSClientConfig.InsecureSkipVerify = sys.OCIInsecureSkipTLSVerify
+	}
+
+	client := &http.Client{}
+	client.Transport = tr
+	descriptor, err := ref.getManifestDescriptor()
+	if err != nil {
+		return nil, err
+	}
+	d := &ociImageSource{ref: ref, descriptor: descriptor, client: client}
+	if sys != nil {
+		// TODO(jonboulle): check dir existence?
+		d.sharedBlobDir = sys.OCISharedBlobDirPath
+	}
+	return d, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ociImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *ociImageSource) Close() error {
+	return nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+	var dig digest.Digest
+	var mimeType string
+	if instanceDigest == nil {
+		dig = digest.Digest(s.descriptor.Digest)
+		mimeType = s.descriptor.MediaType
+	} else {
+		dig = *instanceDigest
+		// XXX: instanceDigest means that we don't immediately have the context of what
+		// mediaType the manifest has. In OCI this means that we don't know
+		// what reference it came from, so we just *assume* that it's
+		// MediaTypeImageManifest.
+		// FIXME: We should actually be able to look up the manifest in the index,
+		// and see the MIME type there.
+		mimeType = imgspecv1.MediaTypeImageManifest
+	}
+
+	manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir)
+	if err != nil {
+		return nil, "", err
+	}
+	m, err := ioutil.ReadFile(manifestPath)
+	if err != nil {
+		return nil, "", err
+	}
+
+	return m, mimeType, nil
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *ociImageSource) HasThreadSafeGetBlob() bool {
+	return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+	if len(info.URLs) != 0 {
+		return s.getExternalBlob(ctx, info.URLs)
+	}
+
+	path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	r, err := os.Open(path)
+	if err != nil {
+		return nil, 0, err
+	}
+	fi, err := r.Stat()
+	if err != nil {
+		return nil, 0, err
+	}
+	return r, fi.Size(), nil
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	return [][]byte{}, nil
+}
+
+func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
+	errWrap := errors.New("failed fetching external blob from all urls")
+	for _, url := range urls {
+
+		req, err := http.NewRequest("GET", url, nil)
+		if err != nil {
+			errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
+			continue
+		}
+
+		resp, err := s.client.Do(req.WithContext(ctx))
+		if err != nil {
+			errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
+			continue
+		}
+
+		if resp.StatusCode != http.StatusOK {
+			resp.Body.Close()
+			errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", url)
+			continue
+		}
+
+		return resp.Body, getBlobSize(resp), nil
+	}
+
+	return nil, 0, errWrap
+}
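Aside (editor's sketch, not part of this patch): consuming GetBlob from this source; info is an assumed types.BlobInfo with at least a digest set. For blobs that carry URLs, the method above transparently falls back to fetching them over HTTP via getExternalBlob.

	rc, size, err := src.GetBlob(ctx, info, cache)
	if err != nil {
		return err
	}
	defer rc.Close()
	fmt.Println("blob size (may be -1 if unknown):", size)

+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.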
+func (s *ociImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return nil, nil
+}
+
+func getBlobSize(resp *http.Response) int64 {
+	size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+	if err != nil {
+		size = -1
+	}
+	return size
+}
diff --git a/vendor/github.com/containers/image/v4/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v4/oci/layout/oci_transport.go
new file mode 100644
index 000000000..259852b4d
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/oci/layout/oci_transport.go
@@ -0,0 +1,264 @@
+package layout
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/image/v4/directory/explicitfilepath"
+	"github.com/containers/image/v4/docker/reference"
+	"github.com/containers/image/v4/image"
+	"github.com/containers/image/v4/oci/internal"
+	"github.com/containers/image/v4/transports"
+	"github.com/containers/image/v4/types"
+	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+var (
+	// Transport is an ImageTransport for OCI directories.
+	Transport = ociTransport{}
+
+	// ErrMoreThanOneImage is an error returned when the index includes
+	// more than one image and the user should choose which one to use.
+	ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image")
+)
+
+type ociTransport struct{}
+
+func (t ociTransport) Name() string {
+	return "oci"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
+	return internal.ValidateScope(scope)
+}
+
+// ociReference is an ImageReference for OCI directory paths.
+type ociReference struct {
+	// Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
+	// Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on.
+
+	// Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid
+	// being exposed to symlinks and renames in the parent directories to the working directory).
+	// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
+	dir         string // As specified by the user. May be relative, contain symlinks, etc.
+	resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
+	// If image=="", it means the "only image" in the index.json is used in the case it is a source;
+	// for destinations, the image name annotation "image.ref.name" is not added to the index.json.
+	image string
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+	dir, image := internal.SplitPathAndImage(reference)
+	return NewReference(dir, image)
+}
+
+// NewReference returns an OCI reference for a directory and an image.
+//
+// We do not expose an API supplying the resolvedDir; we could, but recomputing it
+// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
+func NewReference(dir, image string) (types.ImageReference, error) {
+	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := internal.ValidateOCIPath(dir); err != nil {
+		return nil, err
+	}
+
+	if err = internal.ValidateImageName(image); err != nil {
+		return nil, err
+	}
+
+	return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
+}
+
+func (ref ociReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref ociReference) StringWithinTransport() string {
+	return fmt.Sprintf("%s:%s", ref.dir, ref.image)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ociReference) DockerReference() reference.Named {
+	return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref ociReference) PolicyConfigurationIdentity() string {
+	// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+	// same image and the two can’t be statically disambiguated. Using at least the repository directory is
+	// less granular but hopefully still useful.
+	return fmt.Sprintf("%s", ref.resolvedDir)
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ociReference) PolicyConfigurationNamespaces() []string {
+	res := []string{}
+	path := ref.resolvedDir
+	for {
+		lastSlash := strings.LastIndex(path, "/")
+		// Note that we do not include "/"; it is redundant with the default "" global default,
+		// and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+		if lastSlash == -1 || path == "/" {
+			break
+		}
+		res = append(res, path)
+		path = path[:lastSlash]
+	}
+	return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	src, err := newImageSource(sys, ref)
+	if err != nil {
+		return nil, err
+	}
+	return image.FromSource(ctx, sys, src)
+}
+
+// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs opening the index,
+// nil is returned together with the error.
+func (ref ociReference) getIndex() (*imgspecv1.Index, error) {
+	indexJSON, err := os.Open(ref.indexPath())
+	if err != nil {
+		return nil, err
+	}
+	defer indexJSON.Close()
+
+	index := &imgspecv1.Index{}
+	if err := json.NewDecoder(indexJSON).Decode(index); err != nil {
+		return nil, err
+	}
+	return index, nil
+}
+
+func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
+	index, err := ref.getIndex()
+	if err != nil {
+		return imgspecv1.Descriptor{}, err
+	}
+
+	var d *imgspecv1.Descriptor
+	if ref.image == "" {
+		// return manifest if only one image is in the oci directory
+		if len(index.Manifests) == 1 {
+			d = &index.Manifests[0]
+		} else {
+			// ask user to choose image when more than one image in the oci directory
+			return imgspecv1.Descriptor{}, ErrMoreThanOneImage
+		}
+	} else {
+		// if image specified, look through all manifests for a match
+		for _, md := range index.Manifests {
+			if md.MediaType != imgspecv1.MediaTypeImageManifest {
+				continue
+			}
+			refName, ok := md.Annotations["org.opencontainers.image.ref.name"]
+			if !ok {
+				continue
+			}
+			if refName == ref.image {
+				d = &md
+				break
+			}
+		}
+	}
+	if d == nil {
+		return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image)
+	}
+	return *d, nil
+}
+
+// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
+// when pulling an image.
+func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
+	ociRef, ok := imgRef.(ociReference)
+	if !ok {
+		return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef")
+	}
+	return ociRef.getManifestDescriptor()
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
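+// Illustrative usage sketch (caller code, not part of this file):
+//
+//	src, err := ref.NewImageSource(ctx, sys)
+//	if err != nil {
+//		return err
+//	}
+//	defer src.Close()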
+func (ref ociReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for oci: images")
+}
+
+// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions.
+func (ref ociReference) ociLayoutPath() string {
+	return filepath.Join(ref.dir, "oci-layout")
+}
+
+// indexPath returns a path for the index.json within a directory using OCI conventions.
+func (ref ociReference) indexPath() string {
+	return filepath.Join(ref.dir, "index.json")
+}
+
+// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
+func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) {
+	if err := digest.Validate(); err != nil {
+		return "", errors.Wrapf(err, "unexpected digest reference %s", digest)
+	}
+	blobDir := filepath.Join(ref.dir, "blobs")
+	if sharedBlobDir != "" {
+		blobDir = sharedBlobDir
+	}
+	return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
+}
diff --git a/vendor/github.com/containers/image/v4/openshift/openshift-copies.go b/vendor/github.com/containers/image/v4/openshift/openshift-copies.go
new file mode 100644
index 000000000..f45dc24c4
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/openshift/openshift-copies.go
@@ -0,0 +1,1170 @@
+package openshift
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/ghodss/yaml"
+	"github.com/imdario/mergo"
+	"github.com/pkg/errors"
+	"golang.org/x/net/http2"
+	"k8s.io/client-go/util/homedir"
+)
+
+// restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig.
+// restTLSClientConfig contains settings to enable transport layer security
+type restTLSClientConfig struct {
+	// Server requires TLS client certificate authentication
+	CertFile string
+	// Server requires TLS client certificate authentication
+	KeyFile string
+	// Trusted root certificates for server
+	CAFile string
+
+	// CertData holds PEM-encoded bytes (typically read from a client certificate file).
+	// CertData takes precedence over CertFile
+	CertData []byte
+	// KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
+	// KeyData takes precedence over KeyFile
+	KeyData []byte
+	// CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
+	// CAData takes precedence over CAFile
+	CAData []byte
+}
+
+// restConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.Config.
+// Config holds the common attributes that can be passed to a Kubernetes client on
+// initialization.
+type restConfig struct {
+	// Host must be a host string, a host:port pair, or a URL to the base of the apiserver.
+ // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. + Host string + + // Server requires Basic authentication + Username string + Password string + + // Server requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + // TODO: demonstrate an OAuth2 compatible client. + BearerToken string + + // TLSClientConfig contains settings to enable transport layer security + restTLSClientConfig + + // Server should be accessed without verifying the TLS + // certificate. For testing only. + Insecure bool +} + +// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig. +// ClientConfig is used to make it easy to get an api server client +type clientConfig interface { + // ClientConfig returns a complete client config + ClientConfig() (*restConfig, error) +} + +// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig. +func defaultClientConfig() clientConfig { + loadingRules := newOpenShiftClientConfigLoadingRules() + // REMOVED: Allowing command-line overriding of loadingRules + // REMOVED: clientcmd.ConfigOverrides + + clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules) + + return clientConfig +} + +var recommendedHomeFile = path.Join(homedir.HomeDir(), ".kube/config") + +// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules. +// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift. +// 1. --config value +// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file +func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules { + chain := []string{} + + envVarFile := os.Getenv("KUBECONFIG") + if len(envVarFile) != 0 { + chain = append(chain, filepath.SplitList(envVarFile)...) + } else { + chain = append(chain, recommendedHomeFile) + } + + return &clientConfigLoadingRules{ + Precedence: chain, + // REMOVED: Migration support; run (oc login) to trigger migration + } +} + +// deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig. +// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules +// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that +// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before +// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid +// passing extraneous information down a call stack +type deferredLoadingClientConfig struct { + loadingRules *clientConfigLoadingRules + + clientConfig clientConfig +} + +// NewNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig. 
+// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed loading rules
+func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig {
+	return &deferredLoadingClientConfig{loadingRules: loadingRules}
+}
+
+func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) {
+	if config.clientConfig == nil {
+		// REMOVED: Support for concurrent use in multiple threads.
+		mergedConfig, err := config.loadingRules.Load()
+		if err != nil {
+			return nil, err
+		}
+
+		var mergedClientConfig clientConfig
+		// REMOVED: Interactive fallback support.
+		mergedClientConfig = newNonInteractiveClientConfig(*mergedConfig)
+
+		config.clientConfig = mergedClientConfig
+	}
+
+	return config.clientConfig, nil
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) {
+	mergedClientConfig, err := config.createClientConfig()
+	if err != nil {
+		return nil, err
+	}
+	mergedConfig, err := mergedClientConfig.ClientConfig()
+	if err != nil {
+		return nil, err
+	}
+	// REMOVED: In-cluster service account configuration use.
+
+	return mergedConfig, nil
+}
+
+var (
+	// DefaultCluster is the cluster config used when no other config is specified
+	// TODO: eventually apiserver should start on 443 and be secure by default
+	defaultCluster = clientcmdCluster{Server: "http://localhost:8080"}
+
+	// EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name
+	envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")}
+)
+
+// directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.
+// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
+type directClientConfig struct {
+	config clientcmdConfig
+}
+
+// newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig.
+// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
+func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig {
+	return &directClientConfig{config}
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *directClientConfig) ClientConfig() (*restConfig, error) {
+	if err := config.ConfirmUsable(); err != nil {
+		return nil, err
+	}
+
+	configAuthInfo := config.getAuthInfo()
+	configClusterInfo := config.getCluster()
+
+	clientConfig := &restConfig{}
+	clientConfig.Host = configClusterInfo.Server
+	if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
+		u.RawQuery = ""
+		u.Fragment = ""
+		clientConfig.Host = u.String()
+	}
+
+	// only try to read the auth information if we are secure
+	if isConfigTransportTLS(*clientConfig) {
+		var err error
+		// REMOVED: Support for interactive fallback.
+		userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
+		if err != nil {
+			return nil, err
+		}
+		mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig)
+
+		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
+		if err != nil {
+			return nil, err
+		}
+		mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig)
+	}
+
+	return clientConfig, nil
+}
+
+// getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig.
+// clientauth.Info object contains both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately
+// we want this order of precedence for the server identification
+// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. load the ~/.kubernetes_auth file as a default
+func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
+	mergedConfig := &restConfig{}
+
+	// configClusterInfo holds the information identifying the server provided by .kubeconfig
+	configClientConfig := &restConfig{}
+	configClientConfig.CAFile = configClusterInfo.CertificateAuthority
+	configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
+	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+	mergo.MergeWithOverwrite(mergedConfig, configClientConfig)
+
+	return mergedConfig, nil
+}
+
+// getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
+// clientauth.Info object contains both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately
+// we want this order of precedence for user identification
+// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
+// 4. if there is not enough information to identify the user, prompt if possible
+func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
+	mergedConfig := &restConfig{}
+
+	// blindly overwrite existing values based on precedence
+	if len(configAuthInfo.Token) > 0 {
+		mergedConfig.BearerToken = configAuthInfo.Token
+	}
+	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+		mergedConfig.CertFile = configAuthInfo.ClientCertificate
+		mergedConfig.CertData = configAuthInfo.ClientCertificateData
+		mergedConfig.KeyFile = configAuthInfo.ClientKey
+		mergedConfig.KeyData = configAuthInfo.ClientKeyData
+	}
+	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+		mergedConfig.Username = configAuthInfo.Username
+		mergedConfig.Password = configAuthInfo.Password
+	}
+
+	// REMOVED: prompting for missing information.
+	return mergedConfig, nil
+}
+
+// canIdentifyUser is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.canIdentifyUser
+func canIdentifyUser(config restConfig) bool {
+	return len(config.Username) > 0 ||
+		(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
+		len(config.BearerToken) > 0
+
+}
+
+// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
+// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
+func (config *directClientConfig) ConfirmUsable() error {
+	var validationErrors []error
+	validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
+	validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...)
+	// when direct client config is specified, and our only error is that no server is defined, we should
+	// return a standard "no config" error
+	if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster {
+		return newErrConfigurationInvalid([]error{errEmptyConfig})
+	}
+	return newErrConfigurationInvalid(validationErrors)
+}
+
+// getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName.
+func (config *directClientConfig) getContextName() string {
+	// REMOVED: overrides support
+	return config.config.CurrentContext
+}
+
+// getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName.
+func (config *directClientConfig) getAuthInfoName() string {
+	// REMOVED: overrides support
+	return config.getContext().AuthInfo
+}
+
+// getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName.
+func (config *directClientConfig) getClusterName() string {
+	// REMOVED: overrides support
+	return config.getContext().Cluster
+}
+
+// getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext.
+func (config *directClientConfig) getContext() clientcmdContext {
+	contexts := config.config.Contexts
+	contextName := config.getContextName()
+
+	var mergedContext clientcmdContext
+	if configContext, exists := contexts[contextName]; exists {
+		mergo.MergeWithOverwrite(&mergedContext, configContext)
+	}
+	// REMOVED: overrides support
+
+	return mergedContext
+}
+
+var (
+	errEmptyConfig = errors.New("no configuration has been provided")
+	// message is for consistency with old behavior
+	errEmptyCluster = errors.New("cluster has no server defined")
+)
+
+// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo.
+// validateClusterInfo looks for conflicts and errors in the cluster info +func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error { + var validationErrors []error + + if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) { + return []error{errEmptyCluster} + } + + if len(clusterInfo.Server) == 0 { + if len(clusterName) == 0 { + validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined")) + } else { + validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName)) + } + } + // Make sure CA data and CA file aren't both specified + if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { + validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName)) + } + if len(clusterInfo.CertificateAuthority) != 0 { + clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) + defer clientCertCA.Close() + if err != nil { + validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + } + } + + return validationErrors +} + +// validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo. +// validateAuthInfo looks for conflicts and errors in the auth info +func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error { + var validationErrors []error + + usingAuthPath := false + methods := make([]string, 0, 3) + if len(authInfo.Token) != 0 { + methods = append(methods, "token") + } + if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { + methods = append(methods, "basicAuth") + } + + if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { + // Make sure cert data and file aren't both specified + if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { + validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. 
client-cert-data will override", authInfoName))
+		}
+		// Make sure key data and file aren't both specified
+		if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
+			validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
+		}
+		// Make sure a key is specified
+		if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
+			validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
+		}
+
+		if len(authInfo.ClientCertificate) != 0 {
+			clientCertFile, err := os.Open(authInfo.ClientCertificate)
+			defer clientCertFile.Close()
+			if err != nil {
+				validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
+			}
+		}
+		if len(authInfo.ClientKey) != 0 {
+			clientKeyFile, err := os.Open(authInfo.ClientKey)
+			defer clientKeyFile.Close()
+			if err != nil {
+				validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
+			}
+		}
+	}
+
+	// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
+	if (len(methods) > 1) && (!usingAuthPath) {
+		validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
+	}
+
+	return validationErrors
+}
+
+// getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo.
+func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo {
+	authInfos := config.config.AuthInfos
+	authInfoName := config.getAuthInfoName()
+
+	var mergedAuthInfo clientcmdAuthInfo
+	if configAuthInfo, exists := authInfos[authInfoName]; exists {
+		mergo.MergeWithOverwrite(&mergedAuthInfo, configAuthInfo)
+	}
+	// REMOVED: overrides support
+
+	return mergedAuthInfo
+}
+
+// getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster.
+func (config *directClientConfig) getCluster() clientcmdCluster {
+	clusterInfos := config.config.Clusters
+	clusterInfoName := config.getClusterName()
+
+	var mergedClusterInfo clientcmdCluster
+	mergo.MergeWithOverwrite(&mergedClusterInfo, defaultCluster)
+	mergo.MergeWithOverwrite(&mergedClusterInfo, envVarCluster)
+	if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
+		mergo.MergeWithOverwrite(&mergedClusterInfo, configClusterInfo)
+	}
+	// REMOVED: overrides support
+
+	return mergedClusterInfo
+}
+
+// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
+// This helper implements the error and Errors interfaces. Keeping it private
+// prevents people from making an aggregate of 0 errors, which is not
+// an error, but does satisfy the error interface.
+type aggregateErr []error
+
+// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
+// NewAggregate converts a slice of errors into an Aggregate interface, which
+// is itself an implementation of the error interface. If the slice is empty,
+// this returns nil.
+// It checks whether any element of the input error list is nil, to avoid a
+// nil pointer panic when Error() is called.
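+// For example (illustrative): newAggregate([]error{errA, nil, errB}).Error()
+// yields "[<errA message>, <errB message>]", while newAggregate(nil) and
+// newAggregate([]error{nil}) both return nil.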
+func newAggregate(errlist []error) error {
+	if len(errlist) == 0 {
+		return nil
+	}
+	// In case the input error list contains nil values
+	var errs []error
+	for _, e := range errlist {
+		if e != nil {
+			errs = append(errs, e)
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return aggregateErr(errs)
+}
+
+// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
+// Error is part of the error interface.
+func (agg aggregateErr) Error() string {
+	if len(agg) == 0 {
+		// This should never happen, really.
+		return ""
+	}
+	if len(agg) == 1 {
+		return agg[0].Error()
+	}
+	result := fmt.Sprintf("[%s", agg[0].Error())
+	for i := 1; i < len(agg); i++ {
+		result += fmt.Sprintf(", %s", agg[i].Error())
+	}
+	result += "]"
+	return result
+}
+
+// REMOVED: aggregateErr.Errors
+
+// errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
+// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
+type errConfigurationInvalid []error
+
+var _ error = errConfigurationInvalid{}
+
+// REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid.
+
+// newErrConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid.
+func newErrConfigurationInvalid(errs []error) error {
+	switch len(errs) {
+	case 0:
+		return nil
+	default:
+		return errConfigurationInvalid(errs)
+	}
+}
+
+// Error implements the error interface
+func (e errConfigurationInvalid) Error() string {
+	return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error())
+}
+
+// clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules
+// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
+// Callers can put the chain together however they want, but we'd recommend:
+// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath
+// ExplicitPath is special, because if a user specifically requests a certain file be used, an error is reported if this file is not present
+type clientConfigLoadingRules struct {
+	Precedence []string
+}
+
+// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
+// Load starts by running the MigrationRules and then
+// takes the loading rules and returns a Config object based on following rules.
+// if the ExplicitPath, return the unmerged explicit file
+// Otherwise, return a merged config based on the Precedence slice
+// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
+// Read errors or files with non-deserializable content produce errors.
+// The first file to set a particular map key wins and map key's value is never changed.
+// BUT, if you set a struct value that is NOT contained inside of map, the value WILL be changed.
+// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two.
+// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
+// non-conflicting entries from the second file's "red-user" are discarded.
+// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
+// and only absolute file paths are returned.
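+// Illustrative example (assumed kubeconfig contents): with Precedence = [a, b],
+// if both files define a cluster named "prod", only a's "prod" entry is used;
+// per the rules above, a top-level struct field such as CurrentContext set in
+// a likewise wins over b.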
+func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
+	errlist := []error{}
+
+	kubeConfigFiles := []string{}
+
+	// REMOVED: explicit path support
+	kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
+
+	kubeconfigs := []*clientcmdConfig{}
+	// read and cache the config files so that we only look at them once
+	for _, filename := range kubeConfigFiles {
+		if len(filename) == 0 {
+			// no work to do
+			continue
+		}
+
+		config, err := loadFromFile(filename)
+		if os.IsNotExist(err) {
+			// skip missing files
+			continue
+		}
+		if err != nil {
+			errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename))
+			continue
+		}
+
+		kubeconfigs = append(kubeconfigs, config)
+	}
+
+	// first merge all of our maps
+	mapConfig := clientcmdNewConfig()
+	for _, kubeconfig := range kubeconfigs {
+		mergo.MergeWithOverwrite(mapConfig, kubeconfig)
+	}
+
+	// merge all of the struct values in the reverse order so that priority is given correctly
+	// errors are not added to the list the second time
+	nonMapConfig := clientcmdNewConfig()
+	for i := len(kubeconfigs) - 1; i >= 0; i-- {
+		kubeconfig := kubeconfigs[i]
+		mergo.MergeWithOverwrite(nonMapConfig, kubeconfig)
+	}
+
+	// since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
+	// get the values we expect.
+	config := clientcmdNewConfig()
+	mergo.MergeWithOverwrite(config, mapConfig)
+	mergo.MergeWithOverwrite(config, nonMapConfig)
+
+	// REMOVED: Possibility to skip this.
+	if err := resolveLocalPaths(config); err != nil {
+		errlist = append(errlist, err)
+	}
+
+	return config, newAggregate(errlist)
+}
+
+// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
+// LoadFromFile takes a filename and deserializes the contents into a Config object
+func loadFromFile(filename string) (*clientcmdConfig, error) {
+	kubeconfigBytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	config, err := load(kubeconfigBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	// set LocationOfOrigin on every Cluster, User, and Context
+	for key, obj := range config.AuthInfos {
+		obj.LocationOfOrigin = filename
+		config.AuthInfos[key] = obj
+	}
+	for key, obj := range config.Clusters {
+		obj.LocationOfOrigin = filename
+		config.Clusters[key] = obj
+	}
+	for key, obj := range config.Contexts {
+		obj.LocationOfOrigin = filename
+		config.Contexts[key] = obj
+	}
+
+	if config.AuthInfos == nil {
+		config.AuthInfos = map[string]*clientcmdAuthInfo{}
+	}
+	if config.Clusters == nil {
+		config.Clusters = map[string]*clientcmdCluster{}
+	}
+	if config.Contexts == nil {
+		config.Contexts = map[string]*clientcmdContext{}
+	}
+
+	return config, nil
+}
+
+// load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load
+// Load takes a byte slice and deserializes the contents into a Config object.
+// Encapsulates deserialization without assuming the source is a file.
+func load(data []byte) (*clientcmdConfig, error) {
+	config := clientcmdNewConfig()
+	// if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input)
+	if len(data) == 0 {
+		return config, nil
+	}
+	// Note: This does absolutely no kind/version checking or conversions.
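+	// (Clarifying note: kubeconfig files are YAML; converting to JSON first lets
+	// plain encoding/json, including the custom UnmarshalJSON methods on the map
+	// types below, handle both YAML and JSON input.)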
+	data, err := yaml.YAMLToJSON(data)
+	if err != nil {
+		return nil, err
+	}
+	if err := json.Unmarshal(data, config); err != nil {
+		return nil, err
+	}
+	return config, nil
+}
+
+// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths.
+// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin
+// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without
+// modification of its contents.
+func resolveLocalPaths(config *clientcmdConfig) error {
+	for _, cluster := range config.Clusters {
+		if len(cluster.LocationOfOrigin) == 0 {
+			continue
+		}
+		base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
+		if err != nil {
+			return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
+		}
+
+		if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
+			return err
+		}
+	}
+	for _, authInfo := range config.AuthInfos {
+		if len(authInfo.LocationOfOrigin) == 0 {
+			continue
+		}
+		base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
+		if err != nil {
+			return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
+		}
+
+		if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences.
+func getClusterFileReferences(cluster *clientcmdCluster) []*string {
+	return []*string{&cluster.CertificateAuthority}
+}
+
+// getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences.
+func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string {
+	return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey}
+}
+
+// resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths.
+// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory
+func resolvePaths(refs []*string, base string) error {
+	for _, ref := range refs {
+		// Don't resolve empty paths
+		if len(*ref) > 0 {
+			// Don't resolve absolute paths
+			if !filepath.IsAbs(*ref) {
+				*ref = filepath.Join(base, *ref)
+			}
+		}
+	}
+	return nil
+}
+
+// restClientFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.RESTClientFor.
+// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
+// object. Note that a RESTClient may require fields that are optional when initializing a Client.
+// A RESTClient created by this method is generic - it expects to operate on an API that follows
+// the Kubernetes conventions, but may not be the Kubernetes API.
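+// Illustrative example (assumed values, not part of the upstream file): given
+// &restConfig{Host: "master.example.com", CAData: caPEM}, this returns the base
+// URL "https://master.example.com" and an *http.Client whose transport trusts
+// only the CAs in caPEM.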
+func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
+	// REMOVED: Configurable GroupVersion, Codec
+	// REMOVED: Configurable versionedAPIPath
+	baseURL, err := defaultServerURLFor(config)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	transport, err := transportFor(config)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var httpClient *http.Client
+	if transport != http.DefaultTransport {
+		httpClient = &http.Client{Transport: transport}
+	}
+
+	// REMOVED: Configurable QPS, Burst, ContentConfig
+	// REMOVED: Actually returning a RESTClient object.
+	return baseURL, httpClient, nil
+}
+
+// defaultServerURL is a modified copy of k8s.io/kubernetes/pkg/client/restclient.DefaultServerURL.
+// DefaultServerURL converts a host, host:port, or URL string to the default base server API path
+// to use with a Client at a given API version following the standard conventions for a
+// Kubernetes API.
+func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
+	if host == "" {
+		return nil, errors.Errorf("host must be a URL or a host:port pair")
+	}
+	base := host
+	hostURL, err := url.Parse(base)
+	if err != nil {
+		return nil, err
+	}
+	if hostURL.Scheme == "" {
+		scheme := "http://"
+		if defaultTLS {
+			scheme = "https://"
+		}
+		hostURL, err = url.Parse(scheme + base)
+		if err != nil {
+			return nil, err
+		}
+		if hostURL.Path != "" && hostURL.Path != "/" {
+			return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
+		}
+	}
+
+	// REMOVED: versionedAPIPath computation.
+	return hostURL, nil
+}
+
+// defaultServerURLFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.defaultServerURLFor.
+// defaultServerURLFor is shared between IsConfigTransportTLS and RESTClientFor. It
+// requires Host and Version to be set prior to being called.
+func defaultServerURLFor(config *restConfig) (*url.URL, error) {
+	// TODO: move the default to secure when the apiserver supports TLS by default
+	// config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
+	hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0
+	hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0
+	defaultTLS := hasCA || hasCert || config.Insecure
+	host := config.Host
+	if host == "" {
+		host = "localhost"
+	}
+
+	// REMOVED: Configurable APIPath, GroupVersion
+	return defaultServerURL(host, defaultTLS)
+}
+
+// transportFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.transportFor.
+// TransportFor returns an http.RoundTripper that will provide the authentication
+// or transport level security defined by the provided Config. Will return the
+// default http.DefaultTransport if no special case behavior is needed.
+func transportFor(config *restConfig) (http.RoundTripper, error) {
+	// REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support
+	return transportNew(config)
+}
+
+// isConfigTransportTLS is a modified copy of k8s.io/kubernetes/pkg/client/restclient.IsConfigTransportTLS.
+// IsConfigTransportTLS returns true if and only if the provided
+// config will result in a protected connection to the server when it
+// is passed to restclient.RESTClientFor(). Use to determine when to
+// send credentials over the wire.
+//
+// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
+// still possible.
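+// For instance (illustrative): Host "example.com" with CAData set resolves to
+// "https://example.com", so this returns true; Host "http://localhost:8080"
+// keeps its explicit scheme and returns false.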
+func isConfigTransportTLS(config restConfig) bool { + baseURL, err := defaultServerURLFor(&config) + if err != nil { + return false + } + return baseURL.Scheme == "https" +} + +// transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New. +// New returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. +func transportNew(config *restConfig) (http.RoundTripper, error) { + // REMOVED: custom config.Transport support. + // Set transport level security + + var ( + rt http.RoundTripper + err error + ) + + rt, err = tlsCacheGet(config) + if err != nil { + return nil, err + } + + // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains. + if len(config.Username) != 0 && len(config.BearerToken) != 0 { + return nil, errors.Errorf("username/password or bearer token may be set, but not both") + } + + return rt, nil +} + +// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR. +// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if +// no matching CIDRs are found +func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { + // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it + noProxyEnv := os.Getenv("NO_PROXY") + noProxyRules := strings.Split(noProxyEnv, ",") + + cidrs := []*net.IPNet{} + for _, noProxyRule := range noProxyRules { + _, cidr, _ := net.ParseCIDR(noProxyRule) + if cidr != nil { + cidrs = append(cidrs, cidr) + } + } + + if len(cidrs) == 0 { + return delegate + } + + return func(req *http.Request) (*url.URL, error) { + host := req.URL.Host + // for some urls, the Host is already the host, not the host:port + if net.ParseIP(host) == nil { + var err error + host, _, err = net.SplitHostPort(req.URL.Host) + if err != nil { + return delegate(req) + } + } + + ip := net.ParseIP(host) + if ip == nil { + return delegate(req) + } + + for _, cidr := range cidrs { + if cidr.Contains(ip) { + return nil, nil + } + } + + return delegate(req) + } +} + +// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get. +func tlsCacheGet(config *restConfig) (http.RoundTripper, error) { + // REMOVED: any actual caching + + // Get the TLS options for this client config + tlsConfig, err := tlsConfigFor(config) + if err != nil { + return nil, err + } + // The options didn't require a custom TLS config + if tlsConfig == nil { + return http.DefaultTransport, nil + } + + // REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here. + t := &http.Transport{ + // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings + // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY + Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment), + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + } + // Allow clients to disable http2 if needed. 
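+	// (Clarifying note: HTTP/2 is configured on the transport only while the
+	// DISABLE_HTTP2 environment variable is unset or empty.)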
+	if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 {
+		_ = http2.ConfigureTransport(t)
+	}
+	return t, nil
+}
+
+// tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor.
+// TLSConfigFor returns a tls.Config that will provide the transport level security defined
+// by the provided Config. Will return nil if no transport level security is requested.
+func tlsConfigFor(c *restConfig) (*tls.Config, error) {
+	if !(c.HasCA() || c.HasCertAuth() || c.Insecure) {
+		return nil, nil
+	}
+	if c.HasCA() && c.Insecure {
+		return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed")
+	}
+	if err := loadTLSFiles(c); err != nil {
+		return nil, err
+	}
+
+	tlsConfig := &tls.Config{
+		// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
+		MinVersion:         tls.VersionTLS10,
+		InsecureSkipVerify: c.Insecure,
+	}
+
+	if c.HasCA() {
+		tlsConfig.RootCAs = rootCertPool(c.CAData)
+	}
+
+	if c.HasCertAuth() {
+		cert, err := tls.X509KeyPair(c.CertData, c.KeyData)
+		if err != nil {
+			return nil, err
+		}
+		tlsConfig.Certificates = []tls.Certificate{cert}
+	}
+
+	return tlsConfig, nil
+}
+
+// loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles.
+// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
+// KeyData, and CAData fields, or returns an error. If no error is returned, all three fields are
+// either populated or were empty to start.
+func loadTLSFiles(c *restConfig) error {
+	var err error
+	c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile)
+	if err != nil {
+		return err
+	}
+
+	c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile)
+	if err != nil {
+		return err
+	}
+
+	c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile.
+// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
+// or an error if an error occurred reading the file
+func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
+	if len(data) > 0 {
+		return data, nil
+	}
+	if len(file) > 0 {
+		fileData, err := ioutil.ReadFile(file)
+		if err != nil {
+			return []byte{}, err
+		}
+		return fileData, nil
+	}
+	return nil, nil
+}
+
+// rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool.
+// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs".
+// When caData is not empty, it will be the ONLY information used in the CertPool.
+func rootCertPool(caData []byte) *x509.CertPool {
+	// What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the Go
+	// code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values
+	// It doesn't allow trusting either/or, but hopefully that won't be an issue
+	if len(caData) == 0 {
+		return nil
+	}
+
+	// if we have caData, use it
+	certPool := x509.NewCertPool()
+	certPool.AppendCertsFromPEM(caData)
+	return certPool
+}
+
+// HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA.
+// HasCA returns whether the configuration has a certificate authority or not.
+func (c *restConfig) HasCA() bool {
+	return len(c.CAData) > 0 || len(c.CAFile) > 0
+}
+
+// HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth.
+// HasCertAuth returns whether the configuration has certificate authentication or not.
+func (c *restConfig) HasCertAuth() bool {
+	return len(c.CertData) != 0 || len(c.CertFile) != 0
+}
+
+// clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config.
+// Config holds the information needed to connect to remote kubernetes clusters as a given user
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+type clientcmdConfig struct {
+	// Clusters is a map of referenceable names to cluster configs
+	Clusters clustersMap `json:"clusters"`
+	// AuthInfos is a map of referenceable names to user configs
+	AuthInfos authInfosMap `json:"users"`
+	// Contexts is a map of referenceable names to context configs
+	Contexts contextsMap `json:"contexts"`
+	// CurrentContext is the name of the context that you would like to use by default
+	CurrentContext string `json:"current-context"`
+}
+
+type clustersMap map[string]*clientcmdCluster
+
+func (m *clustersMap) UnmarshalJSON(data []byte) error {
+	var a []v1NamedCluster
+	if err := json.Unmarshal(data, &a); err != nil {
+		return err
+	}
+	for _, e := range a {
+		cluster := e.Cluster // Allocates a new instance in each iteration
+		(*m)[e.Name] = &cluster
+	}
+	return nil
+}
+
+type authInfosMap map[string]*clientcmdAuthInfo
+
+func (m *authInfosMap) UnmarshalJSON(data []byte) error {
+	var a []v1NamedAuthInfo
+	if err := json.Unmarshal(data, &a); err != nil {
+		return err
+	}
+	for _, e := range a {
+		authInfo := e.AuthInfo // Allocates a new instance in each iteration
+		(*m)[e.Name] = &authInfo
+	}
+	return nil
+}
+
+type contextsMap map[string]*clientcmdContext
+
+func (m *contextsMap) UnmarshalJSON(data []byte) error {
+	var a []v1NamedContext
+	if err := json.Unmarshal(data, &a); err != nil {
+		return err
+	}
+	for _, e := range a {
+		context := e.Context // Allocates a new instance in each iteration
+		(*m)[e.Name] = &context
+	}
+	return nil
+}
+
+// clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig.
+// NewConfig is a convenience function that returns a new Config object with non-nil maps
+func clientcmdNewConfig() *clientcmdConfig {
+	return &clientcmdConfig{
+		Clusters:  make(map[string]*clientcmdCluster),
+		AuthInfos: make(map[string]*clientcmdAuthInfo),
+		Contexts:  make(map[string]*clientcmdContext),
+	}
+}
+
+// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster.
+// Cluster contains information about how to communicate with a kubernetes cluster
+type clientcmdCluster struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string `json:"server"`
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+	// CertificateAuthority is the path to a cert file for the certificate authority.
+	CertificateAuthority string `json:"certificate-authority,omitempty"`
+	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+}
+
+// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
+// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
+type clientcmdAuthInfo struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// ClientCertificate is the path to a client cert file for TLS.
+	ClientCertificate string `json:"client-certificate,omitempty"`
+	// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+	ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+	// ClientKey is the path to a client key file for TLS.
+	ClientKey string `json:"client-key,omitempty"`
+	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+	ClientKeyData []byte `json:"client-key-data,omitempty"`
+	// Token is the bearer token for authentication to the kubernetes cluster.
+	Token string `json:"token,omitempty"`
+	// Username is the username for basic authentication to the kubernetes cluster.
+	Username string `json:"username,omitempty"`
+	// Password is the password for basic authentication to the kubernetes cluster.
+	Password string `json:"password,omitempty"`
+}
+
+// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context.
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type clientcmdContext struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// Cluster is the name of the cluster for this context
+	Cluster string `json:"cluster"`
+	// AuthInfo is the name of the authInfo for this context
+	AuthInfo string `json:"user"`
+	// Namespace is the default namespace to use on unspecified requests
+	Namespace string `json:"namespace,omitempty"`
+}
+
+// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster.
+// NamedCluster relates nicknames to cluster information
+type v1NamedCluster struct {
+	// Name is the nickname for this Cluster
+	Name string `json:"name"`
+	// Cluster holds the cluster information
+	Cluster clientcmdCluster `json:"cluster"`
+}
+
+// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext.
+// NamedContext relates nicknames to context information
+type v1NamedContext struct {
+	// Name is the nickname for this Context
+	Name string `json:"name"`
+	// Context holds the context information
+	Context clientcmdContext `json:"context"`
+}
+
+// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo.
+// NamedAuthInfo relates nicknames to auth information +type v1NamedAuthInfo struct { + // Name is the nickname for this AuthInfo + Name string `json:"name"` + // AuthInfo holds the auth information + AuthInfo clientcmdAuthInfo `json:"user"` +} diff --git a/vendor/github.com/containers/image/v4/openshift/openshift.go b/vendor/github.com/containers/image/v4/openshift/openshift.go new file mode 100644 index 000000000..51fff6269 --- /dev/null +++ b/vendor/github.com/containers/image/v4/openshift/openshift.go @@ -0,0 +1,562 @@ +package openshift + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/containers/image/v4/docker" + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/types" + "github.com/containers/image/v4/version" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// openshiftClient is configuration for dealing with a single image stream, for reading or writing. +type openshiftClient struct { + ref openshiftReference + baseURL *url.URL + // Values from Kubernetes configuration + httpClient *http.Client + bearerToken string // "" if not used + username string // "" if not used + password string // if username != "" +} + +// newOpenshiftClient creates a new openshiftClient for the specified reference. +func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { + // We have already done this parsing in ParseReference, but thrown away + // httpClient. So, parse again. + // (We could also rework/split restClientFor to "get base URL" to be done + // in ParseReference, and "get httpClient" to be done here. But until/unless + // we support non-default clusters, this is good enough.) + + // Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client. + cmdConfig := defaultClientConfig() + logrus.Debugf("cmdConfig: %#v", cmdConfig) + restConfig, err := cmdConfig.ClientConfig() + if err != nil { + return nil, err + } + // REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.) + logrus.Debugf("restConfig: %#v", restConfig) + baseURL, httpClient, err := restClientFor(restConfig) + if err != nil { + return nil, err + } + logrus.Debugf("URL: %#v", *baseURL) + + if httpClient == nil { + httpClient = http.DefaultClient + } + + return &openshiftClient{ + ref: ref, + baseURL: baseURL, + httpClient: httpClient, + bearerToken: restConfig.BearerToken, + username: restConfig.Username, + password: restConfig.Password, + }, nil +} + +// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. 
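
doRequest below layers two error models: the HTTP status code, and an optional Kubernetes-style Status object in the response body. A standalone, simplified sketch of that envelope handling; the errorFromResponse helper is illustrative and not part of this package:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type status struct {
	Status  string `json:"status,omitempty"`
	Message string `json:"message,omitempty"`
	Code    int32  `json:"code,omitempty"`
}

// errorFromResponse sketches the envelope handling in doRequest: a body that
// parses as a non-Success Status wins, otherwise the HTTP code decides.
func errorFromResponse(httpCode int, body []byte) error {
	var st status
	if err := json.Unmarshal(body, &st); err == nil && st.Status != "" && st.Status != "Success" {
		return errors.New(st.Message)
	}
	if httpCode < 200 || httpCode > 206 {
		return fmt.Errorf("HTTP error: status code: %d", httpCode)
	}
	return nil
}

func main() {
	err := errorFromResponse(403, []byte(`{"status":"Failure","message":"forbidden"}`))
	fmt.Println(err) // forbidden
}
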
+func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { + url := *c.baseURL + url.Path = path + var requestBodyReader io.Reader + if requestBody != nil { + logrus.Debugf("Will send body: %s", requestBody) + requestBodyReader = bytes.NewReader(requestBody) + } + req, err := http.NewRequest(method, url.String(), requestBodyReader) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + + if len(c.bearerToken) != 0 { + req.Header.Set("Authorization", "Bearer "+c.bearerToken) + } else if len(c.username) != 0 { + req.SetBasicAuth(c.username, c.password) + } + req.Header.Set("Accept", "application/json, */*") + req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version)) + if requestBody != nil { + req.Header.Set("Content-Type", "application/json") + } + + logrus.Debugf("%s %s", method, url.String()) + res, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + body, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + logrus.Debugf("Got body: %s", body) + // FIXME: Just throwing this useful information away only to try to guess later... + logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type")) + + var status status + statusValid := false + if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 { + statusValid = true + } + + switch { + case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient. + if statusValid && status.Status != "Success" { + return nil, errors.New(status.Message) + } + case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent: + // OK. + default: + if statusValid { + return nil, errors.New(status.Message) + } + return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body)) + } + + return body, nil +} + +// getImage loads the specified image object. +func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) { + // FIXME: validate components per validation.IsValidPathSegmentName? + path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName) + body, err := c.doRequest(ctx, "GET", path, nil) + if err != nil { + return nil, err + } + // Note: This does absolutely no kind/version checking or conversions. + var isi imageStreamImage + if err := json.Unmarshal(body, &isi); err != nil { + return nil, err + } + return &isi.Image, nil +} + +// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use; +// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside. 
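
convertDockerImageReference below keeps only the repository path of the cluster-internal value and substitutes the externally visible registry host taken from the user's reference. A runnable illustration with made-up values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Cluster-internal value as stored by OpenShift (illustrative).
	internal := "172.30.0.1:5000/myns/mystream@sha256:abc"
	// Externally reachable registry host from the user's reference (illustrative).
	externalHost := "registry.example.com"

	parts := strings.SplitN(internal, "/", 2)
	fmt.Println(externalHost + "/" + parts[1])
	// Output: registry.example.com/myns/mystream@sha256:abc
}
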
+func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { + parts := strings.SplitN(ref, "/", 2) + if len(parts) != 2 { + return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref) + } + return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil +} + +type openshiftImageSource struct { + client *openshiftClient + // Values specific to this image + sys *types.SystemContext + // State + docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet + imageStreamImageName string // Resolved image identifier, or "" if not known yet +} + +// newImageSource creates a new ImageSource for the specified reference. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(sys *types.SystemContext, ref openshiftReference) (types.ImageSource, error) { + client, err := newOpenshiftClient(ref) + if err != nil { + return nil, err + } + + return &openshiftImageSource{ + client: client, + sys: sys, + }, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *openshiftImageSource) Reference() types.ImageReference { + return s.client.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *openshiftImageSource) Close() error { + if s.docker != nil { + err := s.docker.Close() + s.docker = nil + + return err + } + + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, "", err + } + return s.docker.GetManifest(ctx, instanceDigest) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *openshiftImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, 0, err + } + return s.docker.GetBlob(ctx, info, cache) +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). 
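
GetSignatures below returns only signatures whose Type is "atomic". A minimal sketch of that filtering step, with the types trimmed to just the fields used here:

package main

import "fmt"

type imageSignature struct {
	Type    string
	Content []byte
}

const imageSignatureTypeAtomic = "atomic"

// collectAtomicSignatures mirrors the loop in GetSignatures below: only
// signatures of type "atomic" are handed back to the caller.
func collectAtomicSignatures(sigs []imageSignature) [][]byte {
	var out [][]byte
	for _, sig := range sigs {
		if sig.Type == imageSignatureTypeAtomic {
			out = append(out, sig.Content)
		}
	}
	return out
}

func main() {
	sigs := []imageSignature{{Type: "atomic", Content: []byte("sig1")}, {Type: "other"}}
	fmt.Println(len(collectAtomicSignatures(sigs))) // 1
}
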
+func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + var imageName string + if instanceDigest == nil { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, err + } + imageName = s.imageStreamImageName + } else { + imageName = instanceDigest.String() + } + image, err := s.client.getImage(ctx, imageName) + if err != nil { + return nil, err + } + var sigs [][]byte + for _, sig := range image.Signatures { + if sig.Type == imageSignatureTypeAtomic { + sigs = append(sigs, sig.Content) + } + } + return sigs, nil +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return nil, nil +} + +// ensureImageIsResolved sets up s.docker and s.imageStreamImageName +func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { + if s.docker != nil { + return nil + } + + // FIXME: validate components per validation.IsValidPathSegmentName? + path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream) + body, err := s.client.doRequest(ctx, "GET", path, nil) + if err != nil { + return err + } + // Note: This does absolutely no kind/version checking or conversions. + var is imageStream + if err := json.Unmarshal(body, &is); err != nil { + return err + } + var te *tagEvent + for _, tag := range is.Status.Tags { + if tag.Tag != s.client.ref.dockerReference.Tag() { + continue + } + if len(tag.Items) > 0 { + te = &tag.Items[0] + break + } + } + if te == nil { + return errors.Errorf("No matching tag found") + } + logrus.Debugf("tag event %#v", te) + dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference) + if err != nil { + return err + } + logrus.Debugf("Resolved reference %#v", dockerRefString) + dockerRef, err := docker.ParseReference("//" + dockerRefString) + if err != nil { + return err + } + d, err := dockerRef.NewImageSource(ctx, s.sys) + if err != nil { + return err + } + s.docker = d + s.imageStreamImageName = te.Image + return nil +} + +type openshiftImageDestination struct { + client *openshiftClient + docker types.ImageDestination // The Docker Registry endpoint + // State + imageStreamImageName string // "" if not yet known +} + +// newImageDestination creates a new ImageDestination for the specified reference. +func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) { + client, err := newOpenshiftClient(ref) + if err != nil { + return nil, err + } + + // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match, + // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know + // the manifest digest at this point. + dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag()) + dockerRef, err := docker.ParseReference(dockerRefString) + if err != nil { + return nil, err + } + docker, err := dockerRef.NewImageDestination(ctx, sys) + if err != nil { + return nil, err + } + + return &openshiftImageDestination{ + client: client, + docker: docker, + }, nil +} + +// Reference returns the reference used to set up this destination. 
Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *openshiftImageDestination) Reference() types.ImageReference {
+	return d.client.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *openshiftImageDestination) Close() error {
+	return d.docker.Close()
+}
+
+func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
+	return d.docker.SupportedManifestMIMETypes()
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *openshiftImageDestination) SupportsSignatures(ctx context.Context) error {
+	return nil
+}
+
+func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
+	return types.Compress
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
+	return true
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
+	return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
+	return d.docker.IgnoresEmbeddedDockerReference()
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte) error {
+	manifestDigest, err := manifest.Digest(m)
+	if err != nil {
+		return err
+	}
+	d.imageStreamImageName = manifestDigest.String()
+
+	return d.docker.PutManifest(ctx, m)
+}
+
+func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+	if d.imageStreamImageName == "" {
+		return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
+	}
+	// Because image signatures are a shared resource in Atomic Registry, the default upload
+	// always adds signatures. Eventually we should also allow removing signatures.
+
+	if len(signatures) == 0 {
+		return nil // No need to even read the old state.
+	}
+
+	image, err := d.client.getImage(ctx, d.imageStreamImageName)
+	if err != nil {
+		return err
+	}
+	existingSigNames := map[string]struct{}{}
+	for _, sig := range image.Signatures {
+		existingSigNames[sig.objectMeta.Name] = struct{}{}
+	}
+
+sigExists:
+	for _, newSig := range signatures {
+		for _, existingSig := range image.Signatures {
+			if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
+				continue sigExists
+			}
+		}
+
+		// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+		var signatureName string
+		for {
+			randBytes := make([]byte, 16)
+			n, err := rand.Read(randBytes)
+			if err != nil || n != 16 {
+				return errors.Wrapf(err, "Error generating random signature len %d", n)
+			}
+			signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes)
+			if _, ok := existingSigNames[signatureName]; !ok {
+				break
+			}
+		}
+		// Note: This does absolutely no kind/version checking or conversions.
+		sig := imageSignature{
+			typeMeta: typeMeta{
+				Kind:       "ImageSignature",
+				APIVersion: "v1",
+			},
+			objectMeta: objectMeta{Name: signatureName},
+			Type:       imageSignatureTypeAtomic,
+			Content:    newSig,
+		}
+		body, err := json.Marshal(sig)
+		if err != nil {
+			return err
+		}
+		_, err = d.client.doRequest(ctx, "POST", "/oapi/v1/imagesignatures", body)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e.
rollback is allowed but not guaranteed) +func (d *openshiftImageDestination) Commit(ctx context.Context) error { + return d.docker.Commit(ctx) +} + +// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies. +type imageStream struct { + Status imageStreamStatus `json:"status,omitempty"` +} +type imageStreamStatus struct { + DockerImageRepository string `json:"dockerImageRepository"` + Tags []namedTagEventList `json:"tags,omitempty"` +} +type namedTagEventList struct { + Tag string `json:"tag"` + Items []tagEvent `json:"items"` +} +type tagEvent struct { + DockerImageReference string `json:"dockerImageReference"` + Image string `json:"image"` +} +type imageStreamImage struct { + Image image `json:"image"` +} +type image struct { + objectMeta `json:"metadata,omitempty"` + DockerImageReference string `json:"dockerImageReference,omitempty"` + // DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"` + DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"` + DockerImageManifest string `json:"dockerImageManifest,omitempty"` + // DockerImageLayers []ImageLayer `json:"dockerImageLayers"` + Signatures []imageSignature `json:"signatures,omitempty"` +} + +const imageSignatureTypeAtomic string = "atomic" + +type imageSignature struct { + typeMeta `json:",inline"` + objectMeta `json:"metadata,omitempty"` + Type string `json:"type"` + Content []byte `json:"content"` + // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // ImageIdentity string `json:"imageIdentity,omitempty"` + // SignedClaims map[string]string `json:"signedClaims,omitempty"` + // Created *unversioned.Time `json:"created,omitempty"` + // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"` + // IssuedTo SignatureSubject `json:"issuedTo,omitempty"` +} +type typeMeta struct { + Kind string `json:"kind,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` +} +type objectMeta struct { + Name string `json:"name,omitempty"` + GenerateName string `json:"generateName,omitempty"` + Namespace string `json:"namespace,omitempty"` + SelfLink string `json:"selfLink,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty"` + Generation int64 `json:"generation,omitempty"` + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status +type status struct { + Status string `json:"status,omitempty"` + Message string `json:"message,omitempty"` + // Reason StatusReason `json:"reason,omitempty"` + // Details *StatusDetails `json:"details,omitempty"` + Code int32 `json:"code,omitempty"` +} diff --git a/vendor/github.com/containers/image/v4/openshift/openshift_transport.go b/vendor/github.com/containers/image/v4/openshift/openshift_transport.go new file mode 100644 index 000000000..f00c94561 --- /dev/null +++ b/vendor/github.com/containers/image/v4/openshift/openshift_transport.go @@ -0,0 +1,157 @@ +package openshift + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/containers/image/v4/docker/policyconfiguration" + "github.com/containers/image/v4/docker/reference" + genericImage "github.com/containers/image/v4/image" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/types" + "github.com/pkg/errors" +) + +func init() { + 
transports.Register(Transport) +} + +// Transport is an ImageTransport for OpenShift registry-hosted images. +var Transport = openshiftTransport{} + +type openshiftTransport struct{} + +func (t openshiftTransport) Name() string { + return "atomic" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// Note that imageNameRegexp is namespace/stream:tag, this +// is HOSTNAME/namespace/stream:tag or parent prefixes. +// Keep this in sync with imageNameRegexp! +var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$") + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error { + if scopeRegexp.FindStringIndex(scope) == nil { + return errors.Errorf("Invalid scope name %s", scope) + } + return nil +} + +// openshiftReference is an ImageReference for OpenShift images. +type openshiftReference struct { + dockerReference reference.NamedTagged + namespace string // Computed from dockerReference in advance. + stream string // Computed from dockerReference in advance. +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference. +func ParseReference(ref string) (types.ImageReference, error) { + r, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse image reference %q", ref) + } + tagged, ok := r.(reference.NamedTagged) + if !ok { + return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref) + } + return NewReference(tagged) +} + +// NewReference returns an OpenShift reference for a reference.NamedTagged +func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) { + r := strings.SplitN(reference.Path(dockerRef), "/", 3) + if len(r) != 2 { + return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'", + reference.FamiliarString(dockerRef)) + } + return openshiftReference{ + namespace: r[0], + stream: r[1], + dockerReference: dockerRef, + }, nil +} + +func (ref openshiftReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref openshiftReference) StringWithinTransport() string { + return reference.FamiliarString(ref.dockerReference) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. 
!reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref openshiftReference) DockerReference() reference.Named { + return ref.dockerReference +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref openshiftReference) PolicyConfigurationIdentity() string { + res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference) + if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref openshiftReference) PolicyConfigurationNamespaces() []string { + return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference) +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(sys, ref) + if err != nil { + return nil, err + } + return genericImage.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref openshiftReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(ctx, sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. 
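
PolicyConfigurationNamespaces above delegates to the policyconfiguration helpers, which walk from the repository path down to the registry host. A simplified sketch of how such a prefix list is derived, assuming a host/namespace/stream:tag identity; the real logic lives in docker/policyconfiguration:

package main

import (
	"fmt"
	"strings"
)

// namespacesOf sketches how policy namespaces are derived: each parent
// repository path of the identity, down to the registry host.
// (Simplified; the real implementation is docker/policyconfiguration.)
func namespacesOf(identity string) []string {
	name := identity[:strings.LastIndex(identity, ":")] // strip the ":tag" suffix
	var out []string
	for {
		i := strings.LastIndex(name, "/")
		if i < 0 {
			break
		}
		out = append(out, name)
		name = name[:i]
	}
	return append(out, name)
}

func main() {
	fmt.Println(namespacesOf("registry.example.com:5000/myns/mystream:v1"))
	// [registry.example.com:5000/myns/mystream registry.example.com:5000/myns registry.example.com:5000]
}
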
+func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for atomic: images")
+}
diff --git a/vendor/github.com/containers/image/v4/ostree/ostree_dest.go b/vendor/github.com/containers/image/v4/ostree/ostree_dest.go
new file mode 100644
index 000000000..9e1436e29
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/ostree/ostree_dest.go
@@ -0,0 +1,504 @@
+// +build containers_image_ostree
+
+package ostree
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"github.com/containers/image/v4/manifest"
+	"github.com/containers/image/v4/types"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/klauspost/pgzip"
+	"github.com/opencontainers/go-digest"
+	selinux "github.com/opencontainers/selinux/go-selinux"
+	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
+	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+// #include <selinux/selinux.h>
+// #include <selinux/label.h>
+import "C"
+
+type blobToImport struct {
+	Size     int64
+	Digest   digest.Digest
+	BlobPath string
+}
+
+type descriptor struct {
+	Size   int64         `json:"size"`
+	Digest digest.Digest `json:"digest"`
+}
+
+type fsLayersSchema1 struct {
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+type manifestSchema struct {
+	LayersDescriptors []descriptor      `json:"layers"`
+	FSLayers          []fsLayersSchema1 `json:"fsLayers"`
+}
+
+type ostreeImageDestination struct {
+	ref           ostreeReference
+	manifest      string
+	schema        manifestSchema
+	tmpDirPath    string
+	blobs         map[string]*blobToImport
+	digest        digest.Digest
+	signaturesLen int
+	repo          *C.struct_OstreeRepo
+}
+
+// newImageDestination returns an ImageDestination for writing to an existing ostree.
+func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) {
+	tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
+	if err := ensureDirectoryExists(tmpDirPath); err != nil {
+		return nil, err
+	}
+	return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *ostreeImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *ostreeImageDestination) Close() error {
+	if d.repo != nil {
+		C.g_object_unref(C.gpointer(d.repo))
+	}
+	return os.RemoveAll(d.tmpDirPath)
+}
+
+func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string {
+	return []string{
+		manifest.DockerV2Schema2MediaType,
+	}
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *ostreeImageDestination) SupportsSignatures(ctx context.Context) error {
+	return nil
+}
+
+// DesiredLayerCompression indicates the kind of compression to apply to layer blobs written to this destination.
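
The manifestSchema type above decodes only the layer lists from either manifest schema, ignoring everything else. A self-contained sketch of the same trimmed decoding against a sample schema2 manifest:

package main

import (
	"encoding/json"
	"fmt"
)

type descriptor struct {
	Size   int64  `json:"size"`
	Digest string `json:"digest"`
}

// trimmed mirrors manifestSchema above: only the fields needed to walk
// layers are decoded, for both schema2 ("layers") and schema1 ("fsLayers").
type trimmed struct {
	Layers   []descriptor `json:"layers"`
	FSLayers []struct {
		BlobSum string `json:"blobSum"`
	} `json:"fsLayers"`
}

func main() {
	blob := []byte(`{"layers":[{"size":123,"digest":"sha256:abc"}]}`)
	var m trimmed
	if err := json.Unmarshal(blob, &m); err != nil {
		panic(err)
	}
	fmt.Println(m.Layers[0].Digest) // sha256:abc
}
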
+func (d *ostreeImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.PreserveOriginal +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *ostreeImageDestination) MustMatchRuntimeOS() bool { + return true +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ostreeImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") + if err != nil { + return types.BlobInfo{}, err + } + + blobPath := filepath.Join(tmpDir, "content") + blobFile, err := os.Create(blobPath) + if err != nil { + return types.BlobInfo{}, err + } + defer blobFile.Close() + + digester := digest.Canonical.Digester() + tee := io.TeeReader(stream, digester.Hash()) + + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). 
+ size, err := io.Copy(blobFile, tee) + if err != nil { + return types.BlobInfo{}, err + } + computedDigest := digester.Digest() + if inputInfo.Size != -1 && size != inputInfo.Size { + return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) + } + if err := blobFile.Sync(); err != nil { + return types.BlobInfo{}, err + } + + hash := computedDigest.Hex() + d.blobs[hash] = &blobToImport{Size: size, Digest: computedDigest, BlobPath: blobPath} + return types.BlobInfo{Digest: computedDigest, Size: size}, nil +} + +func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { + entries, err := ioutil.ReadDir(dir) + if err != nil { + return err + } + + for _, info := range entries { + fullpath := filepath.Join(dir, info.Name()) + if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + if err := os.Remove(fullpath); err != nil { + return err + } + continue + } + + if selinuxHnd != nil { + relPath, err := filepath.Rel(root, fullpath) + if err != nil { + return err + } + // Handle /exports/hostfs as a special case. Files under this directory are copied to the host, + // thus we benefit from maintaining the same SELinux label they would have on the host as we could + // use hard links instead of copying the files. + relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/")) + + relPathC := C.CString(relPath) + defer C.free(unsafe.Pointer(relPathC)) + var context *C.char + + res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm)) + if int(res) < 0 && err != syscall.ENOENT { + return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath) + } + if int(res) == 0 { + defer C.freecon(context) + fullpathC := C.CString(fullpath) + defer C.free(unsafe.Pointer(fullpathC)) + res, err = C.lsetfilecon_raw(fullpathC, context) + if int(res) < 0 { + return errors.Wrapf(err, "cannot setfilecon_raw %s to %s", fullpath, C.GoString(context)) + } + } + } + + if info.IsDir() { + if usermode { + if err := os.Chmod(fullpath, info.Mode()|0700); err != nil { + return err + } + } + err = fixFiles(selinuxHnd, root, fullpath, usermode) + if err != nil { + return err + } + } else if usermode && (info.Mode().IsRegular()) { + if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { + return err + } + } + } + + return nil +} + +func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error { + opts := otbuiltin.NewCommitOptions() + opts.AddMetadataString = metadata + opts.Timestamp = time.Now() + // OCI layers have no parent OSTree commit + opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" + _, err := repo.Commit(root, branch, opts) + return err +} + +func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) { + mfz := pgzip.NewWriter(output) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + stream, err := os.OpenFile(file, os.O_RDONLY, 0) + if err != nil { + return "", -1, err + } + defer stream.Close() + + gzReader, err := archive.DecompressStream(stream) + if err != nil { + return "", -1, err + } + defer gzReader.Close() + + its, err := asm.NewInputTarStream(gzReader, metaPacker, nil) + if err != nil { + return "", -1, err + } + + digester := digest.Canonical.Digester() + + written, err := io.Copy(digester.Hash(), its) + if err != nil { + return "", -1, err + } + + return digester.Digest(), written, 
nil
+}
+
+func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+
+	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+	destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
+	if err := ensureDirectoryExists(destinationPath); err != nil {
+		return err
+	}
+	defer func() {
+		os.Remove(blob.BlobPath)
+		os.RemoveAll(destinationPath)
+	}()
+
+	var tarSplitOutput bytes.Buffer
+	uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath)
+	if err != nil {
+		return err
+	}
+
+	if os.Getuid() == 0 {
+		if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
+			return err
+		}
+		if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil {
+			return err
+		}
+	} else {
+		os.MkdirAll(destinationPath, 0755)
+		if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil {
+			return err
+		}
+
+		if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil {
+			return err
+		}
+	}
+	return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size),
+		fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize),
+		fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()),
+		fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))})
+
+}
+
+func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
+	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+	destinationPath := filepath.Dir(blob.BlobPath)
+
+	return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
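
importBlob above records each blob's properties as OSTree commit metadata on a per-digest branch, and TryReusingBlob below probes those same keys. A sketch of the key=value convention, with illustrative values:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Illustrative blob properties.
	digestHex := "0123abc"
	size, uncompressedSize := int64(100), int64(250)
	tarSplit := []byte("...gzipped tar-split JSON...")

	// One OSTree branch per blob digest, carrying the metadata strings
	// that TryReusingBlob and the ostree source read back later.
	branch := fmt.Sprintf("ociimage/%s", digestHex)
	metadata := []string{
		fmt.Sprintf("docker.size=%d", size),
		fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize),
		fmt.Sprintf("docker.uncompressed_digest=sha256:%s", digestHex),
		fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplit)),
	}
	fmt.Println(branch, metadata)
}
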
+func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + if d.repo == nil { + repo, err := openRepo(d.ref.repo) + if err != nil { + return false, types.BlobInfo{}, err + } + d.repo = repo + } + branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex()) + + found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest") + if err != nil || !found { + return found, types.BlobInfo{}, err + } + + found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size") + if err != nil || !found { + return found, types.BlobInfo{}, err + } + + found, data, err = readMetadata(d.repo, branch, "docker.size") + if err != nil || !found { + return found, types.BlobInfo{}, err + } + + size, err := strconv.ParseInt(data, 10, 64) + if err != nil { + return false, types.BlobInfo{}, err + } + + return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil +} + +// PutManifest writes manifest to the destination. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error { + d.manifest = string(manifestBlob) + + if err := json.Unmarshal(manifestBlob, &d.schema); err != nil { + return err + } + + manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath()) + if err := ensureParentDirectoryExists(manifestPath); err != nil { + return err + } + + digest, err := manifest.Digest(manifestBlob) + if err != nil { + return err + } + d.digest = digest + + return ioutil.WriteFile(manifestPath, manifestBlob, 0644) +} + +func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { + path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0)) + if err := ensureParentDirectoryExists(path); err != nil { + return err + } + + for i, sig := range signatures { + signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i)) + if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil { + return err + } + } + d.signaturesLen = len(signatures) + return nil +} + +func (d *ostreeImageDestination) Commit(ctx context.Context) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + repo, err := otbuiltin.OpenRepo(d.ref.repo) + if err != nil { + return err + } + + _, err = repo.PrepareTransaction() + if err != nil { + return err + } + + var selinuxHnd *C.struct_selabel_handle + + if os.Getuid() == 0 && selinux.GetEnabled() { + selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0) + if selinuxHnd == nil { + return errors.Wrapf(err, "cannot open the SELinux DB") + } + + defer C.selabel_close(selinuxHnd) + } + + checkLayer := func(hash string) error { + blob := d.blobs[hash] + // if the blob is not present in d.blobs then it is already stored in OSTree, + // and we don't need to import it. 
+		if blob == nil {
+			return nil
+		}
+		err := d.importBlob(selinuxHnd, repo, blob)
+		if err != nil {
+			return err
+		}
+
+		delete(d.blobs, hash)
+		return nil
+	}
+	for _, layer := range d.schema.LayersDescriptors {
+		hash := layer.Digest.Hex()
+		if err = checkLayer(hash); err != nil {
+			return err
+		}
+	}
+	for _, layer := range d.schema.FSLayers {
+		hash := layer.BlobSum.Hex()
+		if err = checkLayer(hash); err != nil {
+			return err
+		}
+	}
+
+	// Import the other blobs that are not layers
+	for _, blob := range d.blobs {
+		err := d.importConfig(repo, blob)
+		if err != nil {
+			return err
+		}
+	}
+
+	manifestPath := filepath.Join(d.tmpDirPath, "manifest")
+
+	metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
+		fmt.Sprintf("signatures=%d", d.signaturesLen),
+		fmt.Sprintf("docker.digest=%s", string(d.digest))}
+	if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil {
+		return err
+	}
+
+	_, err = repo.CommitTransaction()
+	return err
+}
+
+func ensureDirectoryExists(path string) error {
+	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+		if err := os.MkdirAll(path, 0755); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func ensureParentDirectoryExists(path string) error {
+	return ensureDirectoryExists(filepath.Dir(path))
+}
diff --git a/vendor/github.com/containers/image/v4/ostree/ostree_src.go b/vendor/github.com/containers/image/v4/ostree/ostree_src.go
new file mode 100644
index 000000000..ecb6e3f84
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/ostree/ostree_src.go
@@ -0,0 +1,420 @@
+// +build containers_image_ostree
+
+package ostree
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strconv"
+	"strings"
+	"unsafe"
+
+	"github.com/containers/image/v4/manifest"
+	"github.com/containers/image/v4/types"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/klauspost/pgzip"
+	digest "github.com/opencontainers/go-digest"
+	glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+import "C"
+
+type ostreeImageSource struct {
+	ref    ostreeReference
+	tmpDir string
+	repo   *C.struct_OstreeRepo
+	// get the compressed layer by its uncompressed checksum
+	compressed map[digest.Digest]digest.Digest
+}
+
+// newImageSource returns an ImageSource for reading from an existing ostree repository.
+func newImageSource(tmpDir string, ref ostreeReference) (types.ImageSource, error) {
+	return &ostreeImageSource{ref: ref, tmpDir: tmpDir, compressed: nil}, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ostreeImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
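
Commit above stores the image-level metadata, including a "signatures=%d" counter; the source side treats a missing counter as zero signatures rather than an error. A minimal sketch of that readback convention; the parseSignatureCount helper is hypothetical:

package main

import (
	"fmt"
	"strconv"
)

// parseSignatureCount mirrors getLenSignatures below: a missing key means
// zero signatures, not a failure.
func parseSignatureCount(found bool, value string) (int64, error) {
	if !found {
		return 0, nil
	}
	return strconv.ParseInt(value, 10, 64)
}

func main() {
	n, _ := parseSignatureCount(true, "2")
	fmt.Println(n) // 2
}
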
+func (s *ostreeImageSource) Close() error { + if s.repo != nil { + C.g_object_unref(C.gpointer(s.repo)) + } + return nil +} + +func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) { + var metadataKey string + if isCompressed { + metadataKey = "docker.uncompressed_size" + } else { + metadataKey = "docker.size" + } + b := fmt.Sprintf("ociimage/%s", blob) + found, data, err := readMetadata(s.repo, b, metadataKey) + if err != nil || !found { + return 0, err + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getLenSignatures() (int64, error) { + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, data, err := readMetadata(s.repo, b, "signatures") + if err != nil { + return -1, err + } + if !found { + // if 'signatures' is not present, just return 0 signatures. + return 0, nil + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) { + b := fmt.Sprintf("ociimage/%s", blob) + found, out, err := readMetadata(s.repo, b, "tarsplit.output") + if err != nil || !found { + return nil, err + } + return base64.StdEncoding.DecodeString(out) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.Errorf(`Manifest lists are not supported by "ostree:"`) + } + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, "", err + } + s.repo = repo + } + + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, out, err := readMetadata(s.repo, b, "docker.manifest") + if err != nil { + return nil, "", err + } + if !found { + return nil, "", errors.New("manifest not found") + } + m := []byte(out) + return m, manifest.GuessMIMEType(m), nil +} + +func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { + return nil, "", errors.New("manifest lists are not supported by this transport") +} + +func openRepo(path string) (*C.struct_OstreeRepo, error) { + var cerr *C.GError + cpath := C.CString(path) + defer C.free(unsafe.Pointer(cpath)) + pathc := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(pathc)) + repo := C.ostree_repo_new(pathc) + r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) + if !r { + C.g_object_unref(C.gpointer(repo)) + return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + return repo, nil +} + +type ostreePathFileGetter struct { + repo *C.struct_OstreeRepo + parentRoot *C.GFile +} + +type ostreeReader struct { + stream *C.GFileInputStream +} + +func (o ostreeReader) Close() error { + C.g_object_unref(C.gpointer(o.stream)) + return nil +} +func (o ostreeReader) Read(p []byte) (int, error) { + var cerr *C.GError + instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type()) + stream := (*C.GInputStream)(unsafe.Pointer(instanceCast)) + + b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr) + if b == nil { + return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + defer C.g_bytes_unref(b) + + count := int(C.g_bytes_get_size(b)) + if count == 0 { + return 0, io.EOF + } + data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, 
nil)))[:count:count]
+	copy(p, data)
+	return count, nil
+}
+
+func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) {
+	var cerr *C.GError
+	var ref *C.char
+	// The deferred function captures ref by reference; deferring C.free(unsafe.Pointer(ref)) directly would evaluate the still-nil value and leak the string set by ostree_repo_resolve_rev below.
+	defer func() { C.free(unsafe.Pointer(ref)) }()
+
+	cCommit := C.CString(commit)
+	defer C.free(unsafe.Pointer(cCommit))
+
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) {
+		return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+
+	if ref == nil {
+		return false, "", nil
+	}
+
+	var variant *C.GVariant
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) {
+		return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+	defer C.g_variant_unref(variant)
+	if variant != nil {
+		cKey := C.CString(key)
+		defer C.free(unsafe.Pointer(cKey))
+
+		metadata := C.g_variant_get_child_value(variant, 0)
+		defer C.g_variant_unref(metadata)
+
+		data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil)
+		if data != nil {
+			defer C.g_variant_unref(data)
+			ptr := (*C.char)(C.g_variant_get_string(data, nil))
+			val := C.GoString(ptr)
+			return true, val, nil
+		}
+	}
+	return false, "", nil
+}
+
+func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) {
+	var cerr *C.GError
+	var parentRoot *C.GFile
+	cCommit := C.CString(commit)
+	defer C.free(unsafe.Pointer(cCommit))
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) {
+		return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+
+	C.g_object_ref(C.gpointer(repo))
+
+	return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil
+}
+
+func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
+	var file *C.GFile
+	if strings.HasPrefix(filename, "./") {
+		filename = filename[2:]
+	}
+	cfilename := C.CString(filename)
+	defer C.free(unsafe.Pointer(cfilename))
+
+	file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename))
+
+	var cerr *C.GError
+	stream := C.g_file_read(file, nil, &cerr)
+	if stream == nil {
+		return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+
+	return &ostreeReader{stream: stream}, nil
+}
+
+func (o ostreePathFileGetter) Close() {
+	C.g_object_unref(C.gpointer(o.repo))
+	C.g_object_unref(C.gpointer(o.parentRoot))
+}
+
+func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) {
+	getter, err := newOSTreePathFileGetter(s.repo, commit)
+	if err != nil {
+		return nil, err
+	}
+	defer getter.Close()
+
+	return getter.Get(path)
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *ostreeImageSource) HasThreadSafeGetBlob() bool {
+	return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+
+	blob := info.Digest.Hex()
+
+	// Ensure s.compressed is initialized. It is built by LayerInfosForCopy.
+ if s.compressed == nil { + _, err := s.LayerInfosForCopy(ctx) + if err != nil { + return nil, -1, err + } + + } + compressedBlob, isCompressed := s.compressed[info.Digest] + if isCompressed { + blob = compressedBlob.Hex() + } + branch := fmt.Sprintf("ociimage/%s", blob) + + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, 0, err + } + s.repo = repo + } + + layerSize, err := s.getBlobUncompressedSize(blob, isCompressed) + if err != nil { + return nil, 0, err + } + + tarsplit, err := s.getTarSplitData(blob) + if err != nil { + return nil, 0, err + } + + // if tarsplit is nil we are looking at the manifest. Return directly the file in /content + if tarsplit == nil { + file, err := s.readSingleFile(branch, "/content") + if err != nil { + return nil, 0, err + } + return file, layerSize, nil + } + + mf := bytes.NewReader(tarsplit) + mfz, err := pgzip.NewReader(mf) + if err != nil { + return nil, 0, err + } + metaUnpacker := storage.NewJSONUnpacker(mfz) + + getter, err := newOSTreePathFileGetter(s.repo, branch) + if err != nil { + mfz.Close() + return nil, 0, err + } + + ots := asm.NewOutputTarStream(getter, metaUnpacker) + + rc := ioutils.NewReadCloserWrapper(ots, func() error { + getter.Close() + mfz.Close() + return ots.Close() + }) + return rc, layerSize, nil +} + +func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.New("manifest lists are not supported by this transport") + } + lenSignatures, err := s.getLenSignatures() + if err != nil { + return nil, err + } + branch := fmt.Sprintf("ociimage/%s", s.ref.branchName) + + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, err + } + s.repo = repo + } + + signatures := [][]byte{} + for i := int64(1); i <= lenSignatures; i++ { + sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i)) + if err != nil { + return nil, err + } + defer sigReader.Close() + + sig, err := ioutil.ReadAll(sigReader) + if err != nil { + return nil, err + } + signatures = append(signatures, sig) + } + return signatures, nil +} + +// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of +// the image, after they've been decompressed. 
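
GetBlob above rebuilds the original layer tarball from checked-out files plus the stored tar-split metadata. A standalone sketch of the same reassembly using the tar-split and pgzip APIs; the file paths are illustrative:

package main

import (
	"io"
	"os"

	"github.com/klauspost/pgzip"
	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

// Reassemble a tar stream from on-disk file payloads plus recorded
// tar-split metadata, mirroring the flow in GetBlob above.
func main() {
	meta, err := os.Open("layer.tar-split.gz") // gzipped tar-split JSON (illustrative path)
	if err != nil {
		panic(err)
	}
	defer meta.Close()
	mfz, err := pgzip.NewReader(meta)
	if err != nil {
		panic(err)
	}
	defer mfz.Close()

	unpacker := storage.NewJSONUnpacker(mfz)
	getter := storage.NewPathFileGetter("checkout/root") // directory with file payloads
	tarStream := asm.NewOutputTarStream(getter, unpacker)
	defer tarStream.Close()

	out, err := os.Create("layer.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, tarStream); err != nil {
		panic(err)
	}
}
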
+func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	updatedBlobInfos := []types.BlobInfo{}
+	manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	man, err := manifest.FromBlob(manifestBlob, manifestType)
+	if err != nil {
+		return nil, err
+	}
+
+	s.compressed = make(map[digest.Digest]digest.Digest)
+
+	layerBlobs := man.LayerInfos()
+
+	for _, layerBlob := range layerBlobs {
+		branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
+		found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
+		if err != nil || !found {
+			return nil, err
+		}
+
+		found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size")
+		if err != nil || !found {
+			return nil, err
+		}
+
+		uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		uncompressedDigest := digest.Digest(uncompressedDigestStr)
+		blobInfo := types.BlobInfo{
+			Digest:    uncompressedDigest,
+			Size:      uncompressedSize,
+			MediaType: layerBlob.MediaType,
+		}
+		s.compressed[uncompressedDigest] = layerBlob.Digest
+		updatedBlobInfos = append(updatedBlobInfos, blobInfo)
+	}
+	return updatedBlobInfos, nil
+}
diff --git a/vendor/github.com/containers/image/v4/ostree/ostree_transport.go b/vendor/github.com/containers/image/v4/ostree/ostree_transport.go
new file mode 100644
index 000000000..d720cb7ac
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/ostree/ostree_transport.go
@@ -0,0 +1,252 @@
+// +build containers_image_ostree
+
+package ostree
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/containers/image/v4/directory/explicitfilepath"
+	"github.com/containers/image/v4/docker/reference"
+	"github.com/containers/image/v4/image"
+	"github.com/containers/image/v4/transports"
+	"github.com/containers/image/v4/types"
+	"github.com/pkg/errors"
+)
+
+const defaultOSTreeRepo = "/ostree/repo"
+
+// Transport is an ImageTransport for ostree paths.
+var Transport = ostreeTransport{}
+
+type ostreeTransport struct{}
+
+func (t ostreeTransport) Name() string {
+	return "ostree"
+}
+
+func init() {
+	transports.Register(Transport)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
+	sep := strings.Index(scope, ":")
+	if sep < 0 {
+		return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
+	}
+	repo := scope[:sep]
+
+	if !strings.HasPrefix(repo, "/") {
+		return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
+	}
+	cleaned := filepath.Clean(repo)
+	if cleaned != repo {
+		return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+	}
+
+	// FIXME? In the namespaces within a repo,
+	// we could be verifying the various character set and length restrictions
+	// from docker/distribution/reference.regexp.go, but other than that there
+	// are few semantically invalid strings.
+	return nil
+}
+
+// ostreeReference is an ImageReference for ostree paths.
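+// For example (illustrative values), ParseReference("busybox@/ostree/repo") yields
+// image "busybox:latest" (":latest" is appended to name-only references),
+// branchName "busybox_3Alatest" (":" is escaped by encodeOStreeRef), and repo "/ostree/repo".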
+type ostreeReference struct {
+	image      string
+	branchName string
+	repo       string
+}
+
+type ostreeImageCloser struct {
+	types.ImageCloser
+	size int64
+}
+
+func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
+	var repo = ""
+	var image = ""
+	s := strings.SplitN(ref, "@/", 2)
+	if len(s) == 1 {
+		image, repo = s[0], defaultOSTreeRepo
+	} else {
+		image, repo = s[0], "/"+s[1]
+	}
+
+	return NewReference(image, repo)
+}
+
+// NewReference returns an OSTree reference for a specified repo and image.
+func NewReference(image string, repo string) (types.ImageReference, error) {
+	// image is not _really_ in a containers/image/docker/reference format;
+	// as far as the libOSTree ociimage/* namespace is concerned, it is more or
+	// less an arbitrary string with an implied tag.
+	// Parse the image using reference.ParseNormalizedNamed so that we can
+	// check whether the image has a tag specified and we can add ":latest" if needed
+	ostreeImage, err := reference.ParseNormalizedNamed(image)
+	if err != nil {
+		return nil, err
+	}
+
+	if reference.IsNameOnly(ostreeImage) {
+		image = image + ":latest"
+	}
+
+	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
+	if err != nil {
+		// With os.IsNotExist(err), the parent directory of repo does not exist either;
+		// that should ordinarily not happen, but it would be a bit weird to reject
+		// references which do not specify a repo just because the implicit defaultOSTreeRepo
+		// does not exist.
+		if os.IsNotExist(err) && repo == defaultOSTreeRepo {
+			resolved = repo
+		} else {
+			return nil, err
+		}
+	}
+	// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
+	// from being ambiguous with values of PolicyConfigurationIdentity.
+	if strings.Contains(resolved, ":") {
+		return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
+	}
+
+	return ostreeReference{
+		image:      image,
+		branchName: encodeOStreeRef(image),
+		repo:       resolved,
+	}, nil
+}
+
+func (ref ostreeReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref ostreeReference) StringWithinTransport() string {
+	return fmt.Sprintf("%s@%s", ref.image, ref.repo)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ostreeReference) DockerReference() reference.Named {
+	return nil
+}
+
+func (ref ostreeReference) PolicyConfigurationIdentity() string {
+	return fmt.Sprintf("%s:%s", ref.repo, ref.image)
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
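+// For example (illustrative), for repo "/ostree/repo" and image "ns/app:latest" this returns
+// "/ostree/repo:ns/app", then "/ostree/repo:ns".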
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
+	s := strings.SplitN(ref.image, ":", 2)
+	if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
+		panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
+	}
+	name := s[0]
+	res := []string{}
+	for {
+		res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))
+
+		lastSlash := strings.LastIndex(name, "/")
+		if lastSlash == -1 {
+			break
+		}
+		name = name[:lastSlash]
+	}
+	return res
+}
+
+func (s *ostreeImageCloser) Size() (int64, error) {
+	return s.size, nil
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	var tmpDir string
+	if sys == nil || sys.OSTreeTmpDirPath == "" {
+		tmpDir = os.TempDir()
+	} else {
+		tmpDir = sys.OSTreeTmpDirPath
+	}
+	src, err := newImageSource(tmpDir, ref)
+	if err != nil {
+		return nil, err
+	}
+	return image.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ostreeReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	var tmpDir string
+	if sys == nil || sys.OSTreeTmpDirPath == "" {
+		tmpDir = os.TempDir()
+	} else {
+		tmpDir = sys.OSTreeTmpDirPath
+	}
+	return newImageSource(tmpDir, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	var tmpDir string
+	if sys == nil || sys.OSTreeTmpDirPath == "" {
+		tmpDir = os.TempDir()
+	} else {
+		tmpDir = sys.OSTreeTmpDirPath
+	}
+	return newImageDestination(ref, tmpDir)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for ostree: images")
+}
+
+var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`)
+
+func encodeOStreeRef(in string) string {
+	var buffer bytes.Buffer
+	for i := range in {
+		sub := in[i : i+1]
+		if ostreeRefRegexp.MatchString(sub) {
+			buffer.WriteString(sub)
+		} else {
+			buffer.WriteString(fmt.Sprintf("_%02X", sub[0]))
+		}
+	}
+	return buffer.String()
+}
+
+// manifestPath returns a path for the manifest within an ostree using our conventions.
+func (ref ostreeReference) manifestPath() string {
+	return filepath.Join("manifest", "manifest.json")
+}
+
+// signaturePath returns a path for a signature within an ostree using our conventions.
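+// For example, signaturePath(0) is "manifest/signature-1": the on-disk index is 1-based.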
+func (ref ostreeReference) signaturePath(index int) string {
+	return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1))
+}
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/v4/pkg/blobinfocache/boltdb/boltdb.go
new file mode 100644
index 000000000..85eb7d6f1
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/blobinfocache/boltdb/boltdb.go
@@ -0,0 +1,332 @@
+// Package boltdb implements a BlobInfoCache backed by BoltDB.
+package boltdb
+
+import (
+	"fmt"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize"
+	"github.com/containers/image/v4/types"
+	bolt "github.com/etcd-io/bbolt"
+	"github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+	// we can simply start over with a different filename; update blobInfoCacheFilename.
+
+	// FIXME: For CRI-O, does this need to hide information between different users?
+
+	// uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
+	uncompressedDigestBucket = []byte("uncompressedDigest")
+	// digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
+	// (as a set of key=digest, value="" pairs)
+	digestByUncompressedBucket = []byte("digestByUncompressed")
+	// knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
+	// a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
+	knownLocationsBucket = []byte("knownLocations")
+)
+
+// Concurrency:
+// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
+// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
+
+// pathLock contains a lock for a specific BoltDB database path.
+type pathLock struct {
+	refCount int64      // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
+	mutex    sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
+}
+
+var (
+	// pathLocks contains a lock for each currently open file.
+	// This must be global so that independently created instances of boltDBCache exclude each other.
+	// The map is protected by pathLocksMutex.
+	// FIXME? Should this be based on device:inode numbers instead of paths?
+	pathLocks      = map[string]*pathLock{}
+	pathLocksMutex = sync.Mutex{}
+)
+
+// lockPath obtains the pathLock for path.
+// The caller must call unlockPath eventually.
+func lockPath(path string) {
+	pl := func() *pathLock { // A scope for defer
+		pathLocksMutex.Lock()
+		defer pathLocksMutex.Unlock()
+		pl, ok := pathLocks[path]
+		if ok {
+			pl.refCount++
+		} else {
+			pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
+			pathLocks[path] = pl
+		}
+		return pl
+	}()
+	pl.mutex.Lock()
+}
+
+// unlockPath releases the pathLock for path.
+func unlockPath(path string) {
+	pathLocksMutex.Lock()
+	defer pathLocksMutex.Unlock()
+	pl, ok := pathLocks[path]
+	if !ok {
+		// Should this return an error instead?
BlobInfoCache ultimately ignores errors…
+		panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
+	}
+	pl.mutex.Unlock()
+	pl.refCount--
+	if pl.refCount == 0 {
+		delete(pathLocks, path)
+	}
+}
+
+// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
+//
+// Note that we don’t keep the database open across operations, because that would lock the file and block any other
+// users; instead, we need to open/close it for every single write or lookup.
+type cache struct {
+	path string
+}
+
+// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
+//
+// Most users should call blobinfocache.DefaultCache instead.
+func New(path string) types.BlobInfoCache {
+	return &cache{path: path}
+}
+
+// view runs the specified fn within a read-only transaction on the database.
+func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
+	// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
+	// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
+	// a read lock, blocking any future writes.
+	// Hence this preliminary check, which is RACY: Another process could remove the file
+	// between the Lstat call and opening the database.
+	if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
+		return err
+	}
+
+	lockPath(bdc.path)
+	defer unlockPath(bdc.path)
+	db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := db.Close(); retErr == nil && err != nil {
+			retErr = err
+		}
+	}()
+
+	return db.View(fn)
+}
+
+// update runs the specified fn within a read-write transaction on the database.
+func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
+	lockPath(bdc.path)
+	defer unlockPath(bdc.path)
+	db, err := bolt.Open(bdc.path, 0600, nil)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := db.Close(); retErr == nil && err != nil {
+			retErr = err
+		}
+	}()
+
+	return db.Update(fn)
+}
+
+// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
+func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
+	if b := tx.Bucket(uncompressedDigestBucket); b != nil {
+		if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
+			d, err := digest.Parse(string(uncompressedBytes))
+			if err == nil {
+				return d
+			}
+			// FIXME? Log err (but throttle the log volume on repeated accesses)?
+		}
+	}
+	// Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	if b := tx.Bucket(digestByUncompressedBucket); b != nil {
+		if b = b.Bucket([]byte(anyDigest.String())); b != nil {
+			c := b.Cursor()
+			if k, _ := c.First(); k != nil { // The bucket is non-empty
+				return anyDigest
+			}
+		}
+	}
+	return ""
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
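+//
+// A hypothetical use (the path is illustrative):
+//
+//	bic := boltdb.New("/var/lib/containers/cache/blob-info-cache-v1.boltdb")
+//	if u := bic.UncompressedDigest(anyDigest); u != "" {
+//		// anyDigest is known to decompress to u.
+//	}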
+func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	var res digest.Digest
+	if err := bdc.view(func(tx *bolt.Tx) error {
+		res = bdc.uncompressedDigest(tx, anyDigest)
+		return nil
+	}); err != nil { // Including os.IsNotExist(err)
+		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+	_ = bdc.update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
+		if err != nil {
+			return err
+		}
+		key := []byte(anyDigest.String())
+		if previousBytes := b.Get(key); previousBytes != nil {
+			previous, err := digest.Parse(string(previousBytes))
+			if err != nil {
+				return err
+			}
+			if previous != uncompressed {
+				logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+			}
+		}
+		if err := b.Put(key, []byte(uncompressed.String())); err != nil {
+			return err
+		}
+
+		b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
+		if err != nil {
+			return err
+		}
+		b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
+		if err != nil {
+			return err
+		}
+		if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
+			return err
+		}
+		return nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+	_ = bdc.update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
+		if err != nil {
+			return err
+		}
+		b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
+		if err != nil {
+			return err
+		}
+		b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
+		if err != nil {
+			return err
+		}
+		b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
+		if err != nil {
+			return err
+		}
+		value, err := time.Now().MarshalBinary()
+		if err != nil {
+			return err
+		}
+		if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
+			return err
+		}
+		return nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
+func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime {
+	b := scopeBucket.Bucket([]byte(digest.String()))
+	if b == nil {
+		return candidates
+	}
+	_ = b.ForEach(func(k, v []byte) error {
+		t := time.Time{}
+		if err := t.UnmarshalBinary(v); err != nil {
+			return err
+		}
+		candidates = append(candidates, prioritize.CandidateWithTime{
+			Candidate: types.BICReplacementCandidate{
+				Digest:   digest,
+				Location: types.BICLocationReference{Opaque: string(k)},
+			},
+			LastSeen: t,
+		})
+		return nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+	return candidates
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) scope (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	res := []prioritize.CandidateWithTime{}
+	var uncompressedDigestValue digest.Digest // = ""
+	if err := bdc.view(func(tx *bolt.Tx) error {
+		scopeBucket := tx.Bucket(knownLocationsBucket)
+		if scopeBucket == nil {
+			return nil
+		}
+		scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
+		if scopeBucket == nil {
+			return nil
+		}
+		scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
+		if scopeBucket == nil {
+			return nil
+		}
+
+		res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
+		if canSubstitute {
+			if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
+				b := tx.Bucket(digestByUncompressedBucket)
+				if b != nil {
+					b = b.Bucket([]byte(uncompressedDigestValue.String()))
+					if b != nil {
+						if err := b.ForEach(func(k, _ []byte) error {
+							d, err := digest.Parse(string(k))
+							if err != nil {
+								return err
+							}
+							if d != primaryDigest && d != uncompressedDigestValue {
+								res = bdc.appendReplacementCandidates(res, scopeBucket, d)
+							}
+							return nil
+						}); err != nil {
+							return err
+						}
+					}
+				}
+				if uncompressedDigestValue != primaryDigest {
+					res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
+				}
+			}
+		}
+		return nil
+	}); err != nil { // Including os.IsNotExist(err)
+		return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+
+	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
+}
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/v4/pkg/blobinfocache/default.go
new file mode 100644
index 000000000..af136c36d
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/blobinfocache/default.go
@@ -0,0 +1,75 @@
+package blobinfocache
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+
+	"github.com/containers/image/v4/pkg/blobinfocache/boltdb"
+	"github.com/containers/image/v4/pkg/blobinfocache/memory"
+	"github.com/containers/image/v4/types"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// blobInfoCacheFilename is the file name used for blob info caches.
+	// If the format changes in an incompatible way, increase the version number.
+	blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
+	// systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
+	systemBlobInfoCacheDir = "/var/lib/containers/cache"
+)
+
+// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
+// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory.
+func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
+	if sys != nil && sys.BlobInfoCacheDir != "" {
+		return sys.BlobInfoCacheDir, nil
+	}
+
+	// FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
+	// and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
+	if euid == 0 {
+		if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+			return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
+		}
+		return systemBlobInfoCacheDir, nil
+	}
+
+	// This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
+	dataDir := os.Getenv("XDG_DATA_HOME")
+	if dataDir == "" {
+		home := os.Getenv("HOME")
+		if home == "" {
+			return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
+		}
+		dataDir = filepath.Join(home, ".local", "share")
+	}
+	return filepath.Join(dataDir, "containers", "cache"), nil
+}
+
+func getRootlessUID() int {
+	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
+	if uidEnv != "" {
+		u, _ := strconv.Atoi(uidEnv)
+		return u
+	}
+	return os.Geteuid()
+}
+
+// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
+func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
+	dir, err := blobInfoCacheDir(sys, getRootlessUID())
+	if err != nil {
+		logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
+		return memory.New()
+	}
+	path := filepath.Join(dir, blobInfoCacheFilename)
+	if err := os.MkdirAll(dir, 0700); err != nil {
+		logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
+		return memory.New()
+	}
+
+	logrus.Debugf("Using blob info cache at %s", path)
+	return boltdb.New(path)
+}
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize/prioritize.go
new file mode 100644
index 000000000..7820119b0
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -0,0 +1,110 @@
+// Package prioritize provides utilities for prioritizing locations in
+// types.BlobInfoCache.CandidateLocations.
+package prioritize
+
+import (
+	"sort"
+	"time"
+
+	"github.com/containers/image/v4/types"
+	"github.com/opencontainers/go-digest"
+)
+
+// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
+// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
+// This is a heuristic/guess, and could well use a different value.
+const replacementAttempts = 5
+
+// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
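+// For illustration: with primary digest P and uncompressed digest U, the sort below orders
+// candidates for P first (most recent first), then other digests by time, and candidates
+// for U last; see candidateSortState.Less.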
+type CandidateWithTime struct {
+	Candidate types.BICReplacementCandidate // The replacement candidate
+	LastSeen  time.Time                     // Time the candidate was last known to exist (either read or written)
+}
+
+// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
+// along with the specially-treated digest values for the implementation of sort.Interface.Less
+type candidateSortState struct {
+	cs                 []CandidateWithTime // The entries to sort
+	primaryDigest      digest.Digest       // The digest the user actually asked for
+	uncompressedDigest digest.Digest       // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
+}
+
+func (css *candidateSortState) Len() int {
+	return len(css.cs)
+}
+
+func (css *candidateSortState) Less(i, j int) bool {
+	xi := css.cs[i]
+	xj := css.cs[j]
+
+	// primaryDigest entries come first, more recent first.
+	// uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
+	// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
+
+	// First, deal with the primaryDigest/uncompressedDigest cases:
+	if xi.Candidate.Digest != xj.Candidate.Digest {
+		// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
+		if xi.Candidate.Digest == css.primaryDigest {
+			return true
+		}
+		if xj.Candidate.Digest == css.primaryDigest {
+			return false
+		}
+		if css.uncompressedDigest != "" {
+			if xi.Candidate.Digest == css.uncompressedDigest {
+				return false
+			}
+			if xj.Candidate.Digest == css.uncompressedDigest {
+				return true
+			}
+		}
+	} else { // xi.Candidate.Digest == xj.Candidate.Digest
+		// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
+		if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
+			return xi.LastSeen.After(xj.LastSeen)
+		}
+	}
+
+	// Neither of the digests are primaryDigest/uncompressedDigest:
+	if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
+		return xi.LastSeen.After(xj.LastSeen)
+	}
+	// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
+	return xi.Candidate.Digest < xj.Candidate.Digest
+}
+
+func (css *candidateSortState) Swap(i, j int) {
+	css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
+}
+
+// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
+// number of entries to limit, only to make testing simpler.
+func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
+	// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
+	// compare equal.
+	sort.Sort(&candidateSortState{
+		cs:                 cs,
+		primaryDigest:      primaryDigest,
+		uncompressedDigest: uncompressedDigest,
+	})
+
+	resLength := len(cs)
+	if resLength > maxCandidates {
+		resLength = maxCandidates
+	}
+	res := make([]types.BICReplacementCandidate, resLength)
+	for i := range res {
+		res[i] = cs[i].Candidate
+	}
+	return res
+}
+
+// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
+// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
+// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
+//
+// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
+// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
+func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
+	return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
+}
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v4/pkg/blobinfocache/memory/memory.go
new file mode 100644
index 000000000..c51b9f5ce
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/blobinfocache/memory/memory.go
@@ -0,0 +1,145 @@
+// Package memory implements an in-memory BlobInfoCache.
+package memory
+
+import (
+	"sync"
+	"time"
+
+	"github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize"
+	"github.com/containers/image/v4/types"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+// locationKey only exists to make lookup in knownLocations easier.
+type locationKey struct {
+	transport  string
+	scope      types.BICTransportScope
+	blobDigest digest.Digest
+}
+
+// cache implements an in-memory-only BlobInfoCache
+type cache struct {
+	mutex sync.Mutex
+	// The following fields can only be accessed with mutex held.
+	uncompressedDigests   map[digest.Digest]digest.Digest
+	digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{}             // stores a set of digests for each uncompressed digest
+	knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+}
+
+// New returns a BlobInfoCache implementation which is in-memory only.
+//
+// This is primarily intended for tests, but also used as a fallback
+// if blobinfocache.DefaultCache can’t determine, or set up, the
+// location for a persistent cache. Most users should use
+// blobinfocache.DefaultCache instead of calling this directly.
+// Manual users of types.{ImageSource,ImageDestination} might also use
+// this instead of a persistent cache.
+func New() types.BlobInfoCache {
+	return &cache{
+		uncompressedDigests:   map[digest.Digest]digest.Digest{},
+		digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
+		knownLocations:        map[locationKey]map[types.BICLocationReference]time.Time{},
+	}
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	return mem.uncompressedDigestLocked(anyDigest)
+}
+
+// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
+func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
+	if d, ok := mem.uncompressedDigests[anyDigest]; ok {
+		return d
+	}
+	// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
+		return anyDigest
+	}
+	return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
+		logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+	}
+	mem.uncompressedDigests[anyDigest] = uncompressed
+
+	anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
+	if !ok {
+		anyDigestSet = map[digest.Digest]struct{}{}
+		mem.digestsByUncompressed[uncompressed] = anyDigestSet
+	}
+	anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
+	locationScope, ok := mem.knownLocations[key]
+	if !ok {
+		locationScope = map[types.BICLocationReference]time.Time{}
+		mem.knownLocations[key] = locationScope
+	}
+	locationScope[location] = time.Now() // Possibly overwriting an older entry.
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
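+// A hypothetical end-to-end use of this in-memory cache (names are illustrative):
+//
+//	bic := memory.New()
+//	bic.RecordKnownLocation(transport, scope, d, types.BICLocationReference{Opaque: "some-ref"})
+//	candidates := bic.CandidateLocations(transport, scope, d, false)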
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime {
+	locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
+	for l, t := range locations {
+		candidates = append(candidates, prioritize.CandidateWithTime{
+			Candidate: types.BICReplacementCandidate{
+				Digest:   digest,
+				Location: l,
+			},
+			LastSeen: t,
+		})
+	}
+	return candidates
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) scope (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	res := []prioritize.CandidateWithTime{}
+	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
+	var uncompressedDigest digest.Digest // = ""
+	if canSubstitute {
+		if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
+			otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
+			for d := range otherDigests {
+				if d != primaryDigest && d != uncompressedDigest {
+					res = mem.appendReplacementCandidates(res, transport, scope, d)
+				}
+			}
+			if uncompressedDigest != primaryDigest {
+				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest)
+			}
+		}
+	}
+	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+}
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v4/pkg/blobinfocache/none/none.go
new file mode 100644
index 000000000..c5ce29a45
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/blobinfocache/none/none.go
@@ -0,0 +1,49 @@
+// Package none implements a dummy BlobInfoCache which records no data.
+package none
+
+import (
+	"github.com/containers/image/v4/types"
+	"github.com/opencontainers/go-digest"
+)
+
+// noCache implements a dummy BlobInfoCache which records no data.
+type noCache struct {
+}
+
+// NoCache implements BlobInfoCache by not recording any data.
+//
+// This exists primarily for implementations of configGetter for
+// Manifest.Inspect, because configs only have one representation.
+// Any use of BlobInfoCache with blobs should usually use at least a
+// short-lived cache, ideally blobinfocache.DefaultCache.
+var NoCache types.BlobInfoCache = noCache{}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) scope (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v4/pkg/compression/compression.go b/vendor/github.com/containers/image/v4/pkg/compression/compression.go
new file mode 100644
index 000000000..fd2f21549
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/compression/compression.go
@@ -0,0 +1,149 @@
+package compression
+
+import (
+	"bytes"
+	"compress/bzip2"
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"github.com/containers/image/v4/pkg/compression/internal"
+	"github.com/containers/image/v4/pkg/compression/types"
+	"github.com/klauspost/pgzip"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/ulikunitz/xz"
+)
+
+// Algorithm is a compression algorithm that can be used for CompressStream.
+type Algorithm = types.Algorithm
+
+var (
+	// Gzip compression.
+	Gzip = internal.NewAlgorithm("gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
+	// Bzip2 compression.
+	Bzip2 = internal.NewAlgorithm("bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
+	// Xz compression.
+	Xz = internal.NewAlgorithm("Xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
+	// Zstd compression.
+	Zstd = internal.NewAlgorithm("zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
+
+	compressionAlgorithms = map[string]Algorithm{
+		Gzip.Name():  Gzip,
+		Bzip2.Name(): Bzip2,
+		Xz.Name():    Xz,
+		Zstd.Name():  Zstd,
+	}
+)
+
+// AlgorithmByName returns the compression algorithm registered under the given name.
+func AlgorithmByName(name string) (Algorithm, error) {
+	algorithm, ok := compressionAlgorithms[name]
+	if ok {
+		return algorithm, nil
+	}
+	return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name)
+}
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc = internal.DecompressorFunc
+
+// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
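+// For example (sketch), decompressing a gzip stream:
+//
+//	rc, err := compression.GzipDecompressor(compressedReader)
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()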
+func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
+	return pgzip.NewReader(r)
+}
+
+// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
+func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
+	return ioutil.NopCloser(bzip2.NewReader(r)), nil
+}
+
+// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
+func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
+	r, err := xz.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+	return ioutil.NopCloser(r), nil
+}
+
+// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
+func gzipCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+	if level != nil {
+		return pgzip.NewWriterLevel(r, *level)
+	}
+	return pgzip.NewWriter(r), nil
+}
+
+// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm.
+func bzip2Compressor(r io.Writer, level *int) (io.WriteCloser, error) {
+	return nil, fmt.Errorf("bzip2 compression not supported")
+}
+
+// xzCompressor is a CompressorFunc for the xz compression algorithm.
+func xzCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+	return xz.NewWriter(r)
+}
+
+// CompressStream returns a WriteCloser which compresses data written to it into dest, using algo at the specified level.
+func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
+	return internal.AlgorithmCompressor(algo)(dest, level)
+}
+
+// DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, an empty Algorithm and nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) {
+	buffer := [8]byte{}
+
+	n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
+		// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
+		return Algorithm{}, nil, nil, err
+	}
+
+	var retAlgo Algorithm
+	var decompressor DecompressorFunc
+	for _, algo := range compressionAlgorithms {
+		if bytes.HasPrefix(buffer[:n], internal.AlgorithmPrefix(algo)) {
+			logrus.Debugf("Detected compression format %s", algo.Name())
+			retAlgo = algo
+			decompressor = internal.AlgorithmDecompressor(algo)
+			break
+		}
+	}
+	if decompressor == nil {
+		logrus.Debugf("No compression detected")
+	}
+
+	return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
+}
+
+// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
+	_, d, r, e := DetectCompressionFormat(input)
+	return d, r, e
+}
+
+// AutoDecompress takes a stream and returns an uncompressed version of the
+// same stream.
+// The caller must call Close() on the returned stream (even if the input does not need,
+// or does not even support, closing!).
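+// A hypothetical use:
+//
+//	rc, wasCompressed, err := compression.AutoDecompress(input)
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	// rc now yields uncompressed data whether or not input was compressed.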
+func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
+	decompressor, stream, err := DetectCompression(stream)
+	if err != nil {
+		return nil, false, errors.Wrapf(err, "Error detecting compression")
+	}
+	var res io.ReadCloser
+	if decompressor != nil {
+		res, err = decompressor(stream)
+		if err != nil {
+			return nil, false, errors.Wrapf(err, "Error initializing decompression")
+		}
+	} else {
+		res = ioutil.NopCloser(stream)
+	}
+	return res, decompressor != nil, nil
+}
diff --git a/vendor/github.com/containers/image/v4/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v4/pkg/compression/internal/types.go
new file mode 100644
index 000000000..6092a9517
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/compression/internal/types.go
@@ -0,0 +1,57 @@
+package internal
+
+import "io"
+
+// CompressorFunc writes the compressed stream to the given writer using the specified compression level.
+// The caller must call Close() on the stream (even if the input stream does not need closing!).
+type CompressorFunc func(io.Writer, *int) (io.WriteCloser, error)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
+
+// Algorithm is a compression algorithm that can be used for CompressStream.
+type Algorithm struct {
+	name         string
+	prefix       []byte
+	decompressor DecompressorFunc
+	compressor   CompressorFunc
+}
+
+// NewAlgorithm creates an Algorithm instance.
+// This function exists so that Algorithm instances can only be created by code that
+// is allowed to import this internal subpackage.
+func NewAlgorithm(name string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
+	return Algorithm{
+		name:         name,
+		prefix:       prefix,
+		decompressor: decompressor,
+		compressor:   compressor,
+	}
+}
+
+// Name returns the name for the compression algorithm.
+func (c Algorithm) Name() string {
+	return c.name
+}
+
+// AlgorithmCompressor returns the compressor field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmCompressor(algo Algorithm) CompressorFunc {
+	return algo.compressor
+}
+
+// AlgorithmDecompressor returns the decompressor field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmDecompressor(algo Algorithm) DecompressorFunc {
+	return algo.decompressor
+}
+
+// AlgorithmPrefix returns the prefix field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmPrefix(algo Algorithm) []byte {
+	return algo.prefix
+}
diff --git a/vendor/github.com/containers/image/v4/pkg/compression/types/types.go b/vendor/github.com/containers/image/v4/pkg/compression/types/types.go
new file mode 100644
index 000000000..ea43dc8cd
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/compression/types/types.go
@@ -0,0 +1,13 @@
+package types
+
+import (
+	"github.com/containers/image/v4/pkg/compression/internal"
+)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc = internal.DecompressorFunc
+
+// Algorithm is a compression algorithm provided and supported by pkg/compression.
+// It can’t be supplied from the outside.
+type Algorithm = internal.Algorithm
diff --git a/vendor/github.com/containers/image/v4/pkg/compression/zstd.go b/vendor/github.com/containers/image/v4/pkg/compression/zstd.go
new file mode 100644
index 000000000..962fe9676
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/compression/zstd.go
@@ -0,0 +1,59 @@
+package compression
+
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+type wrapperZstdDecoder struct {
+	decoder *zstd.Decoder
+}
+
+func (w *wrapperZstdDecoder) Close() error {
+	w.decoder.Close()
+	return nil
+}
+
+func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) {
+	return w.decoder.DecodeAll(input, dst)
+}
+
+func (w *wrapperZstdDecoder) Read(p []byte) (int, error) {
+	return w.decoder.Read(p)
+}
+
+func (w *wrapperZstdDecoder) Reset(r io.Reader) error {
+	return w.decoder.Reset(r)
+}
+
+func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) {
+	return w.decoder.WriteTo(wr)
+}
+
+func zstdReader(buf io.Reader) (io.ReadCloser, error) {
+	decoder, err := zstd.NewReader(buf)
+	return &wrapperZstdDecoder{decoder: decoder}, err
+}
+
+func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
+	return zstd.NewWriter(dest)
+}
+
+func zstdWriterWithLevel(dest io.Writer, level int) (io.WriteCloser, error) {
+	el := zstd.EncoderLevelFromZstd(level)
+	return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
+}
+
+// zstdCompressor is a CompressorFunc for the zstd compression algorithm.
+func zstdCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+	if level == nil {
+		return zstdWriter(r)
+	}
+	return zstdWriterWithLevel(r, *level)
+}
+
+// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm.
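+// For example (sketch, error handling elided), a round trip through this package:
+//
+//	w, _ := compression.CompressStream(&buf, compression.Zstd, nil)
+//	w.Write(data)
+//	w.Close()
+//	rc, _ := compression.ZstdDecompressor(&buf)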
+func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) {
+	return zstdReader(r)
+}
diff --git a/vendor/github.com/containers/image/v4/pkg/docker/config/config.go b/vendor/github.com/containers/image/v4/pkg/docker/config/config.go
new file mode 100644
index 000000000..e720dc865
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/docker/config/config.go
@@ -0,0 +1,352 @@
+package config
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/image/v4/types"
+	helperclient "github.com/docker/docker-credential-helpers/client"
+	"github.com/docker/docker-credential-helpers/credentials"
+	"github.com/docker/docker/pkg/homedir"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type dockerAuthConfig struct {
+	Auth string `json:"auth,omitempty"`
+}
+
+type dockerConfigFile struct {
+	AuthConfigs map[string]dockerAuthConfig `json:"auths"`
+	CredHelpers map[string]string           `json:"credHelpers,omitempty"`
+}
+
+var (
+	defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json")
+	xdgRuntimeDirPath       = filepath.FromSlash("containers/auth.json")
+	dockerHomePath          = filepath.FromSlash(".docker/config.json")
+	dockerLegacyHomePath    = ".dockercfg"
+
+	enableKeyring = false
+
+	// ErrNotLoggedIn is returned for users not logged into a registry
+	// that they are trying to logout of
+	ErrNotLoggedIn = errors.New("not logged in")
+	// ErrNotSupported is returned for unsupported methods
+	ErrNotSupported = errors.New("not supported")
+)
+
+// SetAuthentication stores the username and password in the auth.json file
+func SetAuthentication(sys *types.SystemContext, registry, username, password string) error {
+	return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+		if ch, exists := auths.CredHelpers[registry]; exists {
+			return false, setAuthToCredHelper(ch, registry, username, password)
+		}
+
+		// Set the credentials to kernel keyring if enableKeyring is true.
+		// The keyring might not work in all environments (e.g., missing capability) and isn't supported on all platforms.
+		// Hence, we want to fall back to using the authfile in case the keyring failed.
+		// However, if enableKeyring is false, we want to adhere to the user specification and not use the keyring.
+		if enableKeyring {
+			err := setAuthToKernelKeyring(registry, username, password)
+			if err == nil {
+				logrus.Debugf("credentials for (%s, %s) were stored in the kernel keyring\n", registry, username)
+				return false, nil
+			}
+			logrus.Debugf("failed to authenticate with the kernel keyring, falling back to authfiles. %v", err)
+		}
+		creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
+		newCreds := dockerAuthConfig{Auth: creds}
+		auths.AuthConfigs[registry] = newCreds
+		return true, nil
+	})
+}
+
+// GetAuthentication returns the registry credentials stored in
+// either auth.json file or .docker/config.json
+// If an entry is not found, empty strings are returned for the username and password
+func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
+	if sys != nil && sys.DockerAuthConfig != nil {
+		logrus.Debug("Returning credentials from DockerAuthConfig")
+		return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil
+	}
+
+	if enableKeyring {
+		username, password, err := getAuthFromKernelKeyring(registry)
+		if err == nil {
+			logrus.Debug("returning credentials from kernel keyring")
+			return username, password, nil
+		}
+	}
+
+	dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath)
+	var paths []string
+	pathToAuth, err := getPathToAuth(sys)
+	if err == nil {
+		paths = append(paths, pathToAuth)
+	} else {
+		// Error means that the path set for XDG_RUNTIME_DIR does not exist
+		// but we don't want to completely fail in the case that the user is pulling a public image
+		// Logging the error as a warning instead and moving on to pulling the image
+		logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
+	}
+	paths = append(paths, filepath.Join(homedir.Get(), dockerHomePath), dockerLegacyPath)
+
+	for _, path := range paths {
+		legacyFormat := path == dockerLegacyPath
+		username, password, err := findAuthentication(registry, path, legacyFormat)
+		if err != nil {
+			logrus.Debugf("Credentials not found")
+			return "", "", err
+		}
+		if username != "" && password != "" {
+			logrus.Debugf("Returning credentials from %s", path)
+			return username, password, nil
+		}
+	}
+	logrus.Debugf("Credentials not found")
+	return "", "", nil
+}
+
+// RemoveAuthentication deletes the credentials stored in auth.json
+func RemoveAuthentication(sys *types.SystemContext, registry string) error {
+	return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+		// First try cred helpers.
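+		// (lookup order below: cred helpers, then the kernel keyring if enabled, then auth-file entries)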
+		if ch, exists := auths.CredHelpers[registry]; exists {
+			return false, deleteAuthFromCredHelper(ch, registry)
+		}
+
+		// Next, if keyring is enabled, try the kernel keyring.
+		if enableKeyring {
+			err := deleteAuthFromKernelKeyring(registry)
+			if err == nil {
+				logrus.Debugf("credentials for %s were deleted from the kernel keyring", registry)
+				return false, nil
+			}
+			logrus.Debugf("failed to delete credentials from the kernel keyring, falling back to authfiles")
+		}
+
+		if _, ok := auths.AuthConfigs[registry]; ok {
+			delete(auths.AuthConfigs, registry)
+		} else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok {
+			delete(auths.AuthConfigs, normalizeRegistry(registry))
+		} else {
+			return false, ErrNotLoggedIn
+		}
+		return true, nil
+	})
+}
+
+// RemoveAllAuthentication deletes all the credentials stored in auth.json and the kernel keyring
func RemoveAllAuthentication(sys *types.SystemContext) error {
+	return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+		if enableKeyring {
+			err := removeAllAuthFromKernelKeyring()
+			if err == nil {
+				logrus.Debugf("removed all credentials from the kernel keyring")
+				return false, nil
+			}
+			logrus.Debugf("error removing credentials from the kernel keyring")
+		}
+		auths.CredHelpers = make(map[string]string)
+		auths.AuthConfigs = make(map[string]dockerAuthConfig)
+		return true, nil
+	})
+}
+
+// getPathToAuth returns the path of the auth.json file.
+// The path can be overridden by the user if the overwrite-path flag is set.
+// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers.
+// Otherwise, the auth.json file is stored in /run/containers/UID.
+func getPathToAuth(sys *types.SystemContext) (string, error) {
+	if sys != nil {
+		if sys.AuthFilePath != "" {
+			return sys.AuthFilePath, nil
+		}
+		if sys.RootForImplicitAbsolutePaths != "" {
+			return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), nil
+		}
+	}
+
+	runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
+	if runtimeDir != "" {
+		// This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway.
+		// We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case.
+		_, err := os.Stat(runtimeDir)
+		if os.IsNotExist(err) {
+			// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
+			// or made a typo while setting the environment variable,
+			// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
+			return "", errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir)
+		} // else ignore err and let the caller fail accessing xdgRuntimeDirPath.
+ return filepath.Join(runtimeDir, xdgRuntimeDirPath), nil + } + return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), nil +} + +// readJSONFile unmarshals the authentications stored in the auth.json file and returns it +// or returns an empty dockerConfigFile data structure if auth.json does not exist +// if the file exists and is empty, readJSONFile returns an error +func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) { + var auths dockerConfigFile + + raw, err := ioutil.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + auths.AuthConfigs = map[string]dockerAuthConfig{} + return auths, nil + } + return dockerConfigFile{}, err + } + + if legacyFormat { + if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil { + return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path) + } + return auths, nil + } + + if err = json.Unmarshal(raw, &auths); err != nil { + return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path) + } + + return auths, nil +} + +// modifyJSON writes to auth.json if the dockerConfigFile has been updated +func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error { + path, err := getPathToAuth(sys) + if err != nil { + return err + } + + dir := filepath.Dir(path) + if _, err := os.Stat(dir); os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0700); err != nil { + return errors.Wrapf(err, "error creating directory %q", dir) + } + } + + auths, err := readJSONFile(path, false) + if err != nil { + return errors.Wrapf(err, "error reading JSON file %q", path) + } + + updated, err := editor(&auths) + if err != nil { + return errors.Wrapf(err, "error updating %q", path) + } + if updated { + newData, err := json.MarshalIndent(auths, "", "\t") + if err != nil { + return errors.Wrapf(err, "error marshaling JSON %q", path) + } + + if err = ioutil.WriteFile(path, newData, 0755); err != nil { + return errors.Wrapf(err, "error writing to file %q", path) + } + } + + return nil +} + +func getAuthFromCredHelper(credHelper, registry string) (string, string, error) { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + creds, err := helperclient.Get(p, registry) + if err != nil { + return "", "", err + } + return creds.Username, creds.Secret, nil +} + +func setAuthToCredHelper(credHelper, registry, username, password string) error { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + creds := &credentials.Credentials{ + ServerURL: registry, + Username: username, + Secret: password, + } + return helperclient.Store(p, creds) +} + +func deleteAuthFromCredHelper(credHelper, registry string) error { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + return helperclient.Erase(p, registry) +} + +// findAuthentication looks for auth of registry in path +func findAuthentication(registry, path string, legacyFormat bool) (string, string, error) { + auths, err := readJSONFile(path, legacyFormat) + if err != nil { + return "", "", errors.Wrapf(err, "error reading JSON file %q", path) + } + + // First try cred helpers. They should always be normalized. 
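
A hypothetical caller-side sketch of these exported helpers, overriding the auth file location through `types.SystemContext.AuthFilePath` (the path used here is an assumption for illustration):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v4/pkg/docker/config"
	"github.com/containers/image/v4/types"
)

func main() {
	// Assumed writable test path; by default the package resolves the path itself.
	sys := &types.SystemContext{AuthFilePath: "/tmp/auth.json"}

	if err := config.SetAuthentication(sys, "registry.example.com", "myuser", "mypassword"); err != nil {
		panic(err)
	}
	user, pass, err := config.GetAuthentication(sys, "registry.example.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(user, pass) // myuser mypassword
}
```
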
+	if ch, exists := auths.CredHelpers[registry]; exists {
+		return getAuthFromCredHelper(ch, registry)
+	}
+
+	// I'm feeling lucky
+	if val, exists := auths.AuthConfigs[registry]; exists {
+		return decodeDockerAuth(val.Auth)
+	}
+
+	// bad luck; let's normalize the entries first
+	registry = normalizeRegistry(registry)
+	normalizedAuths := map[string]dockerAuthConfig{}
+	for k, v := range auths.AuthConfigs {
+		normalizedAuths[normalizeRegistry(k)] = v
+	}
+	if val, exists := normalizedAuths[registry]; exists {
+		return decodeDockerAuth(val.Auth)
+	}
+	return "", "", nil
+}
+
+func decodeDockerAuth(s string) (string, string, error) {
+	decoded, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		return "", "", err
+	}
+	parts := strings.SplitN(string(decoded), ":", 2)
+	if len(parts) != 2 {
+		// if it's invalid just skip, as docker does
+		return "", "", nil
+	}
+	user := parts[0]
+	password := strings.Trim(parts[1], "\x00")
+	return user, password, nil
+}
+
+// convertToHostname converts a registry URL with an http:// or https:// prefix
+// to just the hostname.
+// Copied from github.com/docker/docker/registry/auth.go
+func convertToHostname(url string) string {
+	stripped := url
+	if strings.HasPrefix(url, "http://") {
+		stripped = strings.TrimPrefix(url, "http://")
+	} else if strings.HasPrefix(url, "https://") {
+		stripped = strings.TrimPrefix(url, "https://")
+	}
+
+	nameParts := strings.SplitN(stripped, "/", 2)
+
+	return nameParts[0]
+}
+
+func normalizeRegistry(registry string) string {
+	normalized := convertToHostname(registry)
+	switch normalized {
+	case "registry-1.docker.io", "docker.io":
+		return "index.docker.io"
+	}
+	return normalized
+}
diff --git a/vendor/github.com/containers/image/v4/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v4/pkg/docker/config/config_linux.go
new file mode 100644
index 000000000..0cd73528b
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/docker/config/config_linux.go
@@ -0,0 +1,115 @@
+package config
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/v4/internal/pkg/keyctl"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const keyDescribePrefix = "container-registry-login:"
+
+func getAuthFromKernelKeyring(registry string) (string, string, error) {
+	userkeyring, err := keyctl.UserKeyring()
+	if err != nil {
+		return "", "", err
+	}
+	key, err := userkeyring.Search(genDescription(registry))
+	if err != nil {
+		return "", "", err
+	}
+	authData, err := key.Get()
+	if err != nil {
+		return "", "", err
+	}
+	parts := strings.SplitN(string(authData), "\x00", 2)
+	if len(parts) != 2 {
+		return "", "", nil
+	}
+	return parts[0], parts[1], nil
+}
+
+func deleteAuthFromKernelKeyring(registry string) error {
+	userkeyring, err := keyctl.UserKeyring()
+
+	if err != nil {
+		return err
+	}
+	key, err := userkeyring.Search(genDescription(registry))
+	if err != nil {
+		return err
+	}
+	return key.Unlink()
+}
+
+func removeAllAuthFromKernelKeyring() error {
+	keys, err := keyctl.ReadUserKeyring()
+	if err != nil {
+		return err
+	}
+
+	userkeyring, err := keyctl.UserKeyring()
+	if err != nil {
+		return err
+	}
+
+	for _, k := range keys {
+		keyAttr, err := k.Describe()
+		if err != nil {
+			return err
+		}
+		// split the string "type;uid;gid;perm;description"
+		keyAttrs := strings.SplitN(keyAttr, ";", 5)
+		if len(keyAttrs) < 5 {
+			return errors.Errorf("Key attributes of %d are not available", k.ID())
+		}
+		keyDescribe := keyAttrs[4]
+		if strings.HasPrefix(keyDescribe, keyDescribePrefix) {
+			err := keyctl.Unlink(userkeyring, k)
+			if err != nil {
+				return errors.Wrapf(err, "error unlinking key %d", k.ID())
+			}
+			logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr)
+		}
+	}
+	return nil
+}
+
+func setAuthToKernelKeyring(registry, username, password string) error {
+	keyring, err := keyctl.SessionKeyring()
+	if err != nil {
+		return err
+	}
+	id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password)))
+	if err != nil {
+		return err
+	}
+
+	// set all permissions (view, read, write, search, link, setattr) for the current user;
+	// this enables the user to search the key after it is linked to the user keyring and unlinked from the session keyring
+	err = keyctl.SetPerm(id, keyctl.PermUserAll)
+	if err != nil {
+		return err
+	}
+	// link the key to the user keyring
+	userKeyring, err := keyctl.UserKeyring()
+	if err != nil {
+		return errors.Wrapf(err, "error getting user keyring")
+	}
+	err = keyctl.Link(userKeyring, id)
+	if err != nil {
+		return errors.Wrapf(err, "error linking the key to user keyring")
+	}
+	// unlink the key from the session keyring
+	err = keyctl.Unlink(keyring, id)
+	if err != nil {
+		return errors.Wrapf(err, "error unlinking the key from session keyring")
+	}
+	return nil
+}
+
+func genDescription(registry string) string {
+	return fmt.Sprintf("%s%s", keyDescribePrefix, registry)
+}
diff --git a/vendor/github.com/containers/image/v4/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v4/pkg/docker/config/config_unsupported.go
new file mode 100644
index 000000000..9b0e8bee2
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/docker/config/config_unsupported.go
@@ -0,0 +1,20 @@
+// +build !linux
+// +build !386 !amd64
+
+package config
+
+func getAuthFromKernelKeyring(registry string) (string, string, error) {
+	return "", "", ErrNotSupported
+}
+
+func deleteAuthFromKernelKeyring(registry string) error {
+	return ErrNotSupported
+}
+
+func setAuthToKernelKeyring(registry, username, password string) error {
+	return ErrNotSupported
+}
+
+func removeAllAuthFromKernelKeyring() error {
+	return ErrNotSupported
+}
diff --git a/vendor/github.com/containers/image/v4/pkg/strslice/README.md b/vendor/github.com/containers/image/v4/pkg/strslice/README.md
new file mode 100644
index 000000000..ae6097e82
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/strslice/README.md
@@ -0,0 +1 @@
+This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice).
diff --git a/vendor/github.com/containers/image/v4/pkg/strslice/strslice.go b/vendor/github.com/containers/image/v4/pkg/strslice/strslice.go
new file mode 100644
index 000000000..bad493fb8
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/pkg/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+	if len(b) == 0 {
+		// With no input, we preserve the existing value by returning nil and
+		// leaving the target alone. This allows defining default values for
+		// the type.
+ return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/vendor/github.com/containers/image/v4/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v4/pkg/sysregistriesv2/system_registries_v2.go new file mode 100644 index 000000000..0cd60778f --- /dev/null +++ b/vendor/github.com/containers/image/v4/pkg/sysregistriesv2/system_registries_v2.go @@ -0,0 +1,483 @@ +package sysregistriesv2 + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/BurntSushi/toml" + "github.com/containers/image/v4/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/containers/image/v4/docker/reference" +) + +// systemRegistriesConfPath is the path to the system-wide registry +// configuration file and is used to add/subtract potential registries for +// obtaining images. You can override this at build time with +// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path' +var systemRegistriesConfPath = builtinRegistriesConfPath + +// builtinRegistriesConfPath is the path to the registry configuration file. +// DO NOT change this, instead see systemRegistriesConfPath above. +const builtinRegistriesConfPath = "/etc/containers/registries.conf" + +// Endpoint describes a remote location of a registry. +type Endpoint struct { + // The endpoint's remote location. + Location string `toml:"location,omitempty"` + // If true, certs verification will be skipped and HTTP (non-TLS) + // connections will be allowed. + Insecure bool `toml:"insecure,omitempty"` +} + +// rewriteReference will substitute the provided reference `prefix` to the +// endpoints `location` from the `ref` and creates a new named reference from it. +// The function errors if the newly created reference is not parsable. +func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) { + refString := ref.String() + if !refMatchesPrefix(refString, prefix) { + return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString) + } + + newNamedRef := strings.Replace(refString, prefix, e.Location, 1) + newParsedRef, err := reference.ParseNamed(newNamedRef) + if err != nil { + return nil, errors.Wrapf(err, "error rewriting reference") + } + logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String()) + return newParsedRef, nil +} + +// Registry represents a registry. +type Registry struct { + // Prefix is used for matching images, and to translate one namespace to + // another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"` + // and we pull from "example.com/bar/myimage:latest", the image will + // effectively be pulled from "example.com/foo/bar/myimage:latest". + // If no Prefix is specified, it defaults to the specified location. + Prefix string `toml:"prefix"` + // A registry is an Endpoint too + Endpoint + // The registry's mirrors. + Mirrors []Endpoint `toml:"mirror,omitempty"` + // If true, pulling from the registry will be blocked. + Blocked bool `toml:"blocked,omitempty"` + // If true, mirrors will only be used for digest pulls. Pulling images by + // tag can potentially yield different images, depending on which endpoint + // we pull from. Forcing digest-pulls for mirrors avoids that issue. 
+	MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"`
+}
+
+// PullSource consists of an Endpoint and a Reference. Note that the reference is
+// rewritten according to the registry's prefix and the Endpoint's location.
+type PullSource struct {
+	Endpoint  Endpoint
+	Reference reference.Named
+}
+
+// PullSourcesFromReference returns a slice of PullSources based on the passed
+// reference.
+func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) {
+	var endpoints []Endpoint
+
+	if r.MirrorByDigestOnly {
+		// Only use mirrors when the reference is a digest one.
+		if _, isDigested := ref.(reference.Canonical); isDigested {
+			endpoints = append(r.Mirrors, r.Endpoint)
+		} else {
+			endpoints = []Endpoint{r.Endpoint}
+		}
+	} else {
+		endpoints = append(r.Mirrors, r.Endpoint)
+	}
+
+	sources := []PullSource{}
+	for _, ep := range endpoints {
+		rewritten, err := ep.rewriteReference(ref, r.Prefix)
+		if err != nil {
+			return nil, err
+		}
+		sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten})
+	}
+
+	return sources, nil
+}
+
+// V1TOMLregistries is for backwards compatibility with sysregistries v1
+type V1TOMLregistries struct {
+	Registries []string `toml:"registries"`
+}
+
+// V1TOMLConfig is for backwards compatibility with sysregistries v1
+type V1TOMLConfig struct {
+	Search   V1TOMLregistries `toml:"search"`
+	Insecure V1TOMLregistries `toml:"insecure"`
+	Block    V1TOMLregistries `toml:"block"`
+}
+
+// V1RegistriesConf is the sysregistries v1 configuration format.
+type V1RegistriesConf struct {
+	V1TOMLConfig `toml:"registries"`
+}
+
+// Nonempty returns true if config contains at least one configuration entry.
+func (config *V1RegistriesConf) Nonempty() bool {
+	return (len(config.V1TOMLConfig.Search.Registries) != 0 ||
+		len(config.V1TOMLConfig.Insecure.Registries) != 0 ||
+		len(config.V1TOMLConfig.Block.Registries) != 0)
+}
+
+// V2RegistriesConf is the sysregistries v2 configuration format.
+type V2RegistriesConf struct {
+	Registries []Registry `toml:"registry"`
+	// An array of host[:port] (not prefix!) entries to use for resolving unqualified image references
+	UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"`
+}
+
+// Nonempty returns true if config contains at least one configuration entry.
+func (config *V2RegistriesConf) Nonempty() bool {
+	return (len(config.Registries) != 0 ||
+		len(config.UnqualifiedSearchRegistries) != 0)
+}
+
+// tomlConfig is the data type used to unmarshal the toml config.
+type tomlConfig struct {
+	V2RegistriesConf
+	V1RegistriesConf // for backwards compatibility with sysregistries v1
+}
+
+// InvalidRegistries represents an invalid registry configuration. An example
+// is when "registry.com" is defined multiple times in the configuration but
+// with conflicting security settings.
+type InvalidRegistries struct {
+	s string
+}
+
+// Error returns the error string.
+func (e *InvalidRegistries) Error() string {
+	return e.s
+}
+
+// parseLocation parses the input string, performs some sanity checks and returns
+// the sanitized input string. An error is returned if the input string is
+// empty or if it contains an "http{s,}://" prefix.
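
To make the TOML tags above concrete, here is a sketch of a v2 registries.conf fragment decoded with the same BurntSushi/toml package this file uses. The registry names are made up, and decoding directly into the exported `V2RegistriesConf` skips the `postProcess` normalization that `GetRegistries` performs:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
	"github.com/containers/image/v4/pkg/sysregistriesv2"
)

const conf = `
unqualified-search-registries = ["docker.io"]

[[registry]]
prefix = "example.com/foo"
location = "internal-registry.example.net/bar"

[[registry.mirror]]
location = "mirror-1.example.net/bar"

[[registry.mirror]]
location = "mirror-2.example.net/bar"
insecure = true
`

func main() {
	var v2 sysregistriesv2.V2RegistriesConf
	if _, err := toml.Decode(conf, &v2); err != nil {
		panic(err)
	}
	// example.com/foo 2
	fmt.Println(v2.Registries[0].Prefix, len(v2.Registries[0].Mirrors))
}
```
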
+func parseLocation(input string) (string, error) { + trimmed := strings.TrimRight(input, "/") + + if trimmed == "" { + return "", &InvalidRegistries{s: "invalid location: cannot be empty"} + } + + if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") { + msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input) + return "", &InvalidRegistries{s: msg} + } + + return trimmed, nil +} + +// ConvertToV2 returns a v2 config corresponding to a v1 one. +func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) { + regMap := make(map[string]*Registry) + // The order of the registries is not really important, but make it deterministic (the same for the same config file) + // to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations. + registryOrder := []string{} + + getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object + var err error + location, err = parseLocation(location) + if err != nil { + return nil, err + } + reg, exists := regMap[location] + if !exists { + reg = &Registry{ + Endpoint: Endpoint{Location: location}, + Mirrors: []Endpoint{}, + Prefix: location, + } + regMap[location] = reg + registryOrder = append(registryOrder, location) + } + return reg, nil + } + + for _, blocked := range config.V1TOMLConfig.Block.Registries { + reg, err := getRegistry(blocked) + if err != nil { + return nil, err + } + reg.Blocked = true + } + for _, insecure := range config.V1TOMLConfig.Insecure.Registries { + reg, err := getRegistry(insecure) + if err != nil { + return nil, err + } + reg.Insecure = true + } + + res := &V2RegistriesConf{ + UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries, + } + for _, location := range registryOrder { + reg := regMap[location] + res.Registries = append(res.Registries, *reg) + } + return res, nil +} + +// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries. +var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$") + +// postProcess checks the consistency of all the configuration, looks for conflicts, +// and normalizes the configuration (e.g., sets the Prefix to Location if not set). +func (config *V2RegistriesConf) postProcess() error { + regMap := make(map[string][]*Registry) + + for i := range config.Registries { + reg := &config.Registries[i] + // make sure Location and Prefix are valid + var err error + reg.Location, err = parseLocation(reg.Location) + if err != nil { + return err + } + + if reg.Prefix == "" { + reg.Prefix = reg.Location + } else { + reg.Prefix, err = parseLocation(reg.Prefix) + if err != nil { + return err + } + } + + // make sure mirrors are valid + for _, mir := range reg.Mirrors { + mir.Location, err = parseLocation(mir.Location) + if err != nil { + return err + } + } + regMap[reg.Location] = append(regMap[reg.Location], reg) + } + + // Given a registry can be mentioned multiple times (e.g., to have + // multiple prefixes backed by different mirrors), we need to make sure + // there are no conflicts among them. + // + // Note: we need to iterate over the registries array to ensure a + // deterministic behavior which is not guaranteed by maps. 
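
A minimal sketch of the `ConvertToV2` conversion defined above, using made-up registry names: the v1 search list becomes the unqualified search registries, and each insecure/blocked entry becomes a `Registry` whose Prefix defaults to its Location:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v4/pkg/sysregistriesv2"
)

func main() {
	v1 := sysregistriesv2.V1RegistriesConf{}
	v1.Search.Registries = []string{"docker.io"}
	v1.Insecure.Registries = []string{"insecure.example.com"}

	v2, err := v1.ConvertToV2()
	if err != nil {
		panic(err)
	}
	fmt.Println(v2.UnqualifiedSearchRegistries)                      // [docker.io]
	fmt.Println(v2.Registries[0].Location, v2.Registries[0].Insecure) // insecure.example.com true
}
```
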
+	for _, reg := range config.Registries {
+		others := regMap[reg.Location]
+		for _, other := range others {
+			if reg.Insecure != other.Insecure {
+				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
+				return &InvalidRegistries{s: msg}
+			}
+			if reg.Blocked != other.Blocked {
+				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
+				return &InvalidRegistries{s: msg}
+			}
+		}
+	}
+
+	for i := range config.UnqualifiedSearchRegistries {
+		registry, err := parseLocation(config.UnqualifiedSearchRegistries[i])
+		if err != nil {
+			return err
+		}
+		if !anchoredDomainRegexp.MatchString(registry) {
+			return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)}
+		}
+		config.UnqualifiedSearchRegistries[i] = registry
+	}
+
+	return nil
+}
+
+// ConfigPath returns the path to the system-wide registry configuration file.
+func ConfigPath(ctx *types.SystemContext) string {
+	confPath := systemRegistriesConfPath
+	if ctx != nil {
+		if ctx.SystemRegistriesConfPath != "" {
+			confPath = ctx.SystemRegistriesConfPath
+		} else if ctx.RootForImplicitAbsolutePaths != "" {
+			confPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
+		}
+	}
+	return confPath
+}
+
+// configMutex is used to synchronize concurrent accesses to configCache.
+var configMutex = sync.Mutex{}
+
+// configCache caches already loaded configs with config paths as keys and is
+// used to avoid redundantly parsing configs. Concurrent accesses to the cache
+// are synchronized via configMutex.
+var configCache = make(map[string]*V2RegistriesConf)
+
+// InvalidateCache invalidates the registry cache. This function is meant to be
+// used for long-running processes that need to reload potential changes made to
+// the cached registry config files.
+func InvalidateCache() {
+	configMutex.Lock()
+	defer configMutex.Unlock()
+	configCache = make(map[string]*V2RegistriesConf)
+}
+
+// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached.
+func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) {
+	configPath := ConfigPath(ctx)
+
+	configMutex.Lock()
+	// if the config has already been loaded, return the cached registries
+	if config, inCache := configCache[configPath]; inCache {
+		configMutex.Unlock()
+		return config, nil
+	}
+	configMutex.Unlock()
+
+	return TryUpdatingCache(ctx)
+}
+
+// TryUpdatingCache loads the configuration from the provided `SystemContext`
+// without using the internal cache. On success, the loaded configuration will
+// be added into the internal registry cache.
+func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) {
+	configPath := ConfigPath(ctx)
+
+	configMutex.Lock()
+	defer configMutex.Unlock()
+
+	// load the config
+	config, err := loadRegistryConf(configPath)
+	if err != nil {
+		// Return an empty []Registry if we use the default config,
+		// which implies that the config path of the SystemContext
+		// isn't set. Note: if ctx.SystemRegistriesConfPath points to
+		// the default config, we will still return an error.
+		if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") {
+			return &V2RegistriesConf{Registries: []Registry{}}, nil
+		}
+		return nil, err
+	}
+
+	v2Config := &config.V2RegistriesConf
+
+	// backwards compatibility for v1 configs
+	if config.V1RegistriesConf.Nonempty() {
+		if config.V2RegistriesConf.Nonempty() {
+			return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
+		}
+		v2, err := config.V1RegistriesConf.ConvertToV2()
+		if err != nil {
+			return nil, err
+		}
+		v2Config = v2
+	}
+
+	if err := v2Config.postProcess(); err != nil {
+		return nil, err
+	}
+
+	// populate the cache
+	configCache[configPath] = v2Config
+	return v2Config, nil
+}
+
+// GetRegistries loads and returns the registries specified in the config.
+// Note the parsed content of registry config files is cached. For reloading,
+// use `InvalidateCache` and re-call `GetRegistries`.
+func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return config.Registries, nil
+}
+
+// UnqualifiedSearchRegistries returns a list of host[:port] entries to try
+// for unqualified image search, in the returned order.
+func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return config.UnqualifiedSearchRegistries, nil
+}
+
+// refMatchesPrefix returns true iff ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!),
+// matches a Registry.Prefix value.
+// (This is split from the caller primarily to make testing easier.)
+func refMatchesPrefix(ref, prefix string) bool {
+	switch {
+	case len(ref) < len(prefix):
+		return false
+	case len(ref) == len(prefix):
+		return ref == prefix
+	case len(ref) > len(prefix):
+		if !strings.HasPrefix(ref, prefix) {
+			return false
+		}
+		c := ref[len(prefix)]
+		// This allows "example.com:5000" to match "example.com",
+		// which is unintended; that will get fixed eventually, DON'T RELY
+		// ON THE CURRENT BEHAVIOR.
+		return c == ':' || c == '/' || c == '@'
+	default:
+		panic("Internal error: impossible comparison outcome")
+	}
+}
+
+// FindRegistry returns the Registry with the longest prefix for ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!).
+// If no Registry prefixes the image, nil is returned.
+func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	reg := Registry{}
+	prefixLen := 0
+	for _, r := range config.Registries {
+		if refMatchesPrefix(ref, r.Prefix) {
+			length := len(r.Prefix)
+			if length > prefixLen {
+				reg = r
+				prefixLen = length
+			}
+		}
+	}
+	if prefixLen != 0 {
+		return &reg, nil
+	}
+	return nil, nil
+}
+
+// loadRegistryConf loads the registry configuration file from the filesystem
+// and then unmarshals it. Returns the unmarshalled object.
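
A hypothetical lookup sketch for the longest-prefix matching above. The registries.conf path and image reference are assumptions for illustration:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v4/pkg/sysregistriesv2"
	"github.com/containers/image/v4/types"
)

func main() {
	// Assumed test config; omit SystemRegistriesConfPath to use the system default.
	sys := &types.SystemContext{SystemRegistriesConfPath: "/tmp/registries.conf"}

	reg, err := sysregistriesv2.FindRegistry(sys, "example.com/foo/image:latest")
	if err != nil {
		panic(err)
	}
	if reg == nil {
		fmt.Println("no registry entry prefixes this reference")
		return
	}
	fmt.Println("matched prefix:", reg.Prefix, "location:", reg.Location)
}
```
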
+func loadRegistryConf(configPath string) (*tomlConfig, error) { + config := &tomlConfig{} + + configBytes, err := ioutil.ReadFile(configPath) + if err != nil { + return nil, err + } + + err = toml.Unmarshal(configBytes, &config) + return config, err +} diff --git a/vendor/github.com/containers/image/v4/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v4/pkg/tlsclientconfig/tlsclientconfig.go new file mode 100644 index 000000000..6785564e8 --- /dev/null +++ b/vendor/github.com/containers/image/v4/pkg/tlsclientconfig/tlsclientconfig.go @@ -0,0 +1,112 @@ +package tlsclientconfig + +import ( + "crypto/tls" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc +func SetupCertificates(dir string, tlsc *tls.Config) error { + logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) + fs, err := ioutil.ReadDir(dir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + if os.IsPermission(err) { + logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err) + return nil + } + return err + } + + for _, f := range fs { + fullPath := filepath.Join(dir, f.Name()) + if strings.HasSuffix(f.Name(), ".crt") { + logrus.Debugf(" crt: %s", fullPath) + data, err := ioutil.ReadFile(fullPath) + if err != nil { + if os.IsNotExist(err) { + // Dangling symbolic link? + // Race with someone who deleted the + // file after we read the directory's + // list of contents? + logrus.Warnf("error reading certificate %q: %v", fullPath, err) + continue + } + return err + } + if tlsc.RootCAs == nil { + systemPool, err := tlsconfig.SystemCertPool() + if err != nil { + return errors.Wrap(err, "unable to get system cert pool") + } + tlsc.RootCAs = systemPool + } + tlsc.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf(" cert: %s", fullPath) + if !hasFile(fs, keyName) { + return errors.Errorf("missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) + if err != nil { + return err + } + tlsc.Certificates = append(tlsc.Certificates, cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + logrus.Debugf(" key: %s", fullPath) + if !hasFile(fs, certName) { + return errors.Errorf("missing client certificate %s for key %s", certName, keyName) + } + } + } + return nil +} + +func hasFile(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false +} + +// NewTransport Creates a default transport +func NewTransport() *http.Transport { + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + tr := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + tr.Dial = proxyDialer.Dial + } + return tr +} diff --git a/vendor/github.com/containers/image/v4/signature/docker.go b/vendor/github.com/containers/image/v4/signature/docker.go new file mode 100644 index 000000000..c3ac33d48 --- /dev/null +++ b/vendor/github.com/containers/image/v4/signature/docker.go @@ -0,0 +1,65 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +package signature + +import ( + "fmt" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/manifest" + "github.com/opencontainers/go-digest" +) + +// SignDockerManifest returns a signature for manifest as the specified dockerReference, +// using mech and keyIdentity. +func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { + manifestDigest, err := manifest.Digest(m) + if err != nil { + return nil, err + } + sig := newUntrustedSignature(manifestDigest, dockerReference) + return sig.sign(mech, keyIdentity) +} + +// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference, +// using mech. 
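
A hypothetical end-to-end sketch for the two functions in this file: sign a manifest with a key from the default GPG configuration, then verify the result. The manifest body, reference, and `"<key fingerprint>"` placeholder are assumptions; signing fails unless such a key actually exists and the mechanism supports signing:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v4/signature"
)

func main() {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		panic(err)
	}
	defer mech.Close()

	manifest := []byte(`{"schemaVersion": 2}`) // stand-in manifest body
	sig, err := signature.SignDockerManifest(manifest, "docker.io/library/busybox:latest",
		mech, "<key fingerprint>")
	if err != nil {
		panic(err)
	}

	verified, err := signature.VerifyDockerManifestSignature(sig, manifest,
		"docker.io/library/busybox:latest", mech, "<key fingerprint>")
	fmt.Println(verified, err)
}
```
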
+func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte, + expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) { + expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference) + if err != nil { + return nil, err + } + sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{ + validateKeyIdentity: func(keyIdentity string) error { + if keyIdentity != expectedKeyIdentity { + return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)} + } + return nil + }, + validateSignedDockerReference: func(signedDockerReference string) error { + signedRef, err := reference.ParseNormalizedNamed(signedDockerReference) + if err != nil { + return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)} + } + if signedRef.String() != expectedRef.String() { + return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s", + signedDockerReference, expectedDockerReference)} + } + return nil + }, + validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error { + matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest) + if err != nil { + return err + } + if !matches { + return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)} + } + return nil + }, + }) + if err != nil { + return nil, err + } + return sig, nil +} diff --git a/vendor/github.com/containers/image/v4/signature/json.go b/vendor/github.com/containers/image/v4/signature/json.go new file mode 100644 index 000000000..9e592863d --- /dev/null +++ b/vendor/github.com/containers/image/v4/signature/json.go @@ -0,0 +1,88 @@ +package signature + +import ( + "bytes" + "encoding/json" + "fmt" + "io" +) + +// jsonFormatError is returned when JSON does not match expected format. +type jsonFormatError string + +func (err jsonFormatError) Error() string { + return string(err) +} + +// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect +// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to +// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected. +// +// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy, +// we could use reflection to automate this. Later? +func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error { + seenKeys := map[string]struct{}{} + + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return jsonFormatError(err.Error()) + } + if t != json.Delim('{') { + return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t)) + } + for { + t, err := dec.Token() + if err != nil { + return jsonFormatError(err.Error()) + } + if t == json.Delim('}') { + break + } + + key, ok := t.(string) + if !ok { + // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state. 
+ return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t)) + } + if _, ok := seenKeys[key]; ok { + return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key)) + } + seenKeys[key] = struct{}{} + + valuePtr := fieldResolver(key) + if valuePtr == nil { + return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key)) + } + // This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value. + if err := dec.Decode(valuePtr); err != nil { + return jsonFormatError(err.Error()) + } + } + if _, err := dec.Token(); err != io.EOF { + return jsonFormatError("Unexpected data after JSON object") + } + return nil +} + +// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect +// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields +// must be present exactly once, and none other fields are accepted. +func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error { + seenKeys := map[string]struct{}{} + if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + if valuePtr, ok := exactFields[key]; ok { + seenKeys[key] = struct{}{} + return valuePtr + } + return nil + }); err != nil { + return err + } + for key := range exactFields { + if _, ok := seenKeys[key]; !ok { + return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key)) + } + } + return nil +} diff --git a/vendor/github.com/containers/image/v4/signature/mechanism.go b/vendor/github.com/containers/image/v4/signature/mechanism.go new file mode 100644 index 000000000..bdf26c531 --- /dev/null +++ b/vendor/github.com/containers/image/v4/signature/mechanism.go @@ -0,0 +1,85 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +package signature + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "strings" + + "golang.org/x/crypto/openpgp" +) + +// SigningMechanism abstracts a way to sign binary blobs and verify their signatures. +// Each mechanism should eventually be closed by calling Close(). +// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to +// eliminate ambiguities, support CA signatures and perhaps other key properties) +type SigningMechanism interface { + // Close removes resources associated with the mechanism, if any. + Close() error + // SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. + SupportsSigning() error + // Sign creates a (non-detached) signature of input using keyIdentity. + // Fails with a SigningNotSupportedError if the mechanism does not support signing. + Sign(input []byte, keyIdentity string) ([]byte, error) + // Verify parses unverifiedSignature and returns the content and the signer's identity + Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) + // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, + // along with a short identifier of the key used for signing. + // WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys) + // is NOT the same as a "key identity" used in other calls ot this interface, and + // the values may have no recognizable relationship if the public key is not available. 
+ UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) +} + +// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that. +type SigningNotSupportedError string + +func (err SigningNotSupportedError) Error() string { + return string(err) +} + +// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default +// GPG configuration ($GNUPGHOME / ~/.gnupg) +// The caller must call .Close() on the returned SigningMechanism. +func NewGPGSigningMechanism() (SigningMechanism, error) { + return newGPGSigningMechanismInDirectory("") +} + +// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + return newEphemeralGPGSigningMechanism(blob) +} + +// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls ot this interface, and +// the values may have no recognizable relationship if the public key is not available. +func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography. + md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil) + if err != nil { + return nil, "", err + } + if !md.IsSigned { + return nil, "", errors.New("The input is not a signature") + } + content, err := ioutil.ReadAll(md.UnverifiedBody) + if err != nil { + // Coverage: An error during reading the body can happen only if + // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key + // to decrypt the contents anyway), or + // 2) the message is signed AND we give ReadMessage a correspnding public key, which we don’t. + return nil, "", err + } + + // Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints + // (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)! + return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil +} diff --git a/vendor/github.com/containers/image/v4/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v4/signature/mechanism_gpgme.go new file mode 100644 index 000000000..4825ab27c --- /dev/null +++ b/vendor/github.com/containers/image/v4/signature/mechanism_gpgme.go @@ -0,0 +1,175 @@ +// +build !containers_image_openpgp + +package signature + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + + "github.com/mtrmac/gpgme" +) + +// A GPG/OpenPGP signing mechanism, implemented using gpgme. +type gpgmeSigningMechanism struct { + ctx *gpgme.Context + ephemeralDir string // If not "", a directory to be removed on Close() +} + +// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. +// The caller must call .Close() on the returned SigningMechanism. 
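
A sketch of the ephemeral-mechanism pattern: import a public key blob, report the imported key identities, and verify a signature against only those keys. The file paths are assumptions for illustration:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/image/v4/signature"
)

func main() {
	keyBlob, err := ioutil.ReadFile("/tmp/pubkey.gpg") // assumed exported public key
	if err != nil {
		panic(err)
	}
	mech, keyIdentities, err := signature.NewEphemeralGPGSigningMechanism(keyBlob)
	if err != nil {
		panic(err)
	}
	defer mech.Close()
	fmt.Println("imported key identities:", keyIdentities)

	sigBlob, err := ioutil.ReadFile("/tmp/image.signature") // assumed signature blob
	if err != nil {
		panic(err)
	}
	contents, keyIdentity, err := mech.Verify(sigBlob)
	fmt.Println(len(contents), keyIdentity, err)
}
```
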
+func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { + ctx, err := newGPGMEContext(optionalDir) + if err != nil { + return nil, err + } + return &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: "", + }, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") + if err != nil { + return nil, nil, err + } + removeDir := true + defer func() { + if removeDir { + os.RemoveAll(dir) + } + }() + ctx, err := newGPGMEContext(dir) + if err != nil { + return nil, nil, err + } + mech := &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: dir, + } + keyIdentities, err := mech.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + + removeDir = false + return mech, keyIdentities, nil +} + +// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty. +func newGPGMEContext(optionalDir string) (*gpgme.Context, error) { + ctx, err := gpgme.New() + if err != nil { + return nil, err + } + if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil { + return nil, err + } + if optionalDir != "" { + err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir) + if err != nil { + return nil, err + } + } + ctx.SetArmor(false) + ctx.SetTextMode(false) + return ctx, nil +} + +func (m *gpgmeSigningMechanism) Close() error { + if m.ephemeralDir != "" { + os.RemoveAll(m.ephemeralDir) // Ignore an error, if any + } + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism); +// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism. +func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + inputData, err := gpgme.NewDataBytes(blob) + if err != nil { + return nil, err + } + res, err := m.ctx.Import(inputData) + if err != nil { + return nil, err + } + keyIdentities := []string{} + for _, i := range res.Imports { + if i.Result == nil { + keyIdentities = append(keyIdentities, i.Fingerprint) + } + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *gpgmeSigningMechanism) SupportsSigning() error { + return nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
+func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + key, err := m.ctx.GetKey(keyIdentity, true) + if err != nil { + return nil, err + } + inputData, err := gpgme.NewDataBytes(input) + if err != nil { + return nil, err + } + var sigBuffer bytes.Buffer + sigData, err := gpgme.NewDataWriter(&sigBuffer) + if err != nil { + return nil, err + } + if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { + return nil, err + } + return sigBuffer.Bytes(), nil +} + +// Verify parses unverifiedSignature and returns the content and the signer's identity +func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { + signedBuffer := bytes.Buffer{} + signedData, err := gpgme.NewDataWriter(&signedBuffer) + if err != nil { + return nil, "", err + } + unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature) + if err != nil { + return nil, "", err + } + _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData) + if err != nil { + return nil, "", err + } + if len(sigs) != 1 { + return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))} + } + sig := sigs[0] + // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves + if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage { + // FIXME: Better error reporting eventually + return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)} + } + return signedBuffer.Bytes(), sig.Fingerprint, nil +} + +// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls ot this interface, and +// the values may have no recognizable relationship if the public key is not available. +func (m gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + return gpgUntrustedSignatureContents(untrustedSignature) +} diff --git a/vendor/github.com/containers/image/v4/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v4/signature/mechanism_openpgp.go new file mode 100644 index 000000000..eccd610c9 --- /dev/null +++ b/vendor/github.com/containers/image/v4/signature/mechanism_openpgp.go @@ -0,0 +1,159 @@ +// +build containers_image_openpgp + +package signature + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "time" + + "github.com/containers/storage/pkg/homedir" + "golang.org/x/crypto/openpgp" +) + +// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp. +type openpgpSigningMechanism struct { + keyring openpgp.EntityList +} + +// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. +// The caller must call .Close() on the returned SigningMechanism. 
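
For completeness, a sketch of `UntrustedSignatureContents`, which behaves the same under both the gpgme backend above and this openpgp backend. The signature path is an assumption; as the interface warns, the returned contents are unverified and only suitable for display or diagnostics:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/image/v4/signature"
)

func main() {
	sigBlob, err := ioutil.ReadFile("/tmp/image.signature") // assumed signature file
	if err != nil {
		panic(err)
	}
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		panic(err)
	}
	defer mech.Close()

	// UNVERIFIED contents: never use these for trust decisions.
	contents, shortKeyID, err := mech.UntrustedSignatureContents(sigBlob)
	if err != nil {
		panic(err)
	}
	fmt.Printf("signed by short key ID %s, %d bytes of payload\n", shortKeyID, len(contents))
}
```
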
+func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + + gpgHome := optionalDir + if gpgHome == "" { + gpgHome = os.Getenv("GNUPGHOME") + if gpgHome == "" { + gpgHome = path.Join(homedir.Get(), ".gnupg") + } + } + + pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg")) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + } else { + _, err := m.importKeysFromBytes(pubring) + if err != nil { + return nil, err + } + } + return m, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + keyIdentities, err := m.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + return m, keyIdentities, nil +} + +func (m *openpgpSigningMechanism) Close() error { + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob)) + if err != nil { + k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob)) + if e2 != nil { + return nil, err // The original error -- FIXME: is this better? + } + keyring = k + } + + keyIdentities := []string{} + for _, entity := range keyring { + if entity.PrimaryKey == nil { + // Coverage: This should never happen, openpgp.ReadEntity fails with a + // openpgp.errors.StructuralError instead of returning an entity with this + // field set to nil. + continue + } + // Uppercase the fingerprint to be compatible with gpgme + keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))) + m.keyring = append(m.keyring, entity) + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *openpgpSigningMechanism) SupportsSigning() error { + return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
+func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+	return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
+}
+
+// Verify parses unverifiedSignature and returns the content and the signer's identity
+func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+	md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
+	if err != nil {
+		return nil, "", err
+	}
+	if !md.IsSigned {
+		return nil, "", errors.New("not signed")
+	}
+	content, err := ioutil.ReadAll(md.UnverifiedBody)
+	if err != nil {
+		// Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
+		// (and possibly also signed, but it _must_ be encrypted) and the signing
+		// “modification detection code” detects a mismatch. But in that case,
+		// we would expect the signature verification to fail as well, and that is checked
+		// first. Besides, we are not supplying any decryption keys, so we really
+		// can never reach this “encrypted data MDC mismatch” path.
+		return nil, "", err
+	}
+	if md.SignatureError != nil {
+		return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
+	}
+	if md.SignedBy == nil {
+		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)}
+	}
+	if md.Signature != nil {
+		if md.Signature.SigLifetimeSecs != nil {
+			expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
+			if time.Now().After(expiry) {
+				return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
+			}
+		}
+	} else if md.SignatureV3 == nil {
+		// Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3,
+		// or sets md.SignatureError.
+		return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"}
+	}
+
+	// Uppercase the fingerprint to be compatible with gpgme
+	return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil
+}
+
+// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func (m openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+	return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/v4/signature/policy_config.go b/vendor/github.com/containers/image/v4/signature/policy_config.go
new file mode 100644
index 000000000..bb229f5f1
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/signature/policy_config.go
@@ -0,0 +1,688 @@
+// policy_config.go handles creation of policy objects, either by parsing JSON
+// or by programs building them programmatically.
+
+// The New* constructors are intended to be a stable API. FIXME: after an independent review.
+
+// Do not invoke the internals of the JSON marshaling/unmarshaling directly.
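
A minimal sketch of a policy.json of the shape this file parses, fed through the `NewPolicyFromBytes` constructor defined below rather than plain `json.Unmarshal`. The registry name and key path are assumptions for illustration:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v4/signature"
)

const policyJSON = `{
	"default": [{"type": "insecureAcceptAnything"}],
	"transports": {
		"docker": {
			"registry.example.com": [
				{
					"type": "signedBy",
					"keyType": "GPGKeys",
					"keyPath": "/etc/pki/example-pubkey.gpg"
				}
			]
		}
	}
}`

func main() {
	policy, err := signature.NewPolicyFromBytes([]byte(policyJSON))
	if err != nil {
		panic(err) // the strict parser rejects unknown or duplicated keys
	}
	fmt.Println("transport scopes configured:", len(policy.Transports))
}
```
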
+
+// We can't just blindly call json.Unmarshal because that would silently ignore
+// typos, and that would just not do for security policy.
+
+// FIXME? This is by no means a user-friendly parser: No location information in error messages, no other context.
+// But at least it is not worse than blind json.Unmarshal()…
+
+package signature
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+
+	"github.com/containers/image/v4/docker/reference"
+	"github.com/containers/image/v4/transports"
+	"github.com/containers/image/v4/types"
+	"github.com/pkg/errors"
+)
+
+// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/v4/signature.systemDefaultPolicyPath=$your_path'
+var systemDefaultPolicyPath = builtinDefaultPolicyPath
+
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
+// DO NOT change this, instead see systemDefaultPolicyPath above.
+const builtinDefaultPolicyPath = "/etc/containers/policy.json"
+
+// InvalidPolicyFormatError is returned when parsing an invalid policy configuration.
+type InvalidPolicyFormatError string
+
+func (err InvalidPolicyFormatError) Error() string {
+	return string(err)
+}
+
+// DefaultPolicy returns the default policy of the system.
+// Most applications should be using this method to get the policy configured
+// by the system administrator.
+// sys should usually be nil, can be set to override the default.
+// NOTE: When this function returns an error, report it to the user and abort.
+// DO NOT hard-code fallback policies in your application.
+func DefaultPolicy(sys *types.SystemContext) (*Policy, error) {
+	return NewPolicyFromFile(defaultPolicyPath(sys))
+}
+
+// defaultPolicyPath returns a path to the default policy of the system.
+func defaultPolicyPath(sys *types.SystemContext) string {
+	if sys != nil {
+		if sys.SignaturePolicyPath != "" {
+			return sys.SignaturePolicyPath
+		}
+		if sys.RootForImplicitAbsolutePaths != "" {
+			return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
+		}
+	}
+	return systemDefaultPolicyPath
+}
+
+// NewPolicyFromFile returns a policy configured in the specified file.
+func NewPolicyFromFile(fileName string) (*Policy, error) {
+	contents, err := ioutil.ReadFile(fileName)
+	if err != nil {
+		return nil, err
+	}
+	policy, err := NewPolicyFromBytes(contents)
+	if err != nil {
+		return nil, errors.Wrapf(err, "invalid policy in %q", fileName)
+	}
+	return policy, nil
+}
+
+// NewPolicyFromBytes returns a policy parsed from the specified blob.
+// Use this function instead of calling json.Unmarshal directly.
+func NewPolicyFromBytes(data []byte) (*Policy, error) {
+	p := Policy{}
+	if err := json.Unmarshal(data, &p); err != nil {
+		return nil, InvalidPolicyFormatError(err.Error())
+	}
+	return &p, nil
+}
+
+// Compile-time check that Policy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*Policy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
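+//
+// For example, a minimal policy blob that NewPolicyFromBytes (and hence this
+// method) accepts looks like this (an illustrative sketch; the transport
+// scope is made up):
+//
+//	{
+//	    "default": [{"type": "reject"}],
+//	    "transports": {
+//	        "docker": {
+//	            "registry.example.com/trusted": [{"type": "insecureAcceptAnything"}]
+//	        }
+//	    }
+//	}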
+func (p *Policy) UnmarshalJSON(data []byte) error {
+	*p = Policy{}
+	transports := policyTransportsMap{}
+	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+		switch key {
+		case "default":
+			return &p.Default
+		case "transports":
+			return &transports
+		default:
+			return nil
+		}
+	}); err != nil {
+		return err
+	}
+
+	if p.Default == nil {
+		return InvalidPolicyFormatError("Default policy is missing")
+	}
+	p.Transports = map[string]PolicyTransportScopes(transports)
+	return nil
+}
+
+// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member.
+type policyTransportsMap map[string]PolicyTransportScopes
+
+// Compile-time check that policyTransportsMap implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportsMap)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
+	// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+	// So, use a temporary map of pointers-to-slices and convert.
+	tmpMap := map[string]*PolicyTransportScopes{}
+	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+		// transport can be nil
+		transport := transports.Get(key)
+		// paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+		if _, ok := tmpMap[key]; ok {
+			return nil
+		}
+		ptsWithTransport := policyTransportScopesWithTransport{
+			transport: transport,
+			dest:      &PolicyTransportScopes{}, // This allocates a new instance on each call.
+		}
+		tmpMap[key] = ptsWithTransport.dest
+		return &ptsWithTransport
+	}); err != nil {
+		return err
+	}
+	for key, ptr := range tmpMap {
+		(*m)[key] = *ptr
+	}
+	return nil
+}
+
+// Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler.
+// We want to only use policyTransportScopesWithTransport.
+var _ json.Unmarshaler = (*PolicyTransportScopes)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error {
+	return errors.New("Do not try to unmarshal PolicyTransportScopes directly")
+}
+
+// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes
+// while validating using a specific ImageTransport if not nil.
+type policyTransportScopesWithTransport struct {
+	transport types.ImageTransport
+	dest      *PolicyTransportScopes
+}
+
+// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
+	// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+	// So, use a temporary map of pointers-to-slices and convert.
+	tmpMap := map[string]*PolicyRequirements{}
+	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+		// paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+		if _, ok := tmpMap[key]; ok {
+			return nil
+		}
+		if key != "" && m.transport != nil {
+			if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil {
+				return nil
+			}
+		}
+		ptr := &PolicyRequirements{} // This allocates a new instance on each call.
+ tmpMap[key] = ptr + return ptr + }); err != nil { + return err + } + for key, ptr := range tmpMap { + (*m.dest)[key] = *ptr + } + return nil +} + +// Compile-time check that PolicyRequirements implements json.Unmarshaler. +var _ json.Unmarshaler = (*PolicyRequirements)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *PolicyRequirements) UnmarshalJSON(data []byte) error { + reqJSONs := []json.RawMessage{} + if err := json.Unmarshal(data, &reqJSONs); err != nil { + return err + } + if len(reqJSONs) == 0 { + return InvalidPolicyFormatError("List of verification policy requirements must not be empty") + } + res := make([]PolicyRequirement, len(reqJSONs)) + for i, reqJSON := range reqJSONs { + req, err := newPolicyRequirementFromJSON(reqJSON) + if err != nil { + return err + } + res[i] = req + } + *m = res + return nil +} + +// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation. +func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { + var typeField prCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyRequirement + switch typeField.Type { + case prTypeInsecureAcceptAnything: + res = &prInsecureAcceptAnything{} + case prTypeReject: + res = &prReject{} + case prTypeSignedBy: + res = &prSignedBy{} + case prTypeSignedBaseLayer: + res = &prSignedBaseLayer{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. +func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { + return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} +} + +// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement. +func NewPRInsecureAcceptAnything() PolicyRequirement { + return newPRInsecureAcceptAnything() +} + +// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. +var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { + *pr = prInsecureAcceptAnything{} + var tmp prInsecureAcceptAnything + if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeInsecureAcceptAnything { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *pr = *newPRInsecureAcceptAnything() + return nil +} + +// newPRReject is NewPRReject, except it returns the private type. +func newPRReject() *prReject { + return &prReject{prCommon{Type: prTypeReject}} +} + +// NewPRReject returns a new "reject" PolicyRequirement. +func NewPRReject() PolicyRequirement { + return newPRReject() +} + +// Compile-time check that prReject implements json.Unmarshaler. +var _ json.Unmarshaler = (*prReject)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
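+//
+// For illustration, the JSON fragment this parses is simply:
+//
+//	{"type": "reject"}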
+func (pr *prReject) UnmarshalJSON(data []byte) error {
+	*pr = prReject{}
+	var tmp prReject
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type": &tmp.Type,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prTypeReject {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	*pr = *newPRReject()
+	return nil
+}
+
+// newPRSignedBy returns a new prSignedBy if parameters are valid.
+func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+	if !keyType.IsValid() {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
+	}
+	if len(keyPath) > 0 && len(keyData) > 0 {
+		return nil, InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+	}
+	if signedIdentity == nil {
+		return nil, InvalidPolicyFormatError("signedIdentity not specified")
+	}
+	return &prSignedBy{
+		prCommon:       prCommon{Type: prTypeSignedBy},
+		KeyType:        keyType,
+		KeyPath:        keyPath,
+		KeyData:        keyData,
+		SignedIdentity: signedIdentity,
+	}, nil
+}
+
+// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
+func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+	return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
+}
+
+// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath
+func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
+}
+
+// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
+func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+	return newPRSignedBy(keyType, "", keyData, signedIdentity)
+}
+
+// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData
+func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return newPRSignedByKeyData(keyType, keyData, signedIdentity)
+}
+
+// Compile-time check that prSignedBy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
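+//
+// In a policy file, a signedBy requirement might look like this (an
+// illustrative sketch; the key path is hypothetical):
+//
+//	{
+//	    "type": "signedBy",
+//	    "keyType": "GPGKeys",
+//	    "keyPath": "/etc/pki/example-pubring.gpg",
+//	    "signedIdentity": {"type": "matchRepoDigestOrExact"}
+//	}
+//
+// If "signedIdentity" is omitted, it defaults to "matchRepoDigestOrExact", as
+// implemented below.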
+func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
+	*pr = prSignedBy{}
+	var tmp prSignedBy
+	var gotKeyPath, gotKeyData = false, false
+	var signedIdentity json.RawMessage
+	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+		switch key {
+		case "type":
+			return &tmp.Type
+		case "keyType":
+			return &tmp.KeyType
+		case "keyPath":
+			gotKeyPath = true
+			return &tmp.KeyPath
+		case "keyData":
+			gotKeyData = true
+			return &tmp.KeyData
+		case "signedIdentity":
+			return &signedIdentity
+		default:
+			return nil
+		}
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prTypeSignedBy {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	if signedIdentity == nil {
+		tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
+	} else {
+		si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
+		if err != nil {
+			return err
+		}
+		tmp.SignedIdentity = si
+	}
+
+	var res *prSignedBy
+	var err error
+	switch {
+	case gotKeyPath && gotKeyData:
+		return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+	case gotKeyPath && !gotKeyData:
+		res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
+	case !gotKeyPath && gotKeyData:
+		res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
+	case !gotKeyPath && !gotKeyData:
+		return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified")
+	default: // Coverage: This should never happen
+		return errors.Errorf("Impossible keyPath/keyData presence combination!?")
+	}
+	if err != nil {
+		return err
+	}
+	*pr = *res
+
+	return nil
+}
+
+// IsValid returns true iff kt is a recognized value
+func (kt sbKeyType) IsValid() bool {
+	switch kt {
+	case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys,
+		SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
+		return true
+	default:
+		return false
+	}
+}
+
+// Compile-time check that sbKeyType implements json.Unmarshaler.
+var _ json.Unmarshaler = (*sbKeyType)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
+	*kt = sbKeyType("")
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	if !sbKeyType(s).IsValid() {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
+	}
+	*kt = sbKeyType(s)
+	return nil
+}
+
+// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type.
+func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {
+	if baseLayerIdentity == nil {
+		return nil, InvalidPolicyFormatError("baseLayerIdentity not specified")
+	}
+	return &prSignedBaseLayer{
+		prCommon:          prCommon{Type: prTypeSignedBaseLayer},
+		BaseLayerIdentity: baseLayerIdentity,
+	}, nil
+}
+
+// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement.
+func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return newPRSignedBaseLayer(baseLayerIdentity)
+}
+
+// Compile-time check that prSignedBaseLayer implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBaseLayer)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
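+//
+// In a policy file, a signedBaseLayer requirement might look like this (an
+// illustrative sketch; the repository name is made up):
+//
+//	{
+//	    "type": "signedBaseLayer",
+//	    "baseLayerIdentity": {"type": "exactRepository", "dockerRepository": "registry.example.com/base"}
+//	}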
+func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
+	*pr = prSignedBaseLayer{}
+	var tmp prSignedBaseLayer
+	var baseLayerIdentity json.RawMessage
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type":              &tmp.Type,
+		"baseLayerIdentity": &baseLayerIdentity,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prTypeSignedBaseLayer {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
+	if err != nil {
+		return err
+	}
+	res, err := newPRSignedBaseLayer(bli)
+	if err != nil {
+		// Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid.
+		return err
+	}
+	*pr = *res
+	return nil
+}
+
+// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
+func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
+	var typeField prmCommon
+	if err := json.Unmarshal(data, &typeField); err != nil {
+		return nil, err
+	}
+	var res PolicyReferenceMatch
+	switch typeField.Type {
+	case prmTypeMatchExact:
+		res = &prmMatchExact{}
+	case prmTypeMatchRepoDigestOrExact:
+		res = &prmMatchRepoDigestOrExact{}
+	case prmTypeMatchRepository:
+		res = &prmMatchRepository{}
+	case prmTypeExactReference:
+		res = &prmExactReference{}
+	case prmTypeExactRepository:
+		res = &prmExactRepository{}
+	default:
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
+	}
+	if err := json.Unmarshal(data, &res); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+// newPRMMatchExact is NewPRMMatchExact, except it returns the private type.
+func newPRMMatchExact() *prmMatchExact {
+	return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
+}
+
+// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch.
+func NewPRMMatchExact() PolicyReferenceMatch {
+	return newPRMMatchExact()
+}
+
+// Compile-time check that prmMatchExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
+	*prm = prmMatchExact{}
+	var tmp prmMatchExact
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type": &tmp.Type,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeMatchExact {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	*prm = *newPRMMatchExact()
+	return nil
+}
+
+// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
+func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact {
+	return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}}
+}
+
+// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch.
+func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch {
+	return newPRMMatchRepoDigestOrExact()
+}
+
+// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
+	*prm = prmMatchRepoDigestOrExact{}
+	var tmp prmMatchRepoDigestOrExact
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type": &tmp.Type,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeMatchRepoDigestOrExact {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	*prm = *newPRMMatchRepoDigestOrExact()
+	return nil
+}
+
+// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
+func newPRMMatchRepository() *prmMatchRepository {
+	return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
+}
+
+// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch.
+func NewPRMMatchRepository() PolicyReferenceMatch {
+	return newPRMMatchRepository()
+}
+
+// Compile-time check that prmMatchRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
+	*prm = prmMatchRepository{}
+	var tmp prmMatchRepository
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type": &tmp.Type,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeMatchRepository {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	*prm = *newPRMMatchRepository()
+	return nil
+}
+
+// newPRMExactReference is NewPRMExactReference, except it returns the private type.
+func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
+	ref, err := reference.ParseNormalizedNamed(dockerReference)
+	if err != nil {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
+	}
+	if reference.IsNameOnly(ref) {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
+	}
+	return &prmExactReference{
+		prmCommon:       prmCommon{Type: prmTypeExactReference},
+		DockerReference: dockerReference,
+	}, nil
+}
+
+// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch.
+func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) {
+	return newPRMExactReference(dockerReference)
+}
+
+// Compile-time check that prmExactReference implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactReference)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
+	*prm = prmExactReference{}
+	var tmp prmExactReference
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type":            &tmp.Type,
+		"dockerReference": &tmp.DockerReference,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeExactReference {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+
+	res, err := newPRMExactReference(tmp.DockerReference)
+	if err != nil {
+		return err
+	}
+	*prm = *res
+	return nil
+}
+
+// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
+func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
+	if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
+	}
+	return &prmExactRepository{
+		prmCommon:        prmCommon{Type: prmTypeExactRepository},
+		DockerRepository: dockerRepository,
+	}, nil
+}
+
+// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
+func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
+	return newPRMExactRepository(dockerRepository)
+}
+
+// Compile-time check that prmExactRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
+	*prm = prmExactRepository{}
+	var tmp prmExactRepository
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type":             &tmp.Type,
+		"dockerRepository": &tmp.DockerRepository,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeExactRepository {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+
+	res, err := newPRMExactRepository(tmp.DockerRepository)
+	if err != nil {
+		return err
+	}
+	*prm = *res
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval.go b/vendor/github.com/containers/image/v4/signature/policy_eval.go
new file mode 100644
index 000000000..110d40f7c
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/signature/policy_eval.go
@@ -0,0 +1,289 @@
+// This defines the top-level policy evaluation API.
+// To the extent possible, the interface of the functions provided
+// here is intended to be completely unambiguous, and stable for users
+// to rely on.
+
+package signature
+
+import (
+	"context"
+
+	"github.com/containers/image/v4/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
+type PolicyRequirementError string
+
+func (err PolicyRequirementError) Error() string {
+	return string(err)
+}
+
+// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
+type signatureAcceptanceResult string
+
+const (
+	sarAccepted signatureAcceptanceResult = "sarAccepted"
+	sarRejected signatureAcceptanceResult = "sarRejected"
+	sarUnknown  signatureAcceptanceResult = "sarUnknown"
+)
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+type PolicyRequirement interface {
+	// FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
+	// costly initialization like creating temporary GPG home directories and reading files.
+	// Setup() (someState, error)
+	// Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
+
+	// isSignatureAuthorAccepted, given an image and a signature blob, returns:
+	// - sarAccepted if the signature has been verified against the appropriate public key
+	//   (where "appropriate public key" may depend on the contents of the signature);
+	//   in that case a parsed Signature should be returned.
+	// - sarRejected if the signature has not been verified;
+	//   in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
+	//   succeeded but the result was rejection.
+	// - sarUnknown if this PolicyRequirement does not deal with signatures.
+	//   NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
+	//   Returning sarUnknown and a non-nil error value is invalid.
+	// WARNING: This makes the signature contents acceptable for further processing,
+	// but it does not necessarily mean that the contents of the signature are
+	// consistent with local policy.
+	// For example:
+	// - Do not use a true value to determine whether to run
+	//   a container based on this image; use IsRunningImageAllowed instead.
+	// - Just because a signature is accepted does not automatically mean the contents of the
+	//   signature are authorized to run code as root, or to affect system or cluster configuration.
+	isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+
+	// isRunningImageAllowed returns true if the requirement allows running an image.
+	// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+	// succeeded but the result was rejection.
+	// WARNING: This validates signatures and the manifest, but does not download or validate the
+	// layers. Users must validate that the layers match their expected digests.
+	isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error)
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+type PolicyReferenceMatch interface {
+	// matchesDockerReference decides whether a specific image identity is accepted for an image
+	// (or, usually, for the image's Reference().DockerReference()). Note that
+	// image.Reference().DockerReference() may be nil.
+	matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
+}
+
+// PolicyContext encapsulates a policy and possible cached state
+// for speeding up its evaluation.
+type PolicyContext struct {
+	Policy *Policy
+	state  policyContextState // Internal consistency checking
+}
+
+// policyContextState is used internally to verify the users are not misusing a PolicyContext.
+type policyContextState string
+
+const (
+	pcInvalid      policyContextState = ""
+	pcInitializing policyContextState = "Initializing"
+	pcReady        policyContextState = "Ready"
+	pcInUse        policyContextState = "InUse"
+	pcDestroying   policyContextState = "Destroying"
+	pcDestroyed    policyContextState = "Destroyed"
+)
+
+// changeState changes pc.state, or fails if the state is unexpected
+func (pc *PolicyContext) changeState(expected, new policyContextState) error {
+	if pc.state != expected {
+		return errors.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+	}
+	pc.state = new
+	return nil
+}
+
+// NewPolicyContext sets up and initializes a context for the specified policy.
+// The policy must not be modified while the context exists. FIXME: make a deep copy?
+// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
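+//
+// A typical lifecycle looks like this (an illustrative sketch; “ctx” and
+// “img” are assumed to be supplied by the caller):
+//
+//	policy, err := DefaultPolicy(nil)
+//	if err != nil { /* handle the error */ }
+//	pc, err := NewPolicyContext(policy)
+//	if err != nil { /* handle the error */ }
+//	defer pc.Destroy()
+//	allowed, err := pc.IsRunningImageAllowed(ctx, img)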
+func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
+	pc := &PolicyContext{Policy: policy, state: pcInitializing}
+	// FIXME: initialize
+	if err := pc.changeState(pcInitializing, pcReady); err != nil {
+		// Huh?! This should never fail, we didn't give the pointer to anybody.
+		// Just give up and leave unclean state around.
+		return nil, err
+	}
+	return pc, nil
+}
+
+// Destroy should be called when the user of the context is done with it.
+func (pc *PolicyContext) Destroy() error {
+	if err := pc.changeState(pcReady, pcDestroying); err != nil {
+		return err
+	}
+	// FIXME: destroy
+	return pc.changeState(pcDestroying, pcDestroyed)
+}
+
+// policyIdentityLogName returns a string description of the image identity for policy purposes.
+// ONLY use this for log messages, not for any decisions!
+func policyIdentityLogName(ref types.ImageReference) string {
+	return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
+}
+
+// requirementsForImageRef selects the appropriate requirements for ref.
+func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
+	// Do we have a PolicyTransportScopes for this transport?
+	transportName := ref.Transport().Name()
+	if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
+		// Look for a full match.
+		identity := ref.PolicyConfigurationIdentity()
+		if req, ok := transportScopes[identity]; ok {
+			logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
+			return req
+		}
+
+		// Look for a match of the possible parent namespaces.
+		for _, name := range ref.PolicyConfigurationNamespaces() {
+			if req, ok := transportScopes[name]; ok {
+				logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
+				return req
+			}
+		}
+
+		// Look for a default match for the transport.
+		if req, ok := transportScopes[""]; ok {
+			logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
+			return req
+		}
+	}
+
+	logrus.Debugf(" Using default policy section")
+	return pc.Policy.Default
+}
+
+// GetSignaturesWithAcceptedAuthor returns those signatures from an image
+// for which the policy accepts the author (and which have been successfully
+// verified).
+// NOTE: This may legitimately return an empty list and no error, if the image
+// has no signatures or only invalid signatures.
+// WARNING: This makes the signature contents acceptable for further processing,
+// but it does not necessarily mean that the contents of the signature are
+// consistent with local policy.
+// For example:
+// - Do not use the existence of an accepted signature to determine whether to run
+//   a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+//   signature are authorized to run code as root, or to affect system or cluster configuration.
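+//
+// For illustration, a caller might use it like this (sketch; “pc”, “ctx” and
+// “img” are assumed to be set up as in the NewPolicyContext example above):
+//
+//	sigs, err := pc.GetSignaturesWithAcceptedAuthor(ctx, img)
+//	if err != nil { /* handle the error */ }
+//	for _, s := range sigs {
+//		fmt.Println(s.DockerReference, s.DockerManifestDigest)
+//	}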
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, image types.UnparsedImage) (sigs []*Signature, finalErr error) {
+	if err := pc.changeState(pcReady, pcInUse); err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err := pc.changeState(pcInUse, pcReady); err != nil {
+			sigs = nil
+			finalErr = err
+		}
+	}()
+
+	logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
+	reqs := pc.requirementsForImageRef(image.Reference())
+
+	// FIXME: rename Signatures to UnverifiedSignatures
+	// FIXME: pass context.Context
+	unverifiedSignatures, err := image.Signatures(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]*Signature, 0, len(unverifiedSignatures))
+	for sigNumber, sig := range unverifiedSignatures {
+		var acceptedSig *Signature // non-nil if accepted
+		rejected := false
+		// FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
+		logrus.Debugf("Evaluating signature %d:", sigNumber)
+	interpretingReqs:
+		for reqNumber, req := range reqs {
+			// FIXME: Log the requirement itself? For now, we use just the number.
+			// FIXME: supply state
+			switch res, as, err := req.isSignatureAuthorAccepted(ctx, image, sig); res {
+			case sarAccepted:
+				if as == nil { // Coverage: this should never happen
+					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
+					rejected = true
+					break interpretingReqs
+				}
+				logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
+				if acceptedSig == nil {
+					acceptedSig = as
+				} else if *as != *acceptedSig { // Coverage: this should never happen
+					// Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
+					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber)
+					rejected = true
+					acceptedSig = nil
+					break interpretingReqs
+				}
+			case sarRejected:
+				logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error())
+				rejected = true
+				break interpretingReqs
+			case sarUnknown:
+				if err != nil { // Coverage: this should never happen
+					logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error())
+					rejected = true
+					break interpretingReqs
+				}
+				logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber)
+			default: // Coverage: this should never happen
+				logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res))
+				rejected = true
+				break interpretingReqs
+			}
+		}
+		// This also handles the (invalid) case of empty reqs, by rejecting the signature.
+		if acceptedSig != nil && !rejected {
+			logrus.Debugf(" Overall: OK, signature accepted")
+			res = append(res, acceptedSig)
+		} else {
+			logrus.Debugf(" Overall: Signature not accepted")
+		}
+	}
+	return res, nil
+}
+
+// IsRunningImageAllowed returns true iff the policy allows running the image.
+// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+// succeeded but the result was rejection.
+// WARNING: This validates signatures and the manifest, but does not download or validate the
+// layers. Users must validate that the layers match their expected digests.
+func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (res bool, finalErr error) { + if err := pc.changeState(pcReady, pcInUse); err != nil { + return false, err + } + defer func() { + if err := pc.changeState(pcInUse, pcReady); err != nil { + res = false + finalErr = err + } + }() + + logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference())) + reqs := pc.requirementsForImageRef(image.Reference()) + + if len(reqs) == 0 { + return false, PolicyRequirementError("List of verification policy requirements must not be empty") + } + + for reqNumber, req := range reqs { + // FIXME: supply state + allowed, err := req.isRunningImageAllowed(ctx, image) + if !allowed { + logrus.Debugf("Requirement %d: denied, done", reqNumber) + return false, err + } + logrus.Debugf(" Requirement %d: allowed", reqNumber) + } + // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image. + logrus.Debugf("Overall: allowed") + return true, nil +} diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go new file mode 100644 index 000000000..18fb651d1 --- /dev/null +++ b/vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go @@ -0,0 +1,20 @@ +// Policy evaluation for prSignedBaseLayer. + +package signature + +import ( + "context" + + "github.com/containers/image/v4/types" + "github.com/sirupsen/logrus" +) + +func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarUnknown, nil, nil +} + +func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { + // FIXME? Reject this at policy parsing time already? + logrus.Errorf("signedBaseLayer not implemented yet!") + return false, PolicyRequirementError("signedBaseLayer not implemented yet!") +} diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go new file mode 100644 index 000000000..b8188da5e --- /dev/null +++ b/vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go @@ -0,0 +1,131 @@ +// Policy evaluation for prSignedBy. + +package signature + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + + "github.com/pkg/errors" + + "github.com/containers/image/v4/manifest" + "github.com/containers/image/v4/types" + "github.com/opencontainers/go-digest" +) + +func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + switch pr.KeyType { + case SBKeyTypeGPGKeys: + case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + // FIXME? Reject this at policy parsing time already? 
+		return sarRejected, nil, errors.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
+	default:
+		// This should never happen, newPRSignedBy ensures KeyType.IsValid()
+		return sarRejected, nil, errors.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
+	}
+
+	if pr.KeyPath != "" && pr.KeyData != nil {
+		return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
+	}
+	// FIXME: move this to per-context initialization
+	var data []byte
+	if pr.KeyData != nil {
+		data = pr.KeyData
+	} else {
+		d, err := ioutil.ReadFile(pr.KeyPath)
+		if err != nil {
+			return sarRejected, nil, err
+		}
+		data = d
+	}
+
+	// FIXME: move this to per-context initialization
+	mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
+	if err != nil {
+		return sarRejected, nil, err
+	}
+	defer mech.Close()
+	if len(trustedIdentities) == 0 {
+		return sarRejected, nil, PolicyRequirementError("No public keys imported")
+	}
+
+	signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
+		validateKeyIdentity: func(keyIdentity string) error {
+			for _, trustedIdentity := range trustedIdentities {
+				if keyIdentity == trustedIdentity {
+					return nil
+				}
+			}
+			// Coverage: We use a private GPG home directory and only import trusted keys, so this should
+			// not be reachable.
+			return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))
+		},
+		validateSignedDockerReference: func(ref string) error {
+			if !pr.SignedIdentity.matchesDockerReference(image, ref) {
+				return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
+			}
+			return nil
+		},
+		validateSignedDockerManifestDigest: func(digest digest.Digest) error {
+			m, _, err := image.Manifest(ctx)
+			if err != nil {
+				return err
+			}
+			digestMatches, err := manifest.MatchesDigest(m, digest)
+			if err != nil {
+				return err
+			}
+			if !digestMatches {
+				return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
+			}
+			return nil
+		},
+	})
+	if err != nil {
+		return sarRejected, nil, err
+	}
+
+	return sarAccepted, signature, nil
+}
+
+func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+	// FIXME: pass context.Context
+	sigs, err := image.Signatures(ctx)
+	if err != nil {
+		return false, err
+	}
+	var rejections []error
+	for _, s := range sigs {
+		var reason error
+		switch res, _, err := pr.isSignatureAuthorAccepted(ctx, image, s); res {
+		case sarAccepted:
+			// One accepted signature is enough.
+			return true, nil
+		case sarRejected:
+			reason = err
+		case sarUnknown:
+			// Huh?! This should not happen at all; treat it as any other invalid value.
+ fallthrough + default: + reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) + } + rejections = append(rejections, reason) + } + var summary error + switch len(rejections) { + case 0: + summary = PolicyRequirementError("A signature was required, but no signature exists") + case 1: + summary = rejections[0] + default: + var msgs []string + for _, e := range rejections { + msgs = append(msgs, e.Error()) + } + summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", + strings.Join(msgs, "; "))) + } + return false, summary +} diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval_simple.go b/vendor/github.com/containers/image/v4/signature/policy_eval_simple.go new file mode 100644 index 000000000..7fbcf4a94 --- /dev/null +++ b/vendor/github.com/containers/image/v4/signature/policy_eval_simple.go @@ -0,0 +1,29 @@ +// Policy evaluation for the various simple PolicyRequirement types. + +package signature + +import ( + "context" + "fmt" + + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/types" +) + +func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // prInsecureAcceptAnything semantics: Every image is allowed to run, + // but this does not consider the signature as verified. + return sarUnknown, nil, nil +} + +func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { + return true, nil +} + +func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference()))) +} + +func (pr *prReject) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { + return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference()))) +} diff --git a/vendor/github.com/containers/image/v4/signature/policy_reference_match.go b/vendor/github.com/containers/image/v4/signature/policy_reference_match.go new file mode 100644 index 000000000..016d737fb --- /dev/null +++ b/vendor/github.com/containers/image/v4/signature/policy_reference_match.go @@ -0,0 +1,101 @@ +// PolicyReferenceMatch implementations. + +package signature + +import ( + "fmt" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/types" +) + +// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images. 
+func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+	r1 := image.Reference().DockerReference()
+	if r1 == nil {
+		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
+			transports.ImageName(image.Reference())))
+	}
+	r2, err := reference.ParseNormalizedNamed(s2)
+	if err != nil {
+		return nil, nil, err
+	}
+	return r1, r2, nil
+}
+
+func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+		return false
+	}
+	return signature.String() == intended.String()
+}
+
+func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(signature) {
+		return false
+	}
+	switch intended.(type) {
+	case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
+		return signature.String() == intended.String()
+	case reference.Canonical:
+		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy, using UnparsedImage.Manifest.
+		// Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
+		// we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms)
+		return signature.Name() == intended.Name()
+	default: // !reference.IsNameOnly(intended)
+		return false
+	}
+}
+
+func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	return signature.Name() == intended.Name()
+}
+
+// parseDockerReferences converts two reference strings into parsed entities, failing on any error
+func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
+	r1, err := reference.ParseNormalizedNamed(s1)
+	if err != nil {
+		return nil, nil, err
+	}
+	r2, err := reference.ParseNormalizedNamed(s2)
+	if err != nil {
+		return nil, nil, err
+	}
+	return r1, r2, nil
+}
+
+func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	// prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+		return false
+	}
+	return signature.String() == intended.String()
+}
+
+func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	return signature.Name() == intended.Name()
+}
diff --git a/vendor/github.com/containers/image/v4/signature/policy_types.go b/vendor/github.com/containers/image/v4/signature/policy_types.go
new file mode 100644
index 000000000..d3b33bb7a
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/signature/policy_types.go
@@ -0,0 +1,152 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// This defines types used to represent a signature verification policy in memory.
+// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
+// built using the constructor functions provided in policy_config.go.
+
+package signature
+
+// NOTE: Keep this in sync with docs/containers-policy.json.5.md!
+
+// Policy defines requirements for considering a signature, or an image, valid.
+type Policy struct {
+	// Default applies to any image which does not have a matching policy in Transports.
+	// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
+	// if the image matches none of the scopes.
+	Default    PolicyRequirements               `json:"default"`
+	Transports map[string]PolicyTransportScopes `json:"transports"`
+}
+
+// PolicyTransportScopes defines policies for images for a specific transport,
+// for various scopes, the map keys.
+// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
+// there is one scope precisely matching to a single image, and namespace scopes as prefixes
+// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
+// The empty scope, if it exists, is considered a parent namespace of all other scopes.
+// Most specific scope wins, duplication is prohibited (hard failure).
+type PolicyTransportScopes map[string]PolicyRequirements
+
+// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
+// Must not be empty, frequently will only contain a single element.
+type PolicyRequirements []PolicyRequirement
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+
+// prCommon is the common type field in a JSON encoding of PolicyRequirement.
+type prCommon struct {
+	Type prTypeIdentifier `json:"type"`
+}
+
+// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
+type prTypeIdentifier string
+
+const (
+	prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
+	prTypeReject                 prTypeIdentifier = "reject"
+	prTypeSignedBy               prTypeIdentifier = "signedBy"
+	prTypeSignedBaseLayer        prTypeIdentifier = "signedBaseLayer"
+)
+
+// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
+// every image is allowed to run.
+// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
+// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
+// FIXME? Better name?
+type prInsecureAcceptAnything struct {
+	prCommon
+}
+
+// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
+type prReject struct {
+	prCommon
+}
+
+// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
+type prSignedBy struct {
+	prCommon
+
+	// KeyType specifies what kind of key reference KeyPath/KeyData is.
+	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
+	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
+	KeyType sbKeyType `json:"keyType"`
+
+	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
+	KeyPath string `json:"keyPath,omitempty"`
+	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+	KeyData []byte `json:"keyData,omitempty"`
+
+	// SignedIdentity specifies what image identity the signature must be claiming about the image.
+	// Defaults to "matchRepoDigestOrExact" if not specified.
+	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// sbKeyType are the allowed values for prSignedBy.KeyType
+type sbKeyType string
+
+const (
+	// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
+	SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
+	// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
+	SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
+	// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
+	// FIXME: PEM, DER?
+	SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
+	// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
+	// FIXME: PEM, DER?
+	SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
+)
+
+// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
+type prSignedBaseLayer struct {
+	prCommon
+	// BaseLayerIdentity specifies the base image to look for. "matchExact" is rejected, "matchRepository" is unlikely to be useful.
+	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+
+// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
+type prmCommon struct {
+	Type prmTypeIdentifier `json:"type"`
+}
+
+// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
+type prmTypeIdentifier string
+
+const (
+	prmTypeMatchExact             prmTypeIdentifier = "matchExact"
+	prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
+	prmTypeMatchRepository        prmTypeIdentifier = "matchRepository"
+	prmTypeExactReference         prmTypeIdentifier = "exactReference"
+	prmTypeExactRepository        prmTypeIdentifier = "exactRepository"
+)
+
+// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
+type prmMatchExact struct {
+	prmCommon
+}
+
+// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
+// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest
+type prmMatchRepoDigestOrExact struct {
+	prmCommon
+}
+
+// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
+type prmMatchRepository struct {
+	prmCommon
+}
+
+// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
+type prmExactReference struct {
+	prmCommon
+	DockerReference string `json:"dockerReference"`
+}
+
+// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
+type prmExactRepository struct {
+	prmCommon
+	DockerRepository string `json:"dockerRepository"`
+}
diff --git a/vendor/github.com/containers/image/v4/signature/signature.go b/vendor/github.com/containers/image/v4/signature/signature.go
new file mode 100644
index 000000000..09c4de0b3
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/signature/signature.go
@@ -0,0 +1,280 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
+
+package signature
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/pkg/errors"
+
+	"github.com/containers/image/v4/version"
+	"github.com/opencontainers/go-digest"
+)
+
+const (
+	signatureType = "atomic container signature"
+)
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+type InvalidSignatureError struct {
+	msg string
+}
+
+func (err InvalidSignatureError) Error() string {
+	return err.msg
+}
+
+// Signature is a parsed content of a signature.
+// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
+type Signature struct {
+	DockerManifestDigest digest.Digest
+	DockerReference      string // FIXME: more precise type?
+}
+
+// untrustedSignature is a parsed content of a signature.
+type untrustedSignature struct {
+	UntrustedDockerManifestDigest digest.Digest
+	UntrustedDockerReference      string // FIXME: more precise type?
+	UntrustedCreatorID            *string
+	// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
+	// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
+	// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
+	// we would add another field, UntrustedTimestampNS int64.
+	UntrustedTimestamp *int64
+}
+
+// UntrustedSignatureInformation is information available in an untrusted signature.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading +// (including things like “✅ Verified by $authority”) +type UntrustedSignatureInformation struct { + UntrustedDockerManifestDigest digest.Digest + UntrustedDockerReference string // FIXME: more precise type? + UntrustedCreatorID *string + UntrustedTimestamp *time.Time + UntrustedShortKeyIdentifier string +} + +// newUntrustedSignature returns an untrustedSignature object with +// the specified primary contents and appropriate metadata. +func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature { + // Use intermediate variables for these values so that we can take their addresses. + // Golang guarantees that they will have a new address on every execution. + creatorID := "atomic " + version.Version + timestamp := time.Now().Unix() + return untrustedSignature{ + UntrustedDockerManifestDigest: dockerManifestDigest, + UntrustedDockerReference: dockerReference, + UntrustedCreatorID: &creatorID, + UntrustedTimestamp: ×tamp, + } +} + +// Compile-time check that untrustedSignature implements json.Marshaler +var _ json.Marshaler = (*untrustedSignature)(nil) + +// MarshalJSON implements the json.Marshaler interface. +func (s untrustedSignature) MarshalJSON() ([]byte, error) { + if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" { + return nil, errors.New("Unexpected empty signature content") + } + critical := map[string]interface{}{ + "type": signatureType, + "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()}, + "identity": map[string]string{"docker-reference": s.UntrustedDockerReference}, + } + optional := map[string]interface{}{} + if s.UntrustedCreatorID != nil { + optional["creator"] = *s.UntrustedCreatorID + } + if s.UntrustedTimestamp != nil { + optional["timestamp"] = *s.UntrustedTimestamp + } + signature := map[string]interface{}{ + "critical": critical, + "optional": optional, + } + return json.Marshal(signature) +} + +// Compile-time check that untrustedSignature implements json.Unmarshaler +var _ json.Unmarshaler = (*untrustedSignature)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface +func (s *untrustedSignature) UnmarshalJSON(data []byte) error { + err := s.strictUnmarshalJSON(data) + if err != nil { + if _, ok := err.(jsonFormatError); ok { + err = InvalidSignatureError{msg: err.Error()} + } + } + return err +} + +// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type. +// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller. 
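+// For reference, the JSON parsed here has the shape produced by MarshalJSON above
+// (illustrative values):
+//	{"critical": {"type": "atomic container signature",
+//	              "image": {"docker-manifest-digest": "sha256:…"},
+//	              "identity": {"docker-reference": "example.com/app:v1"}},
+//	 "optional": {"creator": "atomic 4.0.1", "timestamp": 1569967200}}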
+func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
+	var critical, optional json.RawMessage
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"critical": &critical,
+		"optional": &optional,
+	}); err != nil {
+		return err
+	}
+
+	var creatorID string
+	var timestamp float64
+	var gotCreatorID, gotTimestamp = false, false
+	if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
+		switch key {
+		case "creator":
+			gotCreatorID = true
+			return &creatorID
+		case "timestamp":
+			gotTimestamp = true
+			return &timestamp
+		default:
+			var ignore interface{}
+			return &ignore
+		}
+	}); err != nil {
+		return err
+	}
+	if gotCreatorID {
+		s.UntrustedCreatorID = &creatorID
+	}
+	if gotTimestamp {
+		intTimestamp := int64(timestamp)
+		if float64(intTimestamp) != timestamp {
+			return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"}
+		}
+		s.UntrustedTimestamp = &intTimestamp
+	}
+
+	var t string
+	var image, identity json.RawMessage
+	if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
+		"type":     &t,
+		"image":    &image,
+		"identity": &identity,
+	}); err != nil {
+		return err
+	}
+	if t != signatureType {
+		return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
+	}
+
+	var digestString string
+	if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
+		"docker-manifest-digest": &digestString,
+	}); err != nil {
+		return err
+	}
+	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
+
+	return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
+		"docker-reference": &s.UntrustedDockerReference,
+	})
+}
+
+// sign formats the signature and returns a blob signed using mech and keyIdentity
+// (If it seems surprising that this is a method on untrustedSignature, note that there
+// isn’t a good reason to think that a key used by the user is trusted by any component
+// of the system just because it is a private key — actually the presence of a private key
+// on the system increases the likelihood of a successful attack on that private key
+// on that particular system.)
+func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
+	json, err := json.Marshal(s)
+	if err != nil {
+		return nil, err
+	}
+
+	return mech.Sign(json, keyIdentity)
+}
+
+// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
+// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
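+// An illustrative (hypothetical) instantiation; real callers reject unexpected
+// values instead of returning nil:
+//	rules := signatureAcceptanceRules{
+//		validateKeyIdentity:                func(keyIdentity string) error { return nil },
+//		validateSignedDockerReference:      func(ref string) error { return nil },
+//		validateSignedDockerManifestDigest: func(d digest.Digest) error { return nil },
+//	}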
+type signatureAcceptanceRules struct {
+	validateKeyIdentity                func(string) error
+	validateSignedDockerReference      func(string) error
+	validateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
+// match expected values, both as specified by rules, and returns it
+func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
+	signed, keyIdentity, err := mech.Verify(unverifiedSignature)
+	if err != nil {
+		return nil, err
+	}
+	if err := rules.validateKeyIdentity(keyIdentity); err != nil {
+		return nil, err
+	}
+
+	var unmatchedSignature untrustedSignature
+	if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
+		return nil, InvalidSignatureError{msg: err.Error()}
+	}
+	if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
+		return nil, err
+	}
+	if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
+		return nil, err
+	}
+	// signatureAcceptanceRules have accepted this value.
+	return &Signature{
+		DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
+		DockerReference:      unmatchedSignature.UntrustedDockerReference,
+	}, nil
+}
+
+// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
+// WITHOUT doing any cryptographic verification.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”)
+func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
+	// NOTE: This should eventually do format autodetection.
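+	// (Illustrative use, assuming sigBytes holds a raw signature blob:
+	//	info, err := GetUntrustedSignatureInformationWithoutVerifying(sigBytes)
+	// the returned fields must be treated as untrusted, per the WARNING above.)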
+	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
+	if err != nil {
+		return nil, err
+	}
+	defer mech.Close()
+
+	untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
+	if err != nil {
+		return nil, err
+	}
+	var untrustedDecodedContents untrustedSignature
+	if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
+		return nil, InvalidSignatureError{msg: err.Error()}
+	}
+
+	var timestamp *time.Time // = nil
+	if untrustedDecodedContents.UntrustedTimestamp != nil {
+		ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
+		timestamp = &ts
+	}
+	return &UntrustedSignatureInformation{
+		UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
+		UntrustedDockerReference:      untrustedDecodedContents.UntrustedDockerReference,
+		UntrustedCreatorID:            untrustedDecodedContents.UntrustedCreatorID,
+		UntrustedTimestamp:            timestamp,
+		UntrustedShortKeyIdentifier:   shortKeyIdentifier,
+	}, nil
+}
diff --git a/vendor/github.com/containers/image/v4/storage/storage_image.go b/vendor/github.com/containers/image/v4/storage/storage_image.go
new file mode 100644
index 000000000..4e913b84c
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/storage/storage_image.go
@@ -0,0 +1,956 @@
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"sync/atomic"
+
+	"github.com/containers/image/v4/docker/reference"
+	"github.com/containers/image/v4/image"
+	"github.com/containers/image/v4/internal/tmpdir"
+	"github.com/containers/image/v4/manifest"
+	"github.com/containers/image/v4/pkg/blobinfocache/none"
+	"github.com/containers/image/v4/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/ioutils"
+	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
+	// with a digest-based name that doesn't match its contents.
+	ErrBlobDigestMismatch = errors.New("blob digest mismatch")
+	// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
+	// with an expected size that doesn't match the reader.
+	ErrBlobSizeMismatch = errors.New("blob size mismatch")
+	// ErrNoManifestLists is returned when GetManifest() is called
+	// with a non-nil instanceDigest.
+	ErrNoManifestLists = errors.New("manifest lists are not supported by this transport")
+	// ErrNoSuchImage is returned when we attempt to access an image which
+	// doesn't exist in the storage area.
+	ErrNoSuchImage = storage.ErrNotAnImage
+)
+
+type storageImageSource struct {
+	imageRef       storageReference
+	image          *storage.Image
+	layerPosition  map[digest.Digest]int // Where we are in reading a blob's layers
+	cachedManifest []byte                // A cached copy of the manifest, if already known, or nil
+	getBlobMutex   sync.Mutex            // Mutex to sync state for parallel GetBlob executions
+	SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+}
+
+type storageImageDestination struct {
+	imageRef       storageReference
+	directory      string                          // Temporary directory where we store blobs until Commit() time
+	nextTempFileID int32                           // A counter that we use for computing filenames to assign to blobs
+	manifest       []byte                          // Manifest contents, temporary
+	signatures     []byte                          // Signature contents, temporary
+	putBlobMutex   sync.Mutex                      // Mutex to sync state for parallel PutBlob executions
+	blobDiffIDs    map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
+	fileSizes      map[digest.Digest]int64         // Mapping from layer blobsums to their sizes
+	filenames      map[digest.Digest]string        // Mapping from layer blobsums to names of files we used to hold them
+	SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+}
+
+type storageImageCloser struct {
+	types.ImageCloser
+	size int64
+}
+
+// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
+// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey.
+func manifestBigDataKey(digest digest.Digest) string {
+	return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
+}
+
+// newImageSource sets up an image for reading.
+func newImageSource(imageRef storageReference) (*storageImageSource, error) {
+	// First, locate the image.
+	img, err := imageRef.resolveImage()
+	if err != nil {
+		return nil, err
+	}
+
+	// Build the reader object.
+	image := &storageImageSource{
+		imageRef:       imageRef,
+		image:          img,
+		layerPosition:  make(map[digest.Digest]int),
+		SignatureSizes: []int{},
+	}
+	if img.Metadata != "" {
+		if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
+			return nil, errors.Wrap(err, "error decoding metadata for source image")
+		}
+	}
+	return image, nil
+}
+
+// Reference returns the image reference that we used to find this image.
+func (s *storageImageSource) Reference() types.ImageReference {
+	return s.imageRef
+}
+
+// Close cleans up any resources we tied up while reading the image.
+func (s *storageImageSource) Close() error {
+	return nil
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *storageImageSource) HasThreadSafeGetBlob() bool {
+	return true
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
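+// (Illustrative call, with d a known layer digest and using the no-op cache that
+// this package already imports:
+//	rc, size, err := src.GetBlob(ctx, types.BlobInfo{Digest: d, Size: -1}, none.NoCache)
+// the caller must Close() rc.)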
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
+	if info.Digest == image.GzippedEmptyLayerDigest {
+		return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
+	}
+	rc, n, _, err = s.getBlobAndLayerID(info)
+	return rc, n, err
+}
+
+// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
+func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
+	var layer storage.Layer
+	var diffOptions *storage.DiffOptions
+	// We need a valid digest value.
+	err = info.Digest.Validate()
+	if err != nil {
+		return nil, -1, "", err
+	}
+	// Check if the blob corresponds to a diff that was used to initialize any layers. Our
+	// callers should try to retrieve layers using their uncompressed digests, so no need to
+	// check if they're using one of the compressed digests, which we can't reproduce anyway.
+	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
+	// If it's not a layer, then it must be a data item.
+	if len(layers) == 0 {
+		b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
+		if err != nil {
+			return nil, -1, "", err
+		}
+		r := bytes.NewReader(b)
+		logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
+		return ioutil.NopCloser(r), int64(r.Len()), "", nil
+	}
+	// Step through the list of matching layers. Tests may want to verify that if we have multiple layers
+	// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
+	// just go ahead and use the first one every time.
+	s.getBlobMutex.Lock()
+	i := s.layerPosition[info.Digest]
+	s.layerPosition[info.Digest] = i + 1
+	s.getBlobMutex.Unlock()
+	if len(layers) > 0 {
+		layer = layers[i%len(layers)]
+	}
+	// Force the storage layer to not try to match any compression that was used when the layer was first
+	// handed to it.
+	noCompression := archive.Uncompressed
+	diffOptions = &storage.DiffOptions{
+		Compression: &noCompression,
+	}
+	if layer.UncompressedSize < 0 {
+		n = -1
+	} else {
+		n = layer.UncompressedSize
+	}
+	logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
+	rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
+	if err != nil {
+		return nil, -1, "", err
+	}
+	return rc, n, layer.ID, err
+}
+
+// GetManifest() reads the image's manifest.
+func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
+	if instanceDigest != nil {
+		return nil, "", ErrNoManifestLists
+	}
+	if len(s.cachedManifest) == 0 {
+		// The manifest is stored as a big data item.
+		// Prefer the manifest corresponding to the user-specified digest, if available.
+		if s.imageRef.named != nil {
+			if digested, ok := s.imageRef.named.(reference.Digested); ok {
+				key := manifestBigDataKey(digested.Digest())
+				blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+				if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
+					return nil, "", err
+				}
+				if err == nil {
+					s.cachedManifest = blob
+				}
+			}
+		}
+		// If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
+ // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest(). + if len(s.cachedManifest) == 0 { + cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey) + if err != nil { + return nil, "", err + } + s.cachedManifest = cachedBlob + } + } + return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err +} + +// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of +// the image, after they've been decompressed. +func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + manifestBlob, manifestType, err := s.GetManifest(ctx, nil) + if err != nil { + return nil, errors.Wrapf(err, "error reading image manifest for %q", s.image.ID) + } + man, err := manifest.FromBlob(manifestBlob, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "error parsing image manifest for %q", s.image.ID) + } + + uncompressedLayerType := "" + switch manifestType { + case imgspecv1.MediaTypeImageManifest: + uncompressedLayerType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + // This is actually a compressed type, but there's no uncompressed type defined + uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType + } + + physicalBlobInfos := []types.BlobInfo{} + layerID := s.image.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return nil, errors.Wrapf(err, "error reading layer %q in image %q", layerID, s.image.ID) + } + if layer.UncompressedDigest == "" { + return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID) + } + if layer.UncompressedSize < 0 { + return nil, errors.Errorf("uncompressed size for layer %q is unknown", layerID) + } + blobInfo := types.BlobInfo{ + Digest: layer.UncompressedDigest, + Size: layer.UncompressedSize, + MediaType: uncompressedLayerType, + } + physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) + layerID = layer.Parent + } + + res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos) + if err != nil { + return nil, errors.Wrapf(err, "error creating LayerInfosForCopy of image %q", s.image.ID) + } + return res, nil +} + +// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest, +// but using layer data which we can actually produce — physicalInfos for non-empty layers, +// and image.GzippedEmptyLayer for empty ones. +// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.) 
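+// For example (illustrative): manifestInfos [empty, A, empty, B] with physicalInfos
+// [a, b] yields [gzipped-empty-layer info, a, gzipped-empty-layer info, b]; too few
+// or leftover physical layers are reported as errors.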
+func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
+	nextPhysical := 0
+	res := make([]types.BlobInfo, len(manifestInfos))
+	for i, mi := range manifestInfos {
+		if mi.EmptyLayer {
+			res[i] = types.BlobInfo{
+				Digest:    image.GzippedEmptyLayerDigest,
+				Size:      int64(len(image.GzippedEmptyLayer)),
+				MediaType: mi.MediaType,
+			}
+		} else {
+			if nextPhysical >= len(physicalInfos) {
+				return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
+			}
+			res[i] = physicalInfos[nextPhysical]
+			nextPhysical++
+		}
+	}
+	if nextPhysical != len(physicalInfos) {
+		return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
+	}
+	return res, nil
+}
+
+// GetSignatures() parses the image's signatures blob into a slice of byte slices.
+func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
+	if instanceDigest != nil {
+		return nil, ErrNoManifestLists
+	}
+	var offset int
+	sigslice := [][]byte{}
+	signature := []byte{}
+	if len(s.SignatureSizes) > 0 {
+		signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, "signatures")
+		if err != nil {
+			return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.image.ID)
+		}
+		signature = signatureBlob
+	}
+	for _, length := range s.SignatureSizes {
+		sigslice = append(sigslice, signature[offset:offset+length])
+		offset += length
+	}
+	if offset != len(signature) {
+		return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
+	}
+	return sigslice, nil
+}
+
+// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
+// it's time to Commit() the image
+func newImageDestination(imageRef storageReference) (*storageImageDestination, error) {
+	directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "storage")
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating a temporary directory")
+	}
+	image := &storageImageDestination{
+		imageRef:       imageRef,
+		directory:      directory,
+		blobDiffIDs:    make(map[digest.Digest]digest.Digest),
+		fileSizes:      make(map[digest.Digest]int64),
+		filenames:      make(map[digest.Digest]string),
+		SignatureSizes: []int{},
+	}
+	return image, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (s *storageImageDestination) Reference() types.ImageReference {
+	return s.imageRef
+}
+
+// Close cleans up the temporary directory.
+func (s *storageImageDestination) Close() error {
+	return os.RemoveAll(s.directory)
+}
+
+func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
+	// We ultimately have to decompress layers to populate trees on disk
+	// and need to explicitly ask for it here, so that the layers' MIME
+	// types can be set accordingly.
+	return types.PreserveOriginal
+}
+
+func (s *storageImageDestination) computeNextBlobCacheFile() string {
+	return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (s *storageImageDestination) HasThreadSafePutBlob() bool { + return true +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + // Stores a layer or data blob in our temporary directory, checking that any information + // in the blobinfo matches the incoming data. + errorBlobInfo := types.BlobInfo{ + Digest: "", + Size: -1, + } + // Set up to digest the blob and count its size while saving it to a file. + hasher := digest.Canonical.Digester() + if blobinfo.Digest.Validate() == nil { + if a := blobinfo.Digest.Algorithm(); a.Available() { + hasher = a.Digester() + } + } + diffID := digest.Canonical.Digester() + filename := s.computeNextBlobCacheFile() + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename) + } + defer file.Close() + counter := ioutils.NewWriteCounter(hasher.Hash()) + reader := io.TeeReader(io.TeeReader(stream, counter), file) + decompressed, err := archive.DecompressStream(reader) + if err != nil { + return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob") + } + // Copy the data to the file. + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + _, err = io.Copy(diffID.Hash(), decompressed) + decompressed.Close() + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename) + } + // Ensure that any information that we were given about the blob is correct. + if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() { + return errorBlobInfo, ErrBlobDigestMismatch + } + if blobinfo.Size >= 0 && blobinfo.Size != counter.Count { + return errorBlobInfo, ErrBlobSizeMismatch + } + // Record information about the blob. + s.putBlobMutex.Lock() + s.blobDiffIDs[hasher.Digest()] = diffID.Digest() + s.fileSizes[hasher.Digest()] = counter.Count + s.filenames[hasher.Digest()] = filename + s.putBlobMutex.Unlock() + blobDigest := blobinfo.Digest + if blobDigest.Validate() != nil { + blobDigest = hasher.Digest() + } + blobSize := blobinfo.Size + if blobSize < 0 { + blobSize = counter.Count + } + // This is safe because we have just computed both values ourselves. + cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) + return types.BlobInfo{ + Digest: blobDigest, + Size: blobSize, + MediaType: blobinfo.MediaType, + }, nil +} + +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. 
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	// lock the entire method as it executes fairly quickly
+	s.putBlobMutex.Lock()
+	defer s.putBlobMutex.Unlock()
+	if blobinfo.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
+	}
+	if err := blobinfo.Digest.Validate(); err != nil {
+		return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
+	}
+
+	// Check if we've already cached it in a file.
+	if size, ok := s.fileSizes[blobinfo.Digest]; ok {
+		return true, types.BlobInfo{
+			Digest:    blobinfo.Digest,
+			Size:      size,
+			MediaType: blobinfo.MediaType,
+		}, nil
+	}
+
+	// Check if we have a wasn't-compressed layer in storage that's based on that blob.
+	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
+	if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+		return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
+	}
+	if len(layers) > 0 {
+		// Save this for completeness.
+		s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+		return true, types.BlobInfo{
+			Digest:    blobinfo.Digest,
+			Size:      layers[0].UncompressedSize,
+			MediaType: blobinfo.MediaType,
+		}, nil
+	}
+
+	// Check if we have a was-compressed layer in storage that's based on that blob.
+	layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
+	if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+		return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
+	}
+	if len(layers) > 0 {
+		// Record the uncompressed value so that we can use it to calculate layer IDs.
+		s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+		return true, types.BlobInfo{
+			Digest:    blobinfo.Digest,
+			Size:      layers[0].CompressedSize,
+			MediaType: blobinfo.MediaType,
+		}, nil
+	}
+
+	// Does the blob correspond to a known DiffID which we already have available?
+	// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
+	// uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size.
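+	// (Illustrative case: the caller asks about a compressed layer digest that was
+	// never passed to PutBlob here, but the cache maps it to an uncompressed DiffID
+	// which storage already has; the layer can then be reused without re-fetching.)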
+ if canSubstitute || blobinfo.Size != -1 { + if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest) + } + if len(layers) > 0 { + if blobinfo.Size != -1 { + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, blobinfo, nil + } + if !canSubstitute { + return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo) + } + s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest + return true, types.BlobInfo{ + Digest: uncompressedDigest, + Size: layers[0].UncompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + } + } + + // Nope, we don't have it. + return false, types.BlobInfo{}, nil +} + +// computeID computes a recommended image ID based on information we have so far. If +// the manifest is not of a type that we recognize, we return an empty value, indicating +// that since we don't have a recommendation, a random ID should be used if one needs +// to be allocated. +func (s *storageImageDestination) computeID(m manifest.Manifest) string { + // Build the diffID list. We need the decompressed sums that we've been calculating to + // fill in the DiffIDs. It's expected (but not enforced by us) that the number of + // diffIDs corresponds to the number of non-EmptyLayer entries in the history. + var diffIDs []digest.Digest + switch m := m.(type) { + case *manifest.Schema1: + // Build a list of the diffIDs we've generated for the non-throwaway FS layers, + // in reverse of the order in which they were originally listed. + for i, compat := range m.ExtractedV1Compatibility { + if compat.ThrowAway { + continue + } + blobSum := m.FSLayers[i].BlobSum + diffID, ok := s.blobDiffIDs[blobSum] + if !ok { + logrus.Infof("error looking up diffID for layer %q", blobSum.String()) + return "" + } + diffIDs = append([]digest.Digest{diffID}, diffIDs...) + } + case *manifest.Schema2, *manifest.OCI1: + // We know the ID calculation for these formats doesn't actually use the diffIDs, + // so we don't need to populate the diffID list. + default: + return "" + } + id, err := m.ImageID(diffIDs) + if err != nil { + return "" + } + return id +} + +// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig +// information out of it for Inspect(). +func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { + if info.Digest == "" { + return nil, errors.Errorf(`no digest supplied when reading blob`) + } + if err := info.Digest.Validate(); err != nil { + return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) + } + // Assume it's a file, since we're only calling this from a place that expects to read files. + if filename, ok := s.filenames[info.Digest]; ok { + contents, err2 := ioutil.ReadFile(filename) + if err2 != nil { + return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename) + } + return contents, nil + } + // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. + return nil, errors.New("blob not found") +} + +func (s *storageImageDestination) Commit(ctx context.Context) error { + // Find the list of layer blobs. 
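+	// (Overview: replay the cached blobs into storage: compute each layer ID from
+	// its DiffID chain, PutLayer any missing layers, create the image record, then
+	// attach data blobs, names, manifest, signatures and metadata, deleting the
+	// half-created image if a later step fails.)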
+	if len(s.manifest) == 0 {
+		return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
+	}
+	man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
+	if err != nil {
+		return errors.Wrapf(err, "error parsing manifest")
+	}
+	layerBlobs := man.LayerInfos()
+	// Extract or find the layers.
+	lastLayer := ""
+	for _, blob := range layerBlobs {
+		if blob.EmptyLayer {
+			continue
+		}
+
+		// Check if there's already a layer with the ID that we'd give to the result of applying
+		// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
+		diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
+		if !haveDiffID {
+			// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
+			// or to even check if we had it.
+			// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+			// that relies on using a blob digest that has never been seen by the store had better call
+			// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
+			// so far we are going to accommodate that (if we should be doing that at all).
+			logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
+			has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
+			if err != nil {
+				return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
+			}
+			if !has {
+				return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
+			}
+			diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
+			if !haveDiffID {
+				return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
+			}
+		}
+		id := diffID.Hex()
+		if lastLayer != "" {
+			id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
+		}
+		if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
+			// There's already a layer that should have the right contents, just reuse it.
+			lastLayer = layer.ID
+			continue
+		}
+		// Check if we previously cached a file with that blob's contents. If we didn't,
+		// then we need to read the desired contents from a layer.
+		filename, ok := s.filenames[blob.Digest]
+		if !ok {
+			// Try to find the layer with contents matching that blobsum.
+			layer := ""
+			layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
+			if err2 == nil && len(layers) > 0 {
+				layer = layers[0].ID
+			} else {
+				layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
+				if err2 == nil && len(layers) > 0 {
+					layer = layers[0].ID
+				}
+			}
+			if layer == "" {
+				return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
+			}
+			// Read the layer's contents.
+			noCompression := archive.Uncompressed
+			diffOptions := &storage.DiffOptions{
+				Compression: &noCompression,
+			}
+			diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
+			if err2 != nil {
+				return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
+			}
+			// Copy the layer diff to a file. Diff() takes a lock that it holds
+			// until the ReadCloser that it returns is closed, and PutLayer() wants
+			// the same lock, so the diff can't just be directly streamed from one
+			// to the other.
+ filename = s.computeNextBlobCacheFile() + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + diff.Close() + return errors.Wrapf(err, "error creating temporary file %q", filename) + } + // Copy the data to the file. + // TODO: This can take quite some time, and should ideally be cancellable using + // ctx.Done(). + _, err = io.Copy(file, diff) + diff.Close() + file.Close() + if err != nil { + return errors.Wrapf(err, "error storing blob to file %q", filename) + } + // Make sure that we can find this file later, should we need the layer's + // contents again. + s.filenames[blob.Digest] = filename + } + // Read the cached blob and use it as a diff. + file, err := os.Open(filename) + if err != nil { + return errors.Wrapf(err, "error opening file %q", filename) + } + defer file.Close() + // Build the new layer using the diff, regardless of where it came from. + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file) + if err != nil && errors.Cause(err) != storage.ErrDuplicateID { + return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) + } + lastLayer = layer.ID + } + + // If one of those blobs was a configuration blob, then we can try to dig out the date when the image + // was originally created, in case we're just copying it. If not, no harm done. + options := &storage.ImageOptions{} + if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil { + logrus.Debugf("setting image creation date to %s", inspect.Created) + options.CreationDate = *inspect.Created + } + // Create the image record, pointing to the most-recently added layer. + intendedID := s.imageRef.id + if intendedID == "" { + intendedID = s.computeID(man) + } + oldNames := []string{} + img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) + if err != nil { + if errors.Cause(err) != storage.ErrDuplicateID { + logrus.Debugf("error creating image: %q", err) + return errors.Wrapf(err, "error creating image %q", intendedID) + } + img, err = s.imageRef.transport.store.Image(intendedID) + if err != nil { + return errors.Wrapf(err, "error reading image %q", intendedID) + } + if img.TopLayer != lastLayer { + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) + return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) + } + logrus.Debugf("reusing image ID %q", img.ID) + oldNames = append(oldNames, img.Names...) + } else { + logrus.Debugf("created new image ID %q", img.ID) + } + // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so + // we just need to screen out the ones that are actually layers to get the list of non-layers. 
+ dataBlobs := make(map[digest.Digest]struct{}) + for blob := range s.filenames { + dataBlobs[blob] = struct{}{} + } + for _, layerBlob := range layerBlobs { + delete(dataBlobs, layerBlob.Digest) + } + for blob := range dataBlobs { + v, err := ioutil.ReadFile(s.filenames[blob]) + if err != nil { + return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) + return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) + } + } + // Set the reference's name on the image. + if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { + names := []string{} + if name != nil { + names = append(names, name.String()) + } + if len(oldNames) > 0 { + names = append(names, oldNames...) + } + if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) + return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID) + } + logrus.Debugf("set names of image %q to %v", img.ID, names) + } + // Save the manifest. Allow looking it up by digest by using the key convention defined by the Store. + // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance, + // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers. + manifestDigest, err := manifest.Digest(s.manifest) + if err != nil { + return errors.Wrapf(err, "error computing manifest digest") + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) + return err + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) + return err + } + // Save the signatures, if we have any. + if len(s.signatures) > 0 { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return err + } + } + // Save our metadata. 
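+	// (The metadata is the JSON encoding of this destination object itself; via the
+	// struct tag above it carries e.g. {"signature-sizes":[…]}, which newImageSource
+	// later unmarshals to recover SignatureSizes.)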
+ metadata, err := json.Marshal(s) + if err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) + return err + } + if len(metadata) != 0 { + if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) + return err + } + logrus.Debugf("saved image metadata %q", string(metadata)) + } + return nil +} + +var manifestMIMETypes = []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + manifest.DockerV2Schema1SignedMediaType, + manifest.DockerV2Schema1MediaType, +} + +func (s *storageImageDestination) SupportedManifestMIMETypes() []string { + return manifestMIMETypes +} + +// PutManifest writes the manifest to the destination. +func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error { + if s.imageRef.named != nil { + if digested, ok := s.imageRef.named.(reference.Digested); ok { + matches, err := manifest.MatchesDigest(manifestBlob, digested.Digest()) + if err != nil { + return err + } + if !matches { + return fmt.Errorf("Manifest does not match expected digest %s", digested.Digest()) + } + } + } + + s.manifest = make([]byte, len(manifestBlob)) + copy(s.manifest, manifestBlob) + return nil +} + +// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was +// previously supplied to PutSignatures(). +func (s *storageImageDestination) SupportsSignatures(ctx context.Context) error { + return nil +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be +// uploaded to the image destination, true otherwise. +func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (s *storageImageDestination) MustMatchRuntimeOS() bool { + return true +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { + return true // Yes, we want the unmodified manifest +} + +// PutSignatures records the image's signatures for committing as a single data blob. +func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { + sizes := []int{} + sigblob := []byte{} + for _, sig := range signatures { + sizes = append(sizes, len(sig)) + newblob := make([]byte, len(sigblob)+len(sig)) + copy(newblob, sigblob) + copy(newblob[len(sigblob):], sig) + sigblob = newblob + } + s.signatures = sigblob + s.SignatureSizes = sizes + return nil +} + +// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) getSize() (int64, error) { + var sum int64 + // Size up the data blobs. 
+ dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID) + if err != nil { + return -1, errors.Wrapf(err, "error reading image %q", s.image.ID) + } + for _, dataName := range dataNames { + bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName) + if err != nil { + return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.image.ID) + } + sum += bigSize + } + // Add the signature sizes. + for _, sigSize := range s.SignatureSizes { + sum += int64(sigSize) + } + // Walk the layer list. + layerID := s.image.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return -1, err + } + if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { + return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) + } + sum += layer.UncompressedSize + if layer.Parent == "" { + break + } + layerID = layer.Parent + } + return sum, nil +} + +// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) Size() (int64, error) { + return s.getSize() +} + +// Size() returns the previously-computed size of the image, with no error. +func (s *storageImageCloser) Size() (int64, error) { + return s.size, nil +} + +// newImage creates an image that also knows its size +func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) { + src, err := newImageSource(s) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, sys, src) + if err != nil { + return nil, err + } + size, err := src.getSize() + if err != nil { + return nil, err + } + return &storageImageCloser{ImageCloser: img, size: size}, nil +} diff --git a/vendor/github.com/containers/image/v4/storage/storage_reference.go b/vendor/github.com/containers/image/v4/storage/storage_reference.go new file mode 100644 index 000000000..7ad20817b --- /dev/null +++ b/vendor/github.com/containers/image/v4/storage/storage_reference.go @@ -0,0 +1,225 @@ +// +build !containers_image_storage_stub + +package storage + +import ( + "context" + "strings" + + "github.com/containers/image/v4/docker/reference" + "github.com/containers/image/v4/types" + "github.com/containers/storage" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte +// value hex-encoded into a 64-character string, and a reference to a Store +// where an image is, or would be, kept. +// Either "named" or "id" must be set. +type storageReference struct { + transport storageTransport + named reference.Named // may include a tag and/or a digest + id string +} + +func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) { + if named == nil && id == "" { + return nil, ErrInvalidReference + } + // We take a copy of the transport, which contains a pointer to the + // store that it used for resolving this reference, so that the + // transport that we'll return from Transport() won't be affected by + // further calls to the original transport's SetStore() method. 
+ return &storageReference{ + transport: transport, + named: named, + id: id, + }, nil +} + +// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref +func imageMatchesRepo(image *storage.Image, ref reference.Named) bool { + repo := ref.Name() + for _, name := range image.Names { + if named, err := reference.ParseNormalizedNamed(name); err == nil { + if named.Name() == repo { + return true + } + } + } + return false +} + +// Resolve the reference's name to an image ID in the store, if there's already +// one present with the same name or ID, and return the image. +func (s *storageReference) resolveImage() (*storage.Image, error) { + var loadedImage *storage.Image + if s.id == "" && s.named != nil { + // Look for an image that has the expanded reference name as an explicit Name value. + image, err := s.transport.store.Image(s.named.String()) + if image != nil && err == nil { + loadedImage = image + s.id = image.ID + } + } + if s.id == "" && s.named != nil { + if digested, ok := s.named.(reference.Digested); ok { + // Look for an image with the specified digest that has the same name, + // though possibly with a different tag or digest, as a Name value, so + // that the canonical reference can be implicitly resolved to the image. + images, err := s.transport.store.ImagesByDigest(digested.Digest()) + if err == nil && len(images) > 0 { + for _, image := range images { + if imageMatchesRepo(image, s.named) { + loadedImage = image + s.id = image.ID + break + } + } + } + } + } + if s.id == "" { + logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) + return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport()) + } + if loadedImage == nil { + img, err := s.transport.store.Image(s.id) + if err != nil { + return nil, errors.Wrapf(err, "error reading image %q", s.id) + } + loadedImage = img + } + if s.named != nil { + if !imageMatchesRepo(loadedImage, s.named) { + logrus.Errorf("no image matching reference %q found", s.StringWithinTransport()) + return nil, ErrNoSuchImage + } + } + // Default to having the image digest that we hand back match the most recently + // added manifest... + if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok { + loadedImage.Digest = digest + } + // ... unless the named reference says otherwise, and it matches one of the digests + // in the image. For those cases, set the Digest field to that value, for the + // sake of older consumers that don't know there's a whole list in there now. + if s.named != nil { + if digested, ok := s.named.(reference.Digested); ok { + for _, digest := range loadedImage.Digests { + if digest == digested.Digest() { + loadedImage.Digest = digest + break + } + } + } + } + return loadedImage, nil +} + +// Return a Transport object that defaults to using the same store that we used +// to build this reference object. +func (s storageReference) Transport() types.ImageTransport { + return &storageTransport{ + store: s.transport.store, + defaultUIDMap: s.transport.defaultUIDMap, + defaultGIDMap: s.transport.defaultGIDMap, + } +} + +// Return a name with a tag or digest, if we have either, else return it bare. +func (s storageReference) DockerReference() reference.Named { + return s.named +} + +// Return a name with a tag, prefixed with the graph root and driver name, to +// disambiguate between images which may be present in multiple stores and +// share only their names. 
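+// An illustrative result:
+//	[overlay@/var/lib/containers/storage+/run/containers/storage]docker.io/library/busybox:latest@<image ID>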
+func (s storageReference) StringWithinTransport() string { + optionsList := "" + options := s.transport.store.GraphOptions() + if len(options) > 0 { + optionsList = ":" + strings.Join(options, ",") + } + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" + if s.named != nil { + res = res + s.named.String() + } + if s.id != "" { + res = res + "@" + s.id + } + return res +} + +func (s storageReference) PolicyConfigurationIdentity() string { + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" + if s.named != nil { + res = res + s.named.String() + } + if s.id != "" { + res = res + "@" + s.id + } + return res +} + +// Also accept policy that's tied to the combination of the graph root and +// driver name, to apply to all images stored in the Store, and to just the +// graph root, in case we're using multiple drivers in the same directory for +// some reason. +func (s storageReference) PolicyConfigurationNamespaces() []string { + storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" + driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" + namespaces := []string{} + if s.named != nil { + if s.id != "" { + // The reference without the ID is also a valid namespace. + namespaces = append(namespaces, storeSpec+s.named.String()) + } + tagged, isTagged := s.named.(reference.Tagged) + _, isDigested := s.named.(reference.Digested) + if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace. + namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag()) + } + components := strings.Split(s.named.Name(), "/") + for len(components) > 0 { + namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) + components = components[:len(components)-1] + } + } + namespaces = append(namespaces, storeSpec) + namespaces = append(namespaces, driverlessStoreSpec) + return namespaces +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
+func (s storageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	return newImage(ctx, sys, s)
+}
+
+func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	img, err := s.resolveImage()
+	if err != nil {
+		return err
+	}
+	layers, err := s.transport.store.DeleteImage(img.ID, true)
+	if err == nil {
+		logrus.Debugf("deleted image %q", img.ID)
+		for _, layer := range layers {
+			logrus.Debugf("deleted layer %q", layer)
+		}
+	}
+	return err
+}
+
+func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(s)
+}
+
+func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(s)
+}
diff --git a/vendor/github.com/containers/image/v4/storage/storage_transport.go b/vendor/github.com/containers/image/v4/storage/storage_transport.go
new file mode 100644
index 000000000..48b909c03
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/storage/storage_transport.go
@@ -0,0 +1,366 @@
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+
+	"github.com/containers/image/v4/docker/reference"
+	"github.com/containers/image/v4/transports"
+	"github.com/containers/image/v4/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/idtools"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	minimumTruncatedIDLength = 3
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+var (
+	// Transport is an ImageTransport that uses either a default
+	// storage.Store or one that it's explicitly told to use.
+	Transport StoreTransport = &storageTransport{}
+	// ErrInvalidReference is returned when ParseReference() is passed an
+	// empty reference.
+	ErrInvalidReference = errors.New("invalid reference")
+	// ErrPathNotAbsolute is returned when a graph root is not an absolute
+	// path name.
+	ErrPathNotAbsolute = errors.New("path name is not absolute")
+)
+
+// StoreTransport is an ImageTransport that uses a storage.Store to parse
+// references, either its own default or one that it's told to use.
+type StoreTransport interface {
+	types.ImageTransport
+	// SetStore sets the default store for this transport.
+	SetStore(storage.Store)
+	// GetImage retrieves the image from the transport's store that's named
+	// by the reference.
+	GetImage(types.ImageReference) (*storage.Image, error)
+	// GetStoreImage retrieves the image from a specified store that's named
+	// by the reference.
+	GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
+	// ParseStoreReference parses a reference, overriding any store
+	// specification that it may contain.
+	ParseStoreReference(store storage.Store, reference string) (*storageReference, error)
+	// SetDefaultUIDMap sets the default UID map to use when opening stores.
+	SetDefaultUIDMap(idmap []idtools.IDMap)
+	// SetDefaultGIDMap sets the default GID map to use when opening stores.
+	SetDefaultGIDMap(idmap []idtools.IDMap)
+	// DefaultUIDMap returns the default UID map used when opening stores.
+	DefaultUIDMap() []idtools.IDMap
+	// DefaultGIDMap returns the default GID map used when opening stores.
+ DefaultGIDMap() []idtools.IDMap +} + +type storageTransport struct { + store storage.Store + defaultUIDMap []idtools.IDMap + defaultGIDMap []idtools.IDMap +} + +func (s *storageTransport) Name() string { + // Still haven't really settled on a name. + return "containers-storage" +} + +// SetStore sets the Store object which the Transport will use for parsing +// references when information about a Store is not directly specified as part +// of the reference. If one is not set, the library will attempt to initialize +// one with default settings when a reference needs to be parsed. Calling +// SetStore does not affect previously parsed references. +func (s *storageTransport) SetStore(store storage.Store) { + s.store = store +} + +// SetDefaultUIDMap sets the default UID map to use when opening stores. +func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) { + s.defaultUIDMap = idmap +} + +// SetDefaultGIDMap sets the default GID map to use when opening stores. +func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) { + s.defaultGIDMap = idmap +} + +// DefaultUIDMap returns the default UID map used when opening stores. +func (s *storageTransport) DefaultUIDMap() []idtools.IDMap { + return s.defaultUIDMap +} + +// DefaultGIDMap returns the default GID map used when opening stores. +func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { + return s.defaultGIDMap +} + +// ParseStoreReference takes a name or an ID, tries to figure out which it is +// relative to the given store, and returns it in a reference object. +func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { + if ref == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref) + } + if ref[0] == '[' { + // Ignore the store specifier. + closeIndex := strings.IndexRune(ref, ']') + if closeIndex < 1 { + return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) + } + ref = ref[closeIndex+1:] + } + + // The reference may end with an image ID. Image IDs and digests use the same "@" separator; + // here we only peel away an image ID, and leave digests alone. + split := strings.LastIndex(ref, "@") + id := "" + if split != -1 { + possibleID := ref[split+1:] + if possibleID == "" { + return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref) + } + // If it looks like a digest, leave it alone for now. + if _, err := digest.Parse(possibleID); err != nil { + // Otherwise… + if idSum, err := digest.Parse("sha256:" + possibleID); err == nil && idSum.Validate() == nil { + id = possibleID // … it is a full ID + } else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) { + // … it is a truncated version of the ID of an image that's present in local storage, + // so we might as well use the expanded value. + id = img.ID + } else { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID) + } + // We have recognized an image ID; peel it off. + ref = ref[:split] + } + } + + // If we only have one @-delimited portion, then _maybe_ it's a truncated image ID. Only check on that if it's + // at least of what we guess is a reasonable minimum length, because we don't want a really short value + // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. 
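+ // For example (hypothetical ID), a bare "9c98" that prefixes the ID of an
+ // image known to local storage is expanded to that image's full ID below,
+ // while a one-character value such as "a" is never treated as an ID prefix.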
+ if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") { + if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) { + // It's a truncated version of the ID of an image that's present in local storage; + // we need to expand it. + id = img.ID + ref = "" + } + } + + var named reference.Named + // Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been + // completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest.. + if ref != "" { + var err error + named, err = reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, errors.Wrapf(err, "error parsing named reference %q", ref) + } + named = reference.TagNameOnly(named) + } + + result, err := newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id) + if err != nil { + return nil, err + } + logrus.Debugf("parsed reference into %q", result.StringWithinTransport()) + return result, nil +} + +func (s *storageTransport) GetStore() (storage.Store, error) { + // Return the transport's previously-set store. If we don't have one + // of those, initialize one now. + if s.store == nil { + options, err := storage.DefaultStoreOptionsAutoDetectUID() + if err != nil { + return nil, err + } + options.UIDMap = s.defaultUIDMap + options.GIDMap = s.defaultGIDMap + store, err := storage.GetStore(options) + if err != nil { + return nil, err + } + s.store = store + } + return s.store, nil +} + +// ParseReference takes a name and a tag or digest and/or ID +// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"), +// possibly prefixed with a store specifier in the form "[_graphroot_]" or +// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or +// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", +// tries to figure out which it is, and returns it in a reference object. +// If _id_ is the ID of an image that's present in local storage, it can be truncated, and +// even be specified as if it were a _name_, value. +func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { + var store storage.Store + // Check if there's a store location prefix. If there is, then it + // needs to match a store that was previously initialized using + // storage.GetStore(), or be enough to let the storage library fill out + // the rest using knowledge that it has from elsewhere. + if reference[0] == '[' { + closeIndex := strings.IndexRune(reference, ']') + if closeIndex < 1 { + return nil, ErrInvalidReference + } + storeSpec := reference[1:closeIndex] + reference = reference[closeIndex+1:] + // Peel off a "driver@" from the start. + driverInfo := "" + driverSplit := strings.SplitN(storeSpec, "@", 2) + if len(driverSplit) != 2 { + if storeSpec == "" { + return nil, ErrInvalidReference + } + } else { + driverInfo = driverSplit[0] + if driverInfo == "" { + return nil, ErrInvalidReference + } + storeSpec = driverSplit[1] + if storeSpec == "" { + return nil, ErrInvalidReference + } + } + // Peel off a ":options" from the end. + var options []string + optionsSplit := strings.SplitN(storeSpec, ":", 2) + if len(optionsSplit) == 2 { + options = strings.Split(optionsSplit[1], ",") + storeSpec = optionsSplit[0] + } + // Peel off a "+runroot" from the new end. 
+ runRootInfo := "" + runRootSplit := strings.SplitN(storeSpec, "+", 2) + if len(runRootSplit) == 2 { + runRootInfo = runRootSplit[1] + storeSpec = runRootSplit[0] + } + // The rest is our graph root. + rootInfo := storeSpec + // Check that any paths are absolute paths. + if rootInfo != "" && !filepath.IsAbs(rootInfo) { + return nil, ErrPathNotAbsolute + } + if runRootInfo != "" && !filepath.IsAbs(runRootInfo) { + return nil, ErrPathNotAbsolute + } + store2, err := storage.GetStore(storage.StoreOptions{ + GraphDriverName: driverInfo, + GraphRoot: rootInfo, + RunRoot: runRootInfo, + GraphDriverOptions: options, + UIDMap: s.defaultUIDMap, + GIDMap: s.defaultGIDMap, + }) + if err != nil { + return nil, err + } + store = store2 + } else { + // We didn't have a store spec, so use the default. + store2, err := s.GetStore() + if err != nil { + return nil, err + } + store = store2 + } + return s.ParseStoreReference(store, reference) +} + +func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { + dref := ref.DockerReference() + if dref != nil { + if img, err := store.Image(dref.String()); err == nil { + return img, nil + } + } + if sref, ok := ref.(*storageReference); ok { + tmpRef := *sref + if img, err := tmpRef.resolveImage(); err == nil { + return img, nil + } + } + return nil, storage.ErrImageUnknown +} + +func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { + store, err := s.GetStore() + if err != nil { + return nil, err + } + return s.GetStoreImage(store, ref) +} + +func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { + // Check that there's a store location prefix. Values we're passed are + // expected to come from PolicyConfigurationIdentity or + // PolicyConfigurationNamespaces, so if there's no store location, + // something's wrong. + if scope[0] != '[' { + return ErrInvalidReference + } + // Parse the store location prefix. + closeIndex := strings.IndexRune(scope, ']') + if closeIndex < 1 { + return ErrInvalidReference + } + storeSpec := scope[1:closeIndex] + scope = scope[closeIndex+1:] + storeInfo := strings.SplitN(storeSpec, "@", 2) + if len(storeInfo) == 1 && storeInfo[0] != "" { + // One component: the graph root. + if !filepath.IsAbs(storeInfo[0]) { + return ErrPathNotAbsolute + } + } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { + // Two components: the driver type and the graph root. + if !filepath.IsAbs(storeInfo[1]) { + return ErrPathNotAbsolute + } + } else { + // Anything else: scope specified in a form we don't + // recognize. + return ErrInvalidReference + } + // That might be all of it, and that's okay. + if scope == "" { + return nil + } + + fields := strings.SplitN(scope, "@", 3) + switch len(fields) { + case 1: // name only + case 2: // name:tag@ID or name[:tag]@digest + if _, idErr := digest.Parse("sha256:" + fields[1]); idErr != nil { + if _, digestErr := digest.Parse(fields[1]); digestErr != nil { + return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error()) + } + } + case 3: // name[:tag]@digest@ID + if _, err := digest.Parse(fields[1]); err != nil { + return err + } + if _, err := digest.Parse("sha256:" + fields[2]); err != nil { + return err + } + default: // Coverage: This should never happen + return errors.New("Internal error: unexpected number of fields form strings.SplitN") + } + // As for field[0], if it is non-empty at all: + // FIXME? 
+ // from docker/distribution/reference.regexp.go, but other than that there
+ // are few semantically invalid strings.
+ return nil
+}
diff --git a/vendor/github.com/containers/image/v4/tarball/doc.go b/vendor/github.com/containers/image/v4/tarball/doc.go
new file mode 100644
index 000000000..ebbe156bd
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/tarball/doc.go
@@ -0,0 +1,48 @@
+// Package tarball provides a way to generate images using one or more layer
+// tarballs and an optional template configuration.
+//
+// An example:
+// package main
+//
+// import (
+// "fmt"
+//
+// cp "github.com/containers/image/v4/copy"
+// "github.com/containers/image/v4/tarball"
+// "github.com/containers/image/v4/transports/alltransports"
+//
+// imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+// )
+//
+// func imageFromTarball() {
+// src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
+// // - or -
+// // src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
+// if err != nil {
+// panic(err)
+// }
+// updater, ok := src.(tarball.ConfigUpdater)
+// if !ok {
+// panic("unexpected: a tarball reference should implement tarball.ConfigUpdater")
+// }
+// config := imgspecv1.Image{
+// Config: imgspecv1.ImageConfig{
+// Cmd: []string{"/bin/bash"},
+// },
+// }
+// annotations := make(map[string]string)
+// annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache"
+// err = updater.ConfigUpdate(config, annotations)
+// if err != nil {
+// panic(err)
+// }
+// dest, err := alltransports.ParseImageName("docker-daemon:mock:latest")
+// if err != nil {
+// panic(err)
+// }
+// err = cp.Image(nil, dest, src, nil)
+// if err != nil {
+// panic(err)
+// }
+// }
+package tarball
diff --git a/vendor/github.com/containers/image/v4/tarball/tarball_reference.go b/vendor/github.com/containers/image/v4/tarball/tarball_reference.go
new file mode 100644
index 000000000..d33c20de1
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/tarball/tarball_reference.go
@@ -0,0 +1,94 @@
+package tarball
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/image/v4/docker/reference"
+ "github.com/containers/image/v4/image"
+ "github.com/containers/image/v4/types"
+
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ConfigUpdater is an interface that ImageReferences for "tarball" images also
+// implement. It can be used to set values for a configuration, and to set
+// image annotations which will be present in the images returned by the
+// reference's NewImage() or NewImageSource() methods.
+type ConfigUpdater interface {
+ ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error
+}
+
+type tarballReference struct {
+ transport types.ImageTransport
+ config imgspecv1.Image
+ annotations map[string]string
+ filenames []string
+ stdin []byte
+}
+
+// ConfigUpdate updates the image's default configuration and adds annotations
+// which will be visible in source images created using this reference.
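+// A minimal usage sketch (illustrative only; assumes ref was produced by this
+// transport's ParseReference, and "org.example.note" is just a sample key):
+//
+// if updater, ok := ref.(tarball.ConfigUpdater); ok {
+// err := updater.ConfigUpdate(config, map[string]string{"org.example.note": "demo"})
+// ...
+// }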
+func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error { + r.config = config + if r.annotations == nil { + r.annotations = make(map[string]string) + } + for k, v := range annotations { + r.annotations[k] = v + } + return nil +} + +func (r *tarballReference) Transport() types.ImageTransport { + return r.transport +} + +func (r *tarballReference) StringWithinTransport() string { + return strings.Join(r.filenames, ":") +} + +func (r *tarballReference) DockerReference() reference.Named { + return nil +} + +func (r *tarballReference) PolicyConfigurationIdentity() string { + return "" +} + +func (r *tarballReference) PolicyConfigurationNamespaces() []string { + return nil +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := r.NewImageSource(ctx, sys) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, sys, src) + if err != nil { + src.Close() + return nil, err + } + return img, nil +} + +func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + for _, filename := range r.filenames { + if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("error removing %q: %v", filename, err) + } + } + return nil +} + +func (r *tarballReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return nil, fmt.Errorf(`"tarball:" locations can only be read from, not written to`) +} diff --git a/vendor/github.com/containers/image/v4/tarball/tarball_src.go b/vendor/github.com/containers/image/v4/tarball/tarball_src.go new file mode 100644 index 000000000..ead1a50bd --- /dev/null +++ b/vendor/github.com/containers/image/v4/tarball/tarball_src.go @@ -0,0 +1,268 @@ +package tarball + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" + "time" + + "github.com/containers/image/v4/types" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + imgspecs "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type tarballImageSource struct { + reference tarballReference + filenames []string + diffIDs []digest.Digest + diffSizes []int64 + blobIDs []digest.Digest + blobSizes []int64 + blobTypes []string + config []byte + configID digest.Digest + configSize int64 + manifest []byte +} + +func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + // Gather up the digests, sizes, and date information for all of the files. 
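+ // Note that each file is read in full here just to compute digests and
+ // sizes; GetBlob reopens the files later when the content is actually copied.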
+ filenames := []string{} + diffIDs := []digest.Digest{} + diffSizes := []int64{} + blobIDs := []digest.Digest{} + blobSizes := []int64{} + blobTimes := []time.Time{} + blobTypes := []string{} + for _, filename := range r.filenames { + var file *os.File + var err error + var blobSize int64 + var blobTime time.Time + var reader io.Reader + if filename == "-" { + blobSize = int64(len(r.stdin)) + blobTime = time.Now() + reader = bytes.NewReader(r.stdin) + } else { + file, err = os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) + } + defer file.Close() + reader = file + fileinfo, err := file.Stat() + if err != nil { + return nil, fmt.Errorf("error reading size of %q: %v", filename, err) + } + blobSize = fileinfo.Size() + blobTime = fileinfo.ModTime() + } + + // Default to assuming the layer is compressed. + layerType := imgspecv1.MediaTypeImageLayerGzip + + // Set up to digest the file as it is. + blobIDdigester := digest.Canonical.Digester() + reader = io.TeeReader(reader, blobIDdigester.Hash()) + + // Set up to digest the file after we maybe decompress it. + diffIDdigester := digest.Canonical.Digester() + uncompressed, err := pgzip.NewReader(reader) + if err == nil { + // It is compressed, so the diffID is the digest of the uncompressed version + reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) + } else { + // It is not compressed, so the diffID and the blobID are going to be the same + diffIDdigester = blobIDdigester + layerType = imgspecv1.MediaTypeImageLayer + uncompressed = nil + } + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + n, err := io.Copy(ioutil.Discard, reader) + if err != nil { + return nil, fmt.Errorf("error reading %q: %v", filename, err) + } + if uncompressed != nil { + uncompressed.Close() + } + + // Grab our uncompressed and possibly-compressed digests and sizes. + filenames = append(filenames, filename) + diffIDs = append(diffIDs, diffIDdigester.Digest()) + diffSizes = append(diffSizes, n) + blobIDs = append(blobIDs, blobIDdigester.Digest()) + blobSizes = append(blobSizes, blobSize) + blobTimes = append(blobTimes, blobTime) + blobTypes = append(blobTypes, layerType) + } + + // Build the rootfs and history for the configuration blob. + rootfs := imgspecv1.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + created := time.Time{} + history := []imgspecv1.History{} + // Pick up the layer comment from the configuration's history list, if one is set. + comment := "imported from tarball" + if len(r.config.History) > 0 && r.config.History[0].Comment != "" { + comment = r.config.History[0].Comment + } + for i := range diffIDs { + createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) + history = append(history, imgspecv1.History{ + Created: &blobTimes[i], + CreatedBy: createdBy, + Comment: comment, + }) + // Use the mtime of the most recently modified file as the image's creation time. + if created.Before(blobTimes[i]) { + created = blobTimes[i] + } + } + + // Pick up other defaults from the config in the reference. + config := r.config + if config.Created == nil { + config.Created = &created + } + if config.Architecture == "" { + config.Architecture = runtime.GOARCH + } + if config.OS == "" { + config.OS = runtime.GOOS + } + config.RootFS = rootfs + config.History = history + + // Encode and digest the image configuration blob. 
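+ // The digest computed here doubles as the digest of the config descriptor
+ // in the manifest assembled below.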
+ configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) + } + configID := digest.Canonical.FromBytes(configBytes) + configSize := int64(len(configBytes)) + + // Populate a manifest with the configuration blob and the file as the single layer. + layerDescriptors := []imgspecv1.Descriptor{} + for i := range blobIDs { + layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ + Digest: blobIDs[i], + Size: blobSizes[i], + MediaType: blobTypes[i], + }) + } + annotations := make(map[string]string) + for k, v := range r.annotations { + annotations[k] = v + } + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{ + SchemaVersion: 2, + }, + Config: imgspecv1.Descriptor{ + Digest: configID, + Size: configSize, + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: layerDescriptors, + Annotations: annotations, + } + + // Encode the manifest. + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) + } + + // Return the image. + src := &tarballImageSource{ + reference: *r, + filenames: filenames, + diffIDs: diffIDs, + diffSizes: diffSizes, + blobIDs: blobIDs, + blobSizes: blobSizes, + blobTypes: blobTypes, + config: configBytes, + configID: configID, + configSize: configSize, + manifest: manifestBytes, + } + + return src, nil +} + +func (is *tarballImageSource) Close() error { + return nil +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (is *tarballImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + // We should only be asked about things in the manifest. Maybe the configuration blob. + if blobinfo.Digest == is.configID { + return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil + } + // Maybe one of the layer blobs. + for i := range is.blobIDs { + if blobinfo.Digest == is.blobIDs[i] { + // We want to read that layer: open the file or memory block and hand it back. + if is.filenames[i] == "-" { + return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + } + reader, err := os.Open(is.filenames[i]) + if err != nil { + return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) + } + return reader, is.blobSizes[i], nil + } + } + return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
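+// For this transport the manifest was already synthesized in NewImageSource,
+// so no remote service is involved.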
+func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return is.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return nil, nil +} + +func (is *tarballImageSource) Reference() types.ImageReference { + return &is.reference +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (*tarballImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v4/tarball/tarball_transport.go b/vendor/github.com/containers/image/v4/tarball/tarball_transport.go new file mode 100644 index 000000000..84874cfbf --- /dev/null +++ b/vendor/github.com/containers/image/v4/tarball/tarball_transport.go @@ -0,0 +1,66 @@ +package tarball + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/types" +) + +const ( + transportName = "tarball" + separator = ":" +) + +var ( + // Transport implements the types.ImageTransport interface for "tarball:" images, + // which are makeshift images constructed using one or more possibly-compressed tar + // archives. + Transport = &tarballTransport{} +) + +type tarballTransport struct { +} + +func (t *tarballTransport) Name() string { + return transportName +} + +func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) { + var stdin []byte + var err error + filenames := strings.Split(reference, separator) + for _, filename := range filenames { + if filename == "-" { + stdin, err = ioutil.ReadAll(os.Stdin) + if err != nil { + return nil, fmt.Errorf("error buffering stdin: %v", err) + } + continue + } + f, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q: %v", filename, err) + } + f.Close() + } + ref := &tarballReference{ + transport: t, + filenames: filenames, + stdin: stdin, + } + return ref, nil +} + +func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { + // See the explanation in daemonReference.PolicyConfigurationIdentity. 
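+ // (tarball references only name local files, so there is no non-default
+ // scope that could usefully be matched.)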
+ return errors.New(`tarball: does not support any scopes except the default "" one`) +} + +func init() { + transports.Register(Transport) +} diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v4/transports/alltransports/alltransports.go new file mode 100644 index 000000000..ae68fb8e6 --- /dev/null +++ b/vendor/github.com/containers/image/v4/transports/alltransports/alltransports.go @@ -0,0 +1,46 @@ +package alltransports + +import ( + "strings" + + // register all known transports + // NOTE: Make sure docs/containers-policy.json.5.md is updated when adding or updating + // a transport. + _ "github.com/containers/image/v4/directory" + _ "github.com/containers/image/v4/docker" + _ "github.com/containers/image/v4/docker/archive" + _ "github.com/containers/image/v4/oci/archive" + _ "github.com/containers/image/v4/oci/layout" + _ "github.com/containers/image/v4/openshift" + _ "github.com/containers/image/v4/tarball" + // The ostree transport is registered by ostree*.go + // The storage transport is registered by storage*.go + "github.com/containers/image/v4/transports" + "github.com/containers/image/v4/types" + "github.com/pkg/errors" +) + +// ParseImageName converts a URL-like image name to a types.ImageReference. +func ParseImageName(imgName string) (types.ImageReference, error) { + // Keep this in sync with TransportFromImageName! + parts := strings.SplitN(imgName, ":", 2) + if len(parts) != 2 { + return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) + } + transport := transports.Get(parts[0]) + if transport == nil { + return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0]) + } + return transport.ParseReference(parts[1]) +} + +// TransportFromImageName converts an URL-like name to a types.ImageTransport or nil when +// the transport is unknown or when the input is invalid. +func TransportFromImageName(imageName string) types.ImageTransport { + // Keep this in sync with ParseImageName! 
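+ // For example, TransportFromImageName("docker://busybox") returns the
+ // registered "docker" transport, while a name without a colon yields nil.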
+ parts := strings.SplitN(imageName, ":", 2) + if len(parts) == 2 { + return transports.Get(parts[0]) + } + return nil +} diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon.go new file mode 100644 index 000000000..d3fc18b2c --- /dev/null +++ b/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon.go @@ -0,0 +1,8 @@ +// +build !containers_image_docker_daemon_stub + +package alltransports + +import ( + // Register the docker-daemon transport + _ "github.com/containers/image/v4/docker/daemon" +) diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon_stub.go new file mode 100644 index 000000000..82e055377 --- /dev/null +++ b/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon_stub.go @@ -0,0 +1,9 @@ +// +build containers_image_docker_daemon_stub + +package alltransports + +import "github.com/containers/image/v4/transports" + +func init() { + transports.Register(transports.NewStubTransport("docker-daemon")) +} diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/ostree.go b/vendor/github.com/containers/image/v4/transports/alltransports/ostree.go new file mode 100644 index 000000000..891696616 --- /dev/null +++ b/vendor/github.com/containers/image/v4/transports/alltransports/ostree.go @@ -0,0 +1,8 @@ +// +build containers_image_ostree,linux + +package alltransports + +import ( + // Register the ostree transport + _ "github.com/containers/image/v4/ostree" +) diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/v4/transports/alltransports/ostree_stub.go new file mode 100644 index 000000000..892518d5c --- /dev/null +++ b/vendor/github.com/containers/image/v4/transports/alltransports/ostree_stub.go @@ -0,0 +1,9 @@ +// +build !containers_image_ostree !linux + +package alltransports + +import "github.com/containers/image/v4/transports" + +func init() { + transports.Register(transports.NewStubTransport("ostree")) +} diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/storage.go b/vendor/github.com/containers/image/v4/transports/alltransports/storage.go new file mode 100644 index 000000000..96f3e17fc --- /dev/null +++ b/vendor/github.com/containers/image/v4/transports/alltransports/storage.go @@ -0,0 +1,8 @@ +// +build !containers_image_storage_stub + +package alltransports + +import ( + // Register the storage transport + _ "github.com/containers/image/v4/storage" +) diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/v4/transports/alltransports/storage_stub.go new file mode 100644 index 000000000..14c942116 --- /dev/null +++ b/vendor/github.com/containers/image/v4/transports/alltransports/storage_stub.go @@ -0,0 +1,9 @@ +// +build containers_image_storage_stub + +package alltransports + +import "github.com/containers/image/v4/transports" + +func init() { + transports.Register(transports.NewStubTransport("containers-storage")) +} diff --git a/vendor/github.com/containers/image/v4/transports/stub.go b/vendor/github.com/containers/image/v4/transports/stub.go new file mode 100644 index 000000000..e3ee62031 --- /dev/null +++ b/vendor/github.com/containers/image/v4/transports/stub.go @@ -0,0 +1,36 @@ +package transports + 
+import ( + "fmt" + + "github.com/containers/image/v4/types" +) + +// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. +type stubTransport string + +// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. +func NewStubTransport(name string) types.ImageTransport { + return stubTransport(name) +} + +// Name returns the name of the transport, which must be unique among other transports. +func (s stubTransport) Name() string { + return string(s) +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) { + return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s)) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error { + // Allowing any reference in here allows tools with some transports stubbed-out to still + // use signature verification policies which refer to these stubbed-out transports. + // See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON . + return nil +} diff --git a/vendor/github.com/containers/image/v4/transports/transports.go b/vendor/github.com/containers/image/v4/transports/transports.go new file mode 100644 index 000000000..8bdb46b4b --- /dev/null +++ b/vendor/github.com/containers/image/v4/transports/transports.go @@ -0,0 +1,90 @@ +package transports + +import ( + "fmt" + "sort" + "sync" + + "github.com/containers/image/v4/types" +) + +// knownTransports is a registry of known ImageTransport instances. +type knownTransports struct { + transports map[string]types.ImageTransport + mu sync.Mutex +} + +func (kt *knownTransports) Get(k string) types.ImageTransport { + kt.mu.Lock() + t := kt.transports[k] + kt.mu.Unlock() + return t +} + +func (kt *knownTransports) Remove(k string) { + kt.mu.Lock() + delete(kt.transports, k) + kt.mu.Unlock() +} + +func (kt *knownTransports) Add(t types.ImageTransport) { + kt.mu.Lock() + defer kt.mu.Unlock() + name := t.Name() + if t := kt.transports[name]; t != nil { + panic(fmt.Sprintf("Duplicate image transport name %s", name)) + } + kt.transports[name] = t +} + +var kt *knownTransports + +func init() { + kt = &knownTransports{ + transports: make(map[string]types.ImageTransport), + } +} + +// Get returns the transport specified by name or nil when unavailable. +func Get(name string) types.ImageTransport { + return kt.Get(name) +} + +// Delete deletes a transport from the registered transports. +func Delete(name string) { + kt.Remove(name) +} + +// Register registers a transport. +func Register(t types.ImageTransport) { + kt.Add(t) +} + +// ImageName converts a types.ImageReference into an URL-like image name, which MUST be such that +// ParseImageName(ImageName(reference)) returns an equivalent reference. 
+//
+// This is the generally recommended way to refer to images in the UI.
+//
+// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+func ImageName(ref types.ImageReference) string {
+ return ref.Transport().Name() + ":" + ref.StringWithinTransport()
+}
+
+// ListNames returns a list of non-deprecated transport names.
+// Deprecated transports can be used, but are not presented to users.
+func ListNames() []string {
+ kt.mu.Lock()
+ defer kt.mu.Unlock()
+ deprecated := map[string]bool{
+ "atomic": true,
+ }
+ var names []string
+ for _, transport := range kt.transports {
+ if !deprecated[transport.Name()] {
+ names = append(names, transport.Name())
+ }
+ }
+ sort.Strings(names)
+ return names
+}
diff --git a/vendor/github.com/containers/image/v4/types/types.go b/vendor/github.com/containers/image/v4/types/types.go
new file mode 100644
index 000000000..af11a2b21
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/types/types.go
@@ -0,0 +1,535 @@
+package types
+
+import (
+ "context"
+ "io"
+ "time"
+
+ "github.com/containers/image/v4/docker/reference"
+ compression "github.com/containers/image/v4/pkg/compression/types"
+ digest "github.com/opencontainers/go-digest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageTransport is a top-level namespace for ways to store/load an image.
+// It should generally correspond to ImageSource/ImageDestination implementations.
+//
+// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
+// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS
+// (or, even, IPv4 or IPv6).
+//
+// OTOH all images using the same transport should (apart from versions of the image format) be interoperable.
+// For example, several different ImageTransport implementations may be based on local filesystem paths,
+// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...)
+//
+// See also transports.KnownTransports.
+type ImageTransport interface {
+ // Name returns the name of the transport, which must be unique among other transports.
+ Name() string
+ // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+ ParseReference(reference string) (ImageReference, error)
+ // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+ // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+ // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+ // scope passed to this function will not be "", that value is always allowed.
+ ValidatePolicyConfigurationScope(scope string) error
+}
+
+// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport.
+//
+// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening
+// within an ImageTransport.ParseReference() or equivalent API creating the reference object.
+// That's also why the various identification/formatting methods of this type do not support returning errors.
+// +// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside +// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on. +type ImageReference interface { + Transport() ImageTransport + // StringWithinTransport returns a string representation of the reference, which MUST be such that + // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. + // NOTE: The returned string is not promised to be equal to the original input to ParseReference; + // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. + // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; + // instead, see transports.ImageName(). + StringWithinTransport() string + + // DockerReference returns a Docker reference associated with this reference + // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, + // not e.g. after redirect or alias processing), or nil if unknown/not applicable. + DockerReference() reference.Named + + // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. + // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; + // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical + // (i.e. various references with exactly the same semantics should return the same configuration identity) + // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but + // not required/guaranteed that it will be a valid input to Transport().ParseReference(). + // Returns "" if configuration identities for these references are not supported. + PolicyConfigurationIdentity() string + + // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search + // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed + // in order, terminating on first match, and an implicit "" is always checked at the end. + // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), + // and each following element to be a prefix of the element preceding it. + PolicyConfigurationNamespaces() []string + + // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. + // The caller must call .Close() on the returned ImageCloser. + // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, + // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. + // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. + NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error) + // NewImageSource returns a types.ImageSource for this reference. + // The caller must call .Close() on the returned ImageSource. + NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error) + // NewImageDestination returns a types.ImageDestination for this reference. + // The caller must call .Close() on the returned ImageDestination. 
+ NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error)
+
+ // DeleteImage deletes the named image from the registry, if supported.
+ DeleteImage(ctx context.Context, sys *SystemContext) error
+}
+
+// LayerCompression indicates if layers must be compressed, decompressed or preserved
+type LayerCompression int
+
+const (
+ // PreserveOriginal indicates the layer must be preserved, i.e.
+ // no compression or decompression.
+ PreserveOriginal LayerCompression = iota
+ // Decompress indicates the layer must be decompressed
+ Decompress
+ // Compress indicates the layer must be compressed
+ Compress
+)
+
+// BlobInfo collects known information about a blob (layer/config).
+// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
+type BlobInfo struct {
+ Digest digest.Digest // "" if unknown.
+ Size int64 // -1 if unknown
+ URLs []string
+ Annotations map[string]string
+ MediaType string
+ // CompressionOperation is used in Image.UpdateLayerInfos to instruct
+ // whether the original layer should be preserved or (de)compressed. The
+ // field defaults to preserve the original layer.
+ CompressionOperation LayerCompression
+ // CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
+ // MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
+ // set when `CompressionOperation == Compress`.
+ CompressionAlgorithm *compression.Algorithm
+}
+
+// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
+// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
+// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICTransportScope struct {
+ Opaque string
+}
+
+// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
+// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
+// can look it up using BlobInfoCache.CandidateLocations.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICLocationReference struct {
+ Opaque string
+}
+
+// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
+type BICReplacementCandidate struct {
+ Digest digest.Digest
+ Location BICLocationReference
+}
+
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
+//
+// It records two kinds of data:
+// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
+//
+// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+//
+// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+// compress/decompress blobs for their own purposes.
+//
+// - Known blob locations, managed by individual transports:
+// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+// recording transport-specific information that allows the transport to reuse the blob in the future;
+// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+// Each transport defines its own “scopes” within which blob reuse is possible (e.g. in the docker/distribution case, blobs
+// can be directly reused within a registry, or mounted across registries within a registry server).
+//
+// None of the methods return an error indication: errors when reading from, or writing to, the cache should not be fatal;
+// users of the cache should just fall back to copying the blobs the usual way.
+type BlobInfoCache interface {
+ // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+ // May return anyDigest if it is known to be uncompressed.
+ // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+ UncompressedDigest(anyDigest digest.Digest) digest.Digest
+ // RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+ // It’s allowed for anyDigest == uncompressed.
+ // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+ // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+ // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+ RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
+
+ // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+ // and can be reused given the opaque location data.
+ RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
+ // CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+ // within the specified (transport, scope) (if they still exist, which is not guaranteed).
+ //
+ // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+ // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+ // uncompressed digest.
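+ // For example, a registry-based transport may return candidates that name
+ // other repositories on the same registry, which a destination could reuse
+ // via cross-repository blob mounting instead of re-uploading the data.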
+ CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate +} + +// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list). +// This is primarily useful for copying images around; for examining their properties, Image (below) +// is usually more useful. +// Each ImageSource should eventually be closed by calling Close(). +// +// WARNING: Various methods which return an object identified by digest generally do not +// validate that the returned data actually matches that digest; this is the caller’s responsibility. +type ImageSource interface { + // Reference returns the reference used to set up this source, _as specified by the user_ + // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. + Reference() ImageReference + // Close removes resources associated with an initialized ImageSource, if any. + Close() error + // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). + // It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); + // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). + GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). + // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. + GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) + // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. + HasThreadSafeGetBlob() bool + // GetSignatures returns the image's signatures. It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy(ctx context.Context) ([]BlobInfo, error) +} + +// ImageDestination is a service, possibly remote (= slow), to store components of a single image. 
+//
+// There is a specific required order for some of the calls:
+// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
+// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
+// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
+//
+// Each ImageDestination should eventually be closed by calling Close().
+type ImageDestination interface {
+ // Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
+ // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+ Reference() ImageReference
+ // Close removes resources associated with an initialized ImageDestination, if any.
+ Close() error
+
+ // SupportedManifestMIMETypes tells which manifest mime types the destination supports.
+ // If an empty slice or nil is returned, then any mime type can be tried to upload.
+ SupportedManifestMIMETypes() []string
+ // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+ // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+ SupportsSignatures(ctx context.Context) error
+ // DesiredLayerCompression indicates the kind of compression to apply on layers
+ DesiredLayerCompression() LayerCompression
+ // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+ // uploaded to the image destination, true otherwise.
+ AcceptsForeignLayerURLs() bool
+ // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+ MustMatchRuntimeOS() bool
+ // IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+ // and would prefer to receive an unmodified manifest instead of one modified for the destination.
+ // Does not make a difference if Reference().DockerReference() is nil.
+ IgnoresEmbeddedDockerReference() bool
+
+ // PutBlob writes contents of stream and returns data representing the result.
+ // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+ // inputInfo.Size is the expected length of stream, if known.
+ // inputInfo.MediaType describes the blob format, if known.
+ // May update cache.
+ // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+ // to any other readers for download using the supplied digest.
+ // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+ PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
+ // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+ HasThreadSafePutBlob() bool
+ // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+ // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+ // info.Digest must not be empty.
+ // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+ // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+ // If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+ // May use and/or update cache.
+ TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
+ // PutManifest writes manifest to the destination.
+ // FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+ // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+ // but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+ PutManifest(ctx context.Context, manifest []byte) error
+ PutSignatures(ctx context.Context, signatures [][]byte) error
+ // Commit marks the process of storing the image as successful and asks for the image to be persisted.
+ // WARNING: This does not have any transactional semantics:
+ // - Uploaded data MAY be visible to others before Commit() is called
+ // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+ Commit(ctx context.Context) error
+}
+
+// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
+// refuses specifically this manifest type, but may accept a different manifest type.
+type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+ Err error
+}
+
+func (e ManifestTypeRejectedError) Error() string {
+ return e.Err.Error()
+}
+
+// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
+// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
+// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
+// This also makes the UnparsedImage→Image conversion an explicitly visible step.
+//
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+type UnparsedImage interface {
+ // Reference returns the reference used to set up this source, _as specified by the user_
+ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+ Reference() ImageReference
+ // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+ Manifest(ctx context.Context) ([]byte, string, error)
+ // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+ Signatures(ctx context.Context) ([][]byte, error)
+}
+
+// Image is the primary API for inspecting properties of images.
+// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+// Image is the primary API for inspecting properties of images.
+// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+type Image interface {
+	// Note that Reference may return nil in the return value of UpdatedImage!
+	UnparsedImage
+	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+	// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+	ConfigInfo() BlobInfo
+	// ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise.
+	// The result is cached; it is OK to call this however often you need.
+	ConfigBlob(context.Context) ([]byte, error)
+	// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+	// old image manifests work (docker v2s1 especially).
+	OCIConfig(context.Context) (*v1.Image, error)
+	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfos() []BlobInfo
+	// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
+	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfosForCopy(context.Context) ([]BlobInfo, error)
+	// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+	// It returns false if the manifest does not embed a Docker reference.
+	// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+	EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+	Inspect(context.Context) (*ImageInspectInfo, error)
+	// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+	// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+	// (most importantly, it forces us to download the full layers even if they are already present at the destination).
+	UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool
+	// UpdatedImage returns a types.Image modified according to options.
+	// Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
+	// This does not change the state of the original Image object.
+	UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
+	// Size returns an approximation of the amount of disk space which is consumed by the image in its current
+	// location. If the size is not known, -1 will be returned.
+	Size() (int64, error)
+}
+
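UnparsedImage is what makes the signature-first flow above possible: both accessors are cached, so policy code can fetch and check the raw bytes before anything is parsed. A hedged sketch, assuming an existing unparsed (a types.UnparsedImage) and ctx in a function returning error:

    // Fetch raw signatures and manifest without interpreting either.
    sigs, err := unparsed.Signatures(ctx)
    if err != nil {
        return err
    }
    manifestBytes, mimeType, err := unparsed.Manifest(ctx)
    if err != nil {
        return err
    }
    // Signature policy checks would run on sigs/manifestBytes here;
    // only afterwards would the explicit UnparsedImage→Image conversion happen.
    _, _, _ = sigs, manifestBytes, mimeType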
+// ImageCloser is an Image with a Close() method which must be called by the user.
+// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource,
+// to ensure that the ImageSource is closed.
+type ImageCloser interface {
+	Image
+	// Close removes resources associated with an initialized ImageCloser.
+	Close() error
+}
+
+// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
+type ManifestUpdateOptions struct {
+	LayerInfos              []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
+	EmbeddedDockerReference reference.Named
+	ManifestMIMEType        string
+	// The values below are NOT requests to modify the image; they provide optional context which may or may not be used.
+	InformationOnly ManifestUpdateInformation
+}
+
+// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
+// only to make writing struct literals possible.
+type ManifestUpdateInformation struct {
+	Destination  ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
+	LayerInfos   []BlobInfo       // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
+	LayerDiffIDs []digest.Digest  // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
+}
+
+// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
+// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported
+// for other manifest types.
+type ImageInspectInfo struct {
+	Tag           string
+	Created       *time.Time
+	DockerVersion string
+	Labels        map[string]string
+	Architecture  string
+	Os            string
+	Layers        []string
+	Env           []string
+}
+
+// DockerAuthConfig contains authorization information for connecting to a registry.
+// The values of Username and Password can be empty for accessing the registry anonymously.
+type DockerAuthConfig struct {
+	Username string
+	Password string
+}
+
+// OptionalBool is a boolean with an additional undefined value, which is meant
+// to be used in the context of user input to distinguish between a
+// user-specified value and a default value.
+type OptionalBool byte
+
+const (
+	// OptionalBoolUndefined indicates that the OptionalBool hasn't been written.
+	OptionalBoolUndefined OptionalBool = iota
+	// OptionalBoolTrue represents the boolean true.
+	OptionalBoolTrue
+	// OptionalBoolFalse represents the boolean false.
+	OptionalBoolFalse
+)
+
+// NewOptionalBool converts the input bool into either OptionalBoolTrue or
+// OptionalBoolFalse. The function is meant to avoid boilerplate in calling code.
+func NewOptionalBool(b bool) OptionalBool {
+	o := OptionalBoolFalse
+	if b {
+		o = OptionalBoolTrue
+	}
+	return o
+}
+
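NewOptionalBool is how tri-state user input typically reaches the SystemContext fields defined next. A small illustrative wiring, assuming hypothetical tlsVerifySet/tlsVerify flag variables:

    sys := &types.SystemContext{}
    if tlsVerifySet {
        // DockerInsecureSkipTLSVerify is the inverse of a --tls-verify style flag.
        sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify)
    }
    // If the user said nothing, the field stays OptionalBoolUndefined
    // and the library's default behavior applies.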
+// SystemContext allows parameterizing access to implicitly-accessed resources,
+// like configuration files in /etc and users' login state in their home directory.
+// Various components can share the same field only if their semantics is exactly
+// the same; if in doubt, add a new field.
+// It is always OK to pass nil instead of a SystemContext.
+type SystemContext struct {
+	// If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
+	// Not used for any of the more specific path overrides available in this struct.
+	// Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
+	// NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement,
+	// just set RootForImplicitAbsolutePaths, and there is no need to worry about the environment.)
+	// NOTE: This does NOT affect paths starting with $HOME.
+	RootForImplicitAbsolutePaths string
+
+	// === Global configuration overrides ===
+	// If not "", overrides the system's default path for signature.Policy configuration.
+	SignaturePolicyPath string
+	// If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
+	RegistriesDirPath string
+	// Path to the system-wide registries configuration file
+	SystemRegistriesConfPath string
+	// If not "", overrides the default path for the authentication file
+	AuthFilePath string
+	// If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
+	ArchitectureChoice string
+	// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
+	OSChoice string
+	// If not "", overrides the system's default directory containing a blob info cache.
+	BlobInfoCacheDir string
+
+	// Additional tags when creating or copying a docker-archive.
+	DockerArchiveAdditionalTags []reference.NamedTagged
+
+	// === OCI.Transport overrides ===
+	// If not "", a directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client certificate key
+	// (ending with ".key") used when downloading OCI image layers.
+	OCICertPath string
+	// Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+	OCIInsecureSkipTLSVerify bool
+	// If not "", use a shared directory for storing blobs rather than within OCI layouts
+	OCISharedBlobDirPath string
+	// Allow uncompressed image layers when reading or writing OCI images
+	OCIAcceptUncompressedLayers bool
+
+	// === docker.Transport overrides ===
+	// If not "", a directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client certificate key
+	// (ending with ".key") used when talking to a Docker Registry.
+	DockerCertPath string
+	// If not "", overrides the system's default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
+	// Ignored if DockerCertPath is non-empty.
+	DockerPerHostCertDirPath string
+	// Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+	DockerInsecureSkipTLSVerify OptionalBool
+	// If nil, the library tries to parse ~/.docker/config.json to retrieve credentials.
+	DockerAuthConfig *DockerAuthConfig
+	// If not "", a User-Agent header is added to each request when contacting a registry.
+	DockerRegistryUserAgent string
+	// If true, a V1 ping attempt isn't done to give users a better error. Default is false.
+	// Note that this field is used mainly to integrate containers/image into projectatomic/docker
+	// in order to not break any existing docker's integration tests.
+	DockerDisableV1Ping bool
+	// Directory to use for OSTree temporary files
+	OSTreeTmpDirPath string
+
+	// === docker/daemon.Transport overrides ===
+	// A directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client certificate key
+	// (ending with ".key") used when talking to a Docker daemon.
+	DockerDaemonCertPath string
+	// The hostname or IP address of the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed.
+	DockerDaemonHost string
+	// Used to skip TLS verification, off by default. To take effect, DockerDaemonCertPath needs to be specified as well.
+	DockerDaemonInsecureSkipTLSVerify bool
+
+	// === dir.Transport overrides ===
+	// DirForceCompress compresses the image layers if set to true
+	DirForceCompress bool
+
+	// CompressionFormat is the format to use for the compression of the blobs
+	CompressionFormat *compression.Algorithm
+	// CompressionLevel specifies what compression level is used
+	CompressionLevel *int
+}
+
+// ProgressProperties is used to pass information from the copy code to a monitor which
+// can use the real-time information to produce output or react to changes.
+type ProgressProperties struct {
+	Artifact BlobInfo
+	Offset   uint64
+}
diff --git a/vendor/github.com/containers/image/v4/version/version.go b/vendor/github.com/containers/image/v4/version/version.go
new file mode 100644
index 000000000..2fa6706df
--- /dev/null
+++ b/vendor/github.com/containers/image/v4/version/version.go
@@ -0,0 +1,18 @@
+package version
+
+import "fmt"
+
+const (
+	// VersionMajor is for API-incompatible changes
+	VersionMajor = 4
+	// VersionMinor is for new, backwards-compatible functionality
+	VersionMinor = 0
+	// VersionPatch is for backwards-compatible bug fixes
+	VersionPatch = 1
+
+	// VersionDev indicates development branch. Releases will be empty string.
+	VersionDev = ""
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/containers/image/version/version.go b/vendor/github.com/containers/image/version/version.go
deleted file mode 100644
index f1e795d9b..000000000
--- a/vendor/github.com/containers/image/version/version.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package version
-
-import "fmt"
-
-const (
-	// VersionMajor is for an API incompatible changes
-	VersionMajor = 3
-	// VersionMinor is for functionality in a backwards-compatible manner
-	VersionMinor = 0
-	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 2
-
-	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = ""
-)
-
-// Version is the specification version that the package types support.
-var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/containers/libtrust/CONTRIBUTING.md b/vendor/github.com/containers/libtrust/CONTRIBUTING.md
new file mode 100644
index 000000000..05be0f8ab
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/CONTRIBUTING.md
@@ -0,0 +1,13 @@
+# Contributing to libtrust
+
+Want to hack on libtrust? Awesome! Here are instructions to get you
+started.
+
+libtrust is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read
+[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
+
+Happy hacking!
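For downstream code, the visible effect of this bump is the new v4 import path; the version package added above gives a quick way to confirm which library a binary was built against. A minimal, self-contained check (the program itself is illustrative, not part of the patch):

    package main

    import (
        "fmt"

        "github.com/containers/image/v4/version"
    )

    func main() {
        // Version is assembled from VersionMajor/Minor/Patch (+VersionDev);
        // for this vendored tree it prints "4.0.1".
        fmt.Println("containers/image:", version.Version)
    }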
diff --git a/vendor/github.com/containers/libtrust/LICENSE b/vendor/github.com/containers/libtrust/LICENSE new file mode 100644 index 000000000..27448585a --- /dev/null +++ b/vendor/github.com/containers/libtrust/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/containers/libtrust/MAINTAINERS b/vendor/github.com/containers/libtrust/MAINTAINERS
new file mode 100644
index 000000000..9768175fe
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/MAINTAINERS
@@ -0,0 +1,3 @@
+Solomon Hykes
+Josh Hawn (github: jlhawn)
+Derek McGowan (github: dmcgowan)
diff --git a/vendor/github.com/containers/libtrust/README.md b/vendor/github.com/containers/libtrust/README.md
new file mode 100644
index 000000000..dcffb31ae
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/README.md
@@ -0,0 +1,22 @@
+# libtrust
+
+> **WARNING** this library is no longer actively developed, and will be integrated
+> into the [docker/distribution](https://www.github.com/docker/distribution)
+> repository in the future.
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control is managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
+
diff --git a/vendor/github.com/containers/libtrust/certificates.go b/vendor/github.com/containers/libtrust/certificates.go
new file mode 100644
index 000000000..3dcca33cb
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/certificates.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"math/big"
+	"net"
+	"time"
+)
+
+type certTemplateInfo struct {
+	commonName  string
+	domains     []string
+	ipAddresses []net.IP
+	isCA        bool
+	clientAuth  bool
+	serverAuth  bool
+}
+
+func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
+	// Generate a certificate template which is valid from the past week to
+	// 10 years from now. The usage of the certificate depends on the
+	// specified fields in the given certTemplateInfo object.
+	var (
+		keyUsage    x509.KeyUsage
+		extKeyUsage []x509.ExtKeyUsage
+	)
+
+	if info.isCA {
+		keyUsage = x509.KeyUsageCertSign
+	}
+
+	if info.clientAuth {
+		extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth)
+	}
+
+	if info.serverAuth {
+		extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth)
+	}
+
+	return &x509.Certificate{
+		SerialNumber: big.NewInt(0),
+		Subject: pkix.Name{
+			CommonName: info.commonName,
+		},
+		NotBefore:             time.Now().Add(-time.Hour * 24 * 7),
+		NotAfter:              time.Now().Add(time.Hour * 24 * 365 * 10),
+		DNSNames:              info.domains,
+		IPAddresses:           info.ipAddresses,
+		IsCA:                  info.isCA,
+		KeyUsage:              keyUsage,
+		ExtKeyUsage:           extKeyUsage,
+		BasicConstraintsValid: info.isCA,
+	}
+}
+
+func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) {
+	pubCertTemplate := generateCertTemplate(subInfo)
+	privCertTemplate := generateCertTemplate(issInfo)
+
+	certDER, err := x509.CreateCertificate(
+		rand.Reader, pubCertTemplate, privCertTemplate,
+		pub.CryptoPublicKey(), priv.CryptoPrivateKey(),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create certificate: %s", err)
+	}
+
+	cert, err = x509.ParseCertificate(certDER)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse certificate: %s", err)
+	}
+
+	return
+}
+
+// GenerateSelfSignedServerCert creates a self-signed certificate for the
+// given key which is to be used for TLS servers with the given domains and
+// IP addresses.
+func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) {
+	info := &certTemplateInfo{
+		commonName:  key.KeyID(),
+		domains:     domains,
+		ipAddresses: ipAddresses,
+		serverAuth:  true,
+	}
+
+	return generateCert(key.PublicKey(), key, info, info)
+}
+
+// GenerateSelfSignedClientCert creates a self-signed certificate for the
+// given key which is to be used for TLS clients.
+func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) {
+	info := &certTemplateInfo{
+		commonName: key.KeyID(),
+		clientAuth: true,
+	}
+
+	return generateCert(key.PublicKey(), key, info, info)
+}
+
+// GenerateCACert creates a certificate which can be used as a trusted
+// certificate authority.
+func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) {
+	subjectInfo := &certTemplateInfo{
+		commonName: trustedKey.KeyID(),
+		isCA:       true,
+	}
+	issuerInfo := &certTemplateInfo{
+		commonName: signer.KeyID(),
+	}
+
+	return generateCert(trustedKey, signer, subjectInfo, issuerInfo)
+}
+
+// GenerateCACertPool creates a certificate authority pool to be used for a
+// TLS configuration. Any self-signed certificates issued by the specified
+// trusted keys will be verified during a TLS handshake.
+func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) {
+	certPool := x509.NewCertPool()
+
+	for _, trustedKey := range trustedKeys {
+		cert, err := GenerateCACert(signer, trustedKey)
+		if err != nil {
+			return nil, fmt.Errorf("failed to generate CA certificate: %s", err)
+		}
+
+		certPool.AddCert(cert)
+	}
+
+	return certPool, nil
+}
+
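Together these helpers cover the common self-signed identity flow. A hedged usage sketch, assuming a net import and error handling in the surrounding caller, and using GenerateECP256PrivateKey from ec_key.go further below (the host name and IP are placeholders):

    key, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        return err
    }
    // The issued certificate's CommonName is the key's KeyID.
    cert, err := libtrust.GenerateSelfSignedServerCert(
        key,
        []string{"registry.example.com"},
        []net.IP{net.ParseIP("127.0.0.1")},
    )
    if err != nil {
        return err
    }
    _ = cert // typically wrapped into a tls.Certificate next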
+// LoadCertificateBundle loads certificates from the given file. The file should be PEM encoded
+// and contain one or more certificates. The expected PEM type is "CERTIFICATE".
+func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) {
+	b, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	certificates := []*x509.Certificate{}
+	var block *pem.Block
+	block, b = pem.Decode(b)
+	for ; block != nil; block, b = pem.Decode(b) {
+		if block.Type == "CERTIFICATE" {
+			cert, err := x509.ParseCertificate(block.Bytes)
+			if err != nil {
+				return nil, err
+			}
+			certificates = append(certificates, cert)
+		} else {
+			return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
+		}
+	}
+
+	return certificates, nil
+}
+
+// LoadCertificatePool loads a CA pool from the given file. The file should be PEM encoded
+// and contain one or more certificates. The expected PEM type is "CERTIFICATE".
+func LoadCertificatePool(filename string) (*x509.CertPool, error) {
+	certs, err := LoadCertificateBundle(filename)
+	if err != nil {
+		return nil, err
+	}
+	pool := x509.NewCertPool()
+	for _, cert := range certs {
+		pool.AddCert(cert)
+	}
+	return pool, nil
+}
diff --git a/vendor/github.com/containers/libtrust/doc.go b/vendor/github.com/containers/libtrust/doc.go
new file mode 100644
index 000000000..ec5d2159c
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/doc.go
@@ -0,0 +1,9 @@
+/*
+Package libtrust provides an interface for managing authentication and
+authorization using public key cryptography. Authentication is handled
+using the identity attached to the public key and verified through TLS
+x509 certificates, a key challenge, or signature. Authorization and
+access control is managed through a trust graph distributed between
+both remote trust servers and locally cached and managed data.
+*/
+package libtrust
diff --git a/vendor/github.com/containers/libtrust/ec_key.go b/vendor/github.com/containers/libtrust/ec_key.go
new file mode 100644
index 000000000..0ee1b9110
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/ec_key.go
@@ -0,0 +1,422 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+/*
+ * EC DSA PUBLIC KEY
+ */
+
+// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital
+// signature algorithms.
+type ecPublicKey struct {
+	*ecdsa.PublicKey
+	curveName          string
+	signatureAlgorithm *signatureAlgorithm
+	extended           map[string]interface{}
+}
+
+func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
+	curve := cryptoPublicKey.Curve
+
+	switch {
+	case curve == elliptic.P256():
+		return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
+	case curve == elliptic.P384():
+		return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
+	case curve == elliptic.P521():
+		return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
+	default:
+		return nil, errors.New("unsupported elliptic curve")
+	}
+}
+
+// KeyType returns the key type for elliptic curve keys, i.e., "EC".
+func (k *ecPublicKey) KeyType() string {
+	return "EC"
+}
+
+// CurveName returns the elliptic curve identifier.
+// Possible values are "P-256", "P-384", and "P-521".
+func (k *ecPublicKey) CurveName() string {
+	return k.curveName
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *ecPublicKey) KeyID() string {
+	return keyIDFromCryptoKey(k)
+}
+
+func (k *ecPublicKey) String() string {
+	return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this
+// PublicKey. The alg parameter should identify the digital signature
+// algorithm which was used to produce the signature and should be supported
+// by this public key. Returns a nil error if the signature is valid.
+func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+	// For EC keys there is only one supported signature algorithm depending
+	// on the curve parameters.
+	if k.signatureAlgorithm.HeaderParam() != alg {
+		return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
+	}
+
+	// signature is the concatenation of (r, s), base64Url encoded.
+	sigLength := len(signature)
+	expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
+	if sigLength != expectedOctetLength {
+		return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
+	}
+
+	rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
+	r := new(big.Int).SetBytes(rBytes)
+	s := new(big.Int).SetBytes(sBytes)
+
+	hasher := k.signatureAlgorithm.HashID().New()
+	_, err := io.Copy(hasher, data)
+	if err != nil {
+		return fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	if !ecdsa.Verify(k.PublicKey, hash, r, s) {
+		return errors.New("invalid signature")
+	}
+
+	return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. For
+// this type the result is always an *ecdsa.PublicKey.
+func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
+	return k.PublicKey
+}
+
+func (k *ecPublicKey) toMap() map[string]interface{} {
+	jwk := make(map[string]interface{})
+	for k, v := range k.extended {
+		jwk[k] = v
+	}
+	jwk["kty"] = k.KeyType()
+	jwk["kid"] = k.KeyID()
+	jwk["crv"] = k.CurveName()
+
+	xBytes := k.X.Bytes()
+	yBytes := k.Y.Bytes()
+	octetLength := (k.Params().BitSize + 7) >> 3
+	// MUST include leading zeros in the output so that x, y are each
+	// *octetLength* bytes long.
+	xBuf := make([]byte, octetLength-len(xBytes), octetLength)
+	yBuf := make([]byte, octetLength-len(yBytes), octetLength)
+	xBuf = append(xBuf, xBytes...)
+	yBuf = append(yBuf, yBytes...)
+
+	jwk["x"] = joseBase64UrlEncode(xBuf)
+	jwk["y"] = joseBase64UrlEncode(yBuf)
+
+	return jwk
+}
+
+// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPublicKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
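Verify above is the mirror image of ecPrivateKey.Sign further below: Sign emits the raw (r, s) concatenation plus the JWS "alg" name, and Verify consumes exactly that pair. A hedged round-trip sketch (msg is a placeholder; bytes and crypto imports and a function returning error are assumed):

    key, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        return err
    }
    msg := []byte("payload to protect")
    // For EC keys the crypto.SHA256 hint is effectively ignored:
    // the hash is fixed by the curve (ES256 for P-256).
    sig, alg, err := key.Sign(bytes.NewReader(msg), crypto.SHA256)
    if err != nil {
        return err
    }
    if err := key.PublicKey().Verify(bytes.NewReader(msg), alg, sig); err != nil {
        return err
    }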
+// PEMBlock serializes this Public Key to DER-encoded PKIX format.
+func (k *ecPublicKey) PEMBlock() (*pem.Block, error) {
+	derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err)
+	}
+	k.extended["kid"] = k.KeyID() // For display purposes.
+	return createPemBlock("PUBLIC KEY", derBytes, k.extended)
+}
+
+func (k *ecPublicKey) AddExtendedField(field string, value interface{}) {
+	k.extended[field] = value
+}
+
+func (k *ecPublicKey) GetExtendedField(field string) interface{} {
+	v, ok := k.extended[field]
+	if !ok {
+		return nil
+	}
+	return v
+}
+
+func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) {
+	// JWK key type (kty) has already been determined to be "EC".
+	// Need to extract 'crv', 'x', 'y', and 'kid' and check for
+	// consistency.
+
+	// Get the curve identifier value.
+	crv, err := stringFromMap(jwk, "crv")
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err)
+	}
+
+	var (
+		curve  elliptic.Curve
+		sigAlg *signatureAlgorithm
+	)
+
+	switch {
+	case crv == "P-256":
+		curve = elliptic.P256()
+		sigAlg = es256
+	case crv == "P-384":
+		curve = elliptic.P384()
+		sigAlg = es384
+	case crv == "P-521":
+		curve = elliptic.P521()
+		sigAlg = es512
+	default:
+		return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q", crv)
+	}
+
+	// Get the X and Y coordinates for the public key point.
+	xB64Url, err := stringFromMap(jwk, "x")
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
+	}
+	x, err := parseECCoordinate(xB64Url, curve)
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
+	}
+
+	yB64Url, err := stringFromMap(jwk, "y")
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
+	}
+	y, err := parseECCoordinate(yB64Url, curve)
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
+	}
+
+	key := &ecPublicKey{
+		PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y},
+		curveName: crv, signatureAlgorithm: sigAlg,
+	}
+
+	// Key ID is optional too, but if it exists, it should match the key.
+	_, ok := jwk["kid"]
+	if ok {
+		kid, err := stringFromMap(jwk, "kid")
+		if err != nil {
+			return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
+		}
+		if kid != key.KeyID() {
+			return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
+		}
+	}
+
+	key.extended = jwk
+
+	return key, nil
+}
+
+/*
+ * EC DSA PRIVATE KEY
+ */
+
+// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
+// algorithms.
+type ecPrivateKey struct {
+	ecPublicKey
+	*ecdsa.PrivateKey
+}
+
+func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
+	publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *ecPrivateKey) PublicKey() PublicKey {
+	return &k.ecPublicKey
+}
+
+func (k *ecPrivateKey) String() string {
+	return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the elliptic curve private key. If the specified hashing algorithm is
+// supported by this key, that hash function is used to generate the signature;
+// otherwise the default hashing algorithm for this key is used. Returns
+// the signature and the name of the JWK signature algorithm used, e.g.,
+// "ES256", "ES384", "ES512".
+func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+	// Generate a signature of the data using the internal alg.
+	// The given hashID is only a suggestion, and since EC keys only support
+	// one signature/hash algorithm given the curve name, we disregard it for
+	// the elliptic curve JWK signature implementation.
+	r, s, err := k.sign(data, hashID)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+
+	rBytes, sBytes := r.Bytes(), s.Bytes()
+	octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+	// MUST include leading zeros in the output
+	rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+	sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+	rBuf = append(rBuf, rBytes...)
+	sBuf = append(sBuf, sBytes...)
+
+	signature = append(rBuf, sBuf...)
+	alg = k.signatureAlgorithm.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. For
+// this type the result is always an *ecdsa.PrivateKey.
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+	jwk := k.ecPublicKey.toMap()
+
+	dBytes := k.D.Bytes()
+	// The length of this octet string MUST be ceiling(log-base-2(n)/8)
+	// octets (where n is the order of the curve). This is because the private
+	// key d must be in the interval [1, n-1] so the bitlength of d should be
+	// no larger than the bitlength of n-1. The easiest way to find the octet
+	// length is to take bitlength(n-1), add 7 to force a carry, and shift this
+	// bit sequence right by 3, which is essentially dividing by 8 and adding
+	// 1 if there is any remainder. Thus, the private key value d should be
+	// output to (bitlength(n-1)+7)>>3 octets.
+	n := k.ecPublicKey.Params().N
+	octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+	// Create a buffer with the necessary zero-padding.
+	dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+	dBuf = append(dBuf, dBytes...)
+
+	jwk["d"] = joseBase64UrlEncode(dBuf)
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to a DER-encoded "EC PRIVATE KEY" (SEC 1) PEM block.
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded format: %s", err)
+	}
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
+
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+	dB64Url, err := stringFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+	}
+
+	// JWK key type (kty) has already been determined to be "EC".
+	// Need to extract the public key information, then extract the private
+	// key value 'd'.
+	publicKey, err := ecPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+	}
+
+	key := &ecPrivateKey{
+		ecPublicKey: *publicKey,
+		PrivateKey: &ecdsa.PrivateKey{
+			PublicKey: *publicKey.PublicKey,
+			D:         d,
+		},
+	}
+
+	return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+	k = new(ecPrivateKey)
+	k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+	k.extended = make(map[string]interface{})
+
+	return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P256())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+	}
+
+	k.curveName = "P-256"
+	k.signatureAlgorithm = es256
+
+	return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P384())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+	}
+
+	k.curveName = "P-384"
+	k.signatureAlgorithm = es384
+
+	return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P521())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
+	}
+
+	k.curveName = "P-521"
+	k.signatureAlgorithm = es512
+
+	return k, nil
+}
diff --git a/vendor/github.com/containers/libtrust/ec_key_no_openssl.go b/vendor/github.com/containers/libtrust/ec_key_no_openssl.go
new file mode 100644
index 000000000..d6cdaca3f
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/ec_key_no_openssl.go
@@ -0,0 +1,23 @@
+// +build !libtrust_openssl
+
+package libtrust
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+func (k *ecPrivateKey) sign(data io.Reader, hashID crypto.Hash) (r, s *big.Int, err error) {
+	hasher := k.signatureAlgorithm.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, nil, fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	return ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
+}
diff --git a/vendor/github.com/containers/libtrust/ec_key_openssl.go b/vendor/github.com/containers/libtrust/ec_key_openssl.go
new file mode 100644
index 000000000..4137511f1
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/ec_key_openssl.go
@@ -0,0 +1,24 @@
+// +build libtrust_openssl
+
+package libtrust
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+func (k *ecPrivateKey) sign(data io.Reader, hashID crypto.Hash) (r, s *big.Int, err error) {
+	hID := k.signatureAlgorithm.HashID()
+	buf := new(bytes.Buffer)
+	_, err = buf.ReadFrom(data)
+	if err != nil {
+		return nil, nil, fmt.Errorf("error reading data: %s", err)
+	}
+
+	return ecdsa.HashSign(rand.Reader, k.PrivateKey, buf.Bytes(), hID)
+}
diff --git a/vendor/github.com/containers/libtrust/filter.go b/vendor/github.com/containers/libtrust/filter.go
new file mode 100644
index 000000000..5b2b4fca6
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/filter.go
@@ -0,0 +1,50 @@
+package libtrust
+
+import (
+	"path/filepath"
+)
+
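FilterByHosts below matches keys against a "hosts" extended field using filepath.Match patterns. A sketch of the intended use, assuming AddExtendedField is available on the key as it is on the EC implementation above (the host names are placeholders):

    pub := key.PublicKey()
    pub.AddExtendedField("hosts", []string{"registry.example.com", "*.example.org"})
    trusted, err := libtrust.FilterByHosts([]libtrust.PublicKey{pub}, "registry.example.com", true)
    if err != nil {
        return err
    }
    // trusted contains pub exactly once, because one of its patterns matched.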
+// FilterByHosts filters the list of PublicKeys to only those which contain a
+// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
+// then keys which do not specify any hosts are also returned.
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
+	filtered := make([]PublicKey, 0, len(keys))
+
+	for _, pubKey := range keys {
+		var hosts []string
+		switch v := pubKey.GetExtendedField("hosts").(type) {
+		case []string:
+			hosts = v
+		case []interface{}:
+			for _, value := range v {
+				h, ok := value.(string)
+				if !ok {
+					continue
+				}
+				hosts = append(hosts, h)
+			}
+		}
+
+		if len(hosts) == 0 {
+			if includeEmpty {
+				filtered = append(filtered, pubKey)
+			}
+			continue
+		}
+
+		// Check if any hosts match pattern
+		for _, hostPattern := range hosts {
+			match, err := filepath.Match(hostPattern, host)
+			if err != nil {
+				return nil, err
+			}
+
+			if match {
+				filtered = append(filtered, pubKey)
+				break // append each matching key only once, even if several patterns match
+			}
+		}
+	}
+
+	return filtered, nil
+}
diff --git a/vendor/github.com/containers/libtrust/hash.go b/vendor/github.com/containers/libtrust/hash.go
new file mode 100644
index 000000000..a2df787dd
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/hash.go
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+	"crypto"
+	_ "crypto/sha256" // Register SHA224 and SHA256
+	_ "crypto/sha512" // Register SHA384 and SHA512
+	"fmt"
+)
+
+type signatureAlgorithm struct {
+	algHeaderParam string
+	hashID         crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+	return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+	return h.hashID
+}
+
+var (
+	rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+	rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+	rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+	es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+	es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+	es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+	switch {
+	case alg == "RS256":
+		return rs256, nil
+	case alg == "RS384":
+		return rs384, nil
+	case alg == "RS512":
+		return rs512, nil
+	default:
+		return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+	}
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+	switch {
+	case hashID == crypto.SHA512:
+		return rs512
+	case hashID == crypto.SHA384:
+		return rs384
+	case hashID == crypto.SHA256:
+		fallthrough
+	default:
+		return rs256
+	}
+}
diff --git a/vendor/github.com/containers/libtrust/jsonsign.go b/vendor/github.com/containers/libtrust/jsonsign.go
new file mode 100644
index 000000000..cb2ca9a76
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/jsonsign.go
@@ -0,0 +1,657 @@
+package libtrust
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sort"
+	"time"
+	"unicode"
+)
+
+var (
+	// ErrInvalidSignContent is used when the content to be signed is invalid.
+	ErrInvalidSignContent = errors.New("invalid sign content")
+
+	// ErrInvalidJSONContent is used when invalid json is encountered.
+	ErrInvalidJSONContent = errors.New("invalid json content")
+
+	// ErrMissingSignatureKey is used when the specified signature key
+	// does not exist in the JSON content.
+ ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+
+			sigBytes, err := joseBase64UrlDecode(signature.Signature)
+			if err != nil {
+				return nil, err
+			}
+
+			err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+	}
+	return chains, nil
+}
+
+// JWS returns JSON serialized JWS according to
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
+func (js *JSONSignature) JWS() ([]byte, error) {
+	if len(js.signatures) == 0 {
+		return nil, errors.New("missing signature")
+	}
+
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	jsonMap := map[string]interface{}{
+		"payload":    js.payload,
+		"signatures": js.signatures,
+	}
+
+	return json.MarshalIndent(jsonMap, "", "   ")
+}
+
+func notSpace(r rune) bool {
+	return !unicode.IsSpace(r)
+}
+
+func detectJSONIndent(jsonContent []byte) (indent string) {
+	if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
+		quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
+		if quoteIndex > 0 {
+			indent = string(jsonContent[2 : quoteIndex+1])
+		}
+	}
+	return
+}
+
+type jsParsedHeader struct {
+	JWK       json.RawMessage `json:"jwk"`
+	Algorithm string          `json:"alg"`
+	Chain     []string        `json:"x5c"`
+}
+
+type jsParsedSignature struct {
+	Header    jsParsedHeader `json:"header"`
+	Signature string         `json:"signature"`
+	Protected string         `json:"protected"`
+}
+
+// ParseJWS parses a JWS serialized JSON object into a JSONSignature.
+func ParseJWS(content []byte) (*JSONSignature, error) {
+	type jsParsed struct {
+		Payload    string              `json:"payload"`
+		Signatures []jsParsedSignature `json:"signatures"`
+	}
+	parsed := &jsParsed{}
+	err := json.Unmarshal(content, parsed)
+	if err != nil {
+		return nil, err
+	}
+	if len(parsed.Signatures) == 0 {
+		return nil, errors.New("missing signatures")
+	}
+	payload, err := joseBase64UrlDecode(parsed.Payload)
+	if err != nil {
+		return nil, err
+	}
+
+	js, err := NewJSONSignature(payload)
+	if err != nil {
+		return nil, err
+	}
+	js.signatures = make([]jsSignature, len(parsed.Signatures))
+	for i, signature := range parsed.Signatures {
+		header := jsHeader{
+			Algorithm: signature.Header.Algorithm,
+		}
+		if signature.Header.Chain != nil {
+			header.Chain = signature.Header.Chain
+		}
+		if signature.Header.JWK != nil {
+			publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
+			if err != nil {
+				return nil, err
+			}
+			header.JWK = publicKey
+		}
+		js.signatures[i] = jsSignature{
+			Header:    header,
+			Signature: signature.Signature,
+			Protected: signature.Protected,
+		}
+	}
+
+	return js, nil
+}
+
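ParseJWS is the inverse of the JWS method above, which makes the whole lifecycle easy to sketch; a hedged round trip, assuming key is a libtrust.PrivateKey and content is a JSON object as bytes:

    js, err := libtrust.NewJSONSignature(content)
    if err != nil {
        return err
    }
    if err := js.Sign(key); err != nil {
        return err
    }
    serialized, err := js.JWS() // JWS JSON serialization, signatures sorted
    if err != nil {
        return err
    }
    parsed, err := libtrust.ParseJWS(serialized)
    if err != nil {
        return err
    }
    keys, err := parsed.Verify() // returns the public keys that signed
    if err != nil {
        return err
    }
    _ = keys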
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + if len(signatures) > 0 { + for _, signature := range signatures { + var parsedJSig jsParsedSignature + + if err := json.Unmarshal(signature, &parsedJSig); err != nil { + return nil, err + } + + // TODO(stevvooe): A lot of the code below is repeated in + // ParseJWS. It will require more refactoring to fix that. + jsig := jsSignature{ + Header: jsHeader{ + Algorithm: parsedJSig.Header.Algorithm, + }, + Signature: parsedJSig.Signature, + Protected: parsedJSig.Protected, + } + + if parsedJSig.Header.Chain != nil { + jsig.Header.Chain = parsedJSig.Header.Chain + } + + if parsedJSig.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) + if err != nil { + return nil, err + } + jsig.Header.JWK = publicKey + } + + js.signatures = append(js.signatures, jsig) + } + } + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. +func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
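+//
+// For example (an illustrative sketch; signedContent and the "signatures"
+// key name are assumptions about the caller's document):
+//
+//	js, err := ParsePrettySignature(signedContent, "signatures")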
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. 
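+//
+// A minimal sketch, mirroring ParsePrettySignature above (the signature key
+// name "signatures" is an assumption):
+//
+//	pretty, err := js.PrettySignature("signatures")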
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("no signatures") + } + payload, err := joseBase64UrlDecode(js.payload) + if err != nil { + return nil, err + } + payload = payload[:js.formatLength] + + sort.Sort(jsSignaturesSorted(js.signatures)) + + var marshalled []byte + var marshallErr error + if js.indent != "" { + marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) + } else { + marshalled, marshallErr = json.Marshal(js.signatures) + } + if marshallErr != nil { + return nil, marshallErr + } + + buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) + buf.Write(payload) + buf.WriteByte(',') + if js.indent != "" { + buf.WriteByte('\n') + buf.WriteString(js.indent) + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\": ") + buf.Write(marshalled) + buf.WriteByte('\n') + } else { + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\":") + buf.Write(marshalled) + } + buf.WriteByte('}') + + return buf.Bytes(), nil +} + +// Signatures provides the signatures on this JWS as opaque blobs, sorted by +// keyID. These blobs can be stored and reassembled with payloads. Internally, +// they are simply marshaled json web signatures but implementations should +// not rely on this. +func (js *JSONSignature) Signatures() ([][]byte, error) { + sort.Sort(jsSignaturesSorted(js.signatures)) + + var sb [][]byte + for _, jsig := range js.signatures { + p, err := json.Marshal(jsig) + if err != nil { + return nil, err + } + + sb = append(sb, p) + } + + return sb, nil +} + +// Merge combines the signatures from one or more other signatures into the +// method receiver. If the payloads differ for any argument, an error will be +// returned and the receiver will not be modified. +func (js *JSONSignature) Merge(others ...*JSONSignature) error { + merged := js.signatures + for _, other := range others { + if js.payload != other.payload { + return fmt.Errorf("payloads differ from merge target") + } + merged = append(merged, other.signatures...) + } + + js.signatures = merged + return nil +} diff --git a/vendor/github.com/containers/libtrust/key.go b/vendor/github.com/containers/libtrust/key.go new file mode 100644 index 000000000..73642db2a --- /dev/null +++ b/vendor/github.com/containers/libtrust/key.go @@ -0,0 +1,253 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" +) + +// PublicKey is a generic interface for a Public Key. +type PublicKey interface { + // KeyType returns the key type for this key. For elliptic curve keys, + // this value should be "EC". For RSA keys, this value should be "RSA". + KeyType() string + // KeyID returns a distinct identifier which is unique to this Public Key. + // The format generated by this library is a base32 encoding of a 240 bit + // hash of the public key data divided into 12 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + KeyID() string + // Verify verifyies the signature of the data in the io.Reader using this + // Public Key. The alg parameter should identify the digital signature + // algorithm which was used to produce the signature and should be + // supported by this public key. Returns a nil error if the signature + // is valid. 
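+	// For example (an illustrative sketch; data and sig are assumed to
+	// come from a matching Sign call):
+	//
+	//	err := pubKey.Verify(bytes.NewReader(data), "RS256", sig)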
+	Verify(data io.Reader, alg string, signature []byte) error
+	// CryptoPublicKey returns the internal object which can be used as a
+	// crypto.PublicKey for use with other standard library operations. The type
+	// is either *rsa.PublicKey or *ecdsa.PublicKey.
+	CryptoPublicKey() crypto.PublicKey
+	// These public keys can be serialized to the standard JSON encoding for
+	// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+	// Algorithms.
+	MarshalJSON() ([]byte, error)
+	// These keys can also be serialized to the standard PEM encoding.
+	PEMBlock() (*pem.Block, error)
+	// The string representation of a key is its key type and ID.
+	String() string
+	AddExtendedField(string, interface{})
+	GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+	// A PrivateKey contains all fields and methods of a PublicKey of the
+	// same type. The MarshalJSON method also outputs the private key as a
+	// JSON Web Key, and the PEMBlock method outputs the private key as a
+	// PEM block.
+	PublicKey
+	// PublicKey returns the PublicKey associated with this PrivateKey.
+	PublicKey() PublicKey
+	// Sign signs the data read from the io.Reader using a signature algorithm
+	// supported by the private key. If the specified hashing algorithm is
+	// supported by this key, that hash function is used to generate the
+	// signature; otherwise the default hashing algorithm for this key is
+	// used. Returns the signature and identifier of the algorithm used.
+	Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+	// CryptoPrivateKey returns the internal object which can be used as a
+	// crypto.PrivateKey for use with other standard library operations. The
+	// type is either *rsa.PrivateKey or *ecdsa.PrivateKey.
+	CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+	switch cryptoPublicKey := cryptoPublicKey.(type) {
+	case *ecdsa.PublicKey:
+		return fromECPublicKey(cryptoPublicKey)
+	case *rsa.PublicKey:
+		return fromRSAPublicKey(cryptoPublicKey), nil
+	default:
+		return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+	}
+}
+
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+	switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+	case *ecdsa.PrivateKey:
+		return fromECPrivateKey(cryptoPrivateKey)
+	case *rsa.PrivateKey:
+		return fromRSAPrivateKey(cryptoPrivateKey), nil
+	default:
+		return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+	}
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
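+//
+// For example (a sketch; pemData is assumed to hold a "PUBLIC KEY" block):
+//
+//	pubKey, err := UnmarshalPublicKeyPEM(pemData)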
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + return pubKeyFromPEMBlock(pemBlock) +} + +// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of +// PEM blocks appended one after the other and returns a slice of PublicKey +// objects that it finds. +func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { + pubKeys := []PublicKey{} + + for { + var pemBlock *pem.Block + pemBlock, data = pem.Decode(data) + if pemBlock == nil { + break + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + pubKey, err := pubKeyFromPEMBlock(pemBlock) + if err != nil { + return nil, err + } + + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust +// PrivateKey or an error if there is a problem with the encoding. +func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } + + var key PrivateKey + + switch { + case pemBlock.Type == "RSA PRIVATE KEY": + rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) + } + key = fromRSAPrivateKey(rsaPrivateKey) + case pemBlock.Type == "EC PRIVATE KEY": + ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) + } + key, err = fromECPrivateKey(ecPrivateKey) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) + } + + addPEMHeadersToKey(pemBlock, key.PublicKey()) + + return key, nil +} + +// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic +// Public Key to be used with libtrust. +func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Public Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Public Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC public key. + return ecPublicKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA public key. + return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. 
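+//
+// For example (an illustrative sketch; jwkSetData is assumed to hold a
+// JSON Web Key Set document):
+//
+//	keys, err := UnmarshalPublicKeyJWKSet(jwkSetData)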
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff --git a/vendor/github.com/containers/libtrust/key_files.go b/vendor/github.com/containers/libtrust/key_files.go new file mode 100644 index 000000000..c526de545 --- /dev/null +++ b/vendor/github.com/containers/libtrust/key_files.go @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. + ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). 
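+//
+// For example (a sketch; the path is an assumption):
+//
+//	pubKey, err := LoadPublicKeyFile("/etc/docker/trust/server-key.json")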
+func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. 
+		return []json.RawMessage{}, nil
+	}
+
+	keySet := jwkSet{}
+
+	err := json.Unmarshal(data, &keySet)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
+	}
+
+	return keySet.Keys, nil
+}
+
+func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
+	contents, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return nil, err
+	}
+
+	return UnmarshalPublicKeyJWKSet(contents)
+}
+
+func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
+	data, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return nil, err
+	}
+
+	return UnmarshalPublicKeyPEMBundle(data)
+}
+
+// AddKeySetFile adds a key to a key set
+func AddKeySetFile(filename string, key PublicKey) error {
+	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+		return addKeySetJSONFile(filename, key)
+	}
+
+	// Must be a PEM format file
+	return addKeySetPEMFile(filename, key)
+}
+
+func addKeySetJSONFile(filename string, key PublicKey) error {
+	encodedKey, err := json.Marshal(key)
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted client key: %s", err)
+	}
+
+	contents, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return err
+	}
+
+	rawEntries, err := loadJSONKeySetRaw(contents)
+	if err != nil {
+		return err
+	}
+
+	rawEntries = append(rawEntries, json.RawMessage(encodedKey))
+	entriesWrapper := jwkSet{Keys: rawEntries}
+
+	encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted client keys: %s", err)
+	}
+
+	err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
+	}
+
+	return nil
+}
+
+func addKeySetPEMFile(filename string, key PublicKey) error {
+	// Encode to PEM, open file for appending, write PEM.
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+	}
+	defer file.Close()
+
+	pemBlock, err := key.PEMBlock()
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted key: %s", err)
+	}
+
+	_, err = file.Write(pem.EncodeToMemory(pemBlock))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted keys file: %s", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/libtrust/key_manager.go b/vendor/github.com/containers/libtrust/key_manager.go
new file mode 100644
index 000000000..9a98ae357
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/key_manager.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path"
+	"sync"
+)
+
+// ClientKeyManager manages client keys on the filesystem
+type ClientKeyManager struct {
+	key        PrivateKey
+	clientFile string
+	clientDir  string
+
+	clientLock sync.RWMutex
+	clients    []PublicKey
+
+	configLock sync.Mutex
+	configs    []*tls.Config
+}
+
+// NewClientKeyManager loads a new manager from a set of key files
+// and manages them with the given private key.
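+//
+// Illustrative call (the paths are assumptions):
+//
+//	mgr, err := NewClientKeyManager(trustKey, "/etc/docker/authorized-keys.json", "/etc/docker/authorized-keys.d")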
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { + m := &ClientKeyManager{ + key: trustKey, + clientFile: clientFile, + clientDir: clientDir, + } + if err := m.loadKeys(); err != nil { + return nil, err + } + // TODO Start watching file and directory + + return m, nil +} + +func (c *ClientKeyManager) loadKeys() (err error) { + // Load authorized keys file + var clients []PublicKey + if c.clientFile != "" { + clients, err = LoadKeySetFile(c.clientFile) + if err != nil { + return fmt.Errorf("unable to load authorized keys: %s", err) + } + } + + // Add clients from authorized keys directory + files, err := ioutil.ReadDir(c.clientDir) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open authorized keys directory: %s", err) + } + for _, f := range files { + if !f.IsDir() { + publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) + if err != nil { + return fmt.Errorf("unable to load authorized key file: %s", err) + } + clients = append(clients, publicKey) + } + } + + c.clientLock.Lock() + c.clients = clients + c.clientLock.Unlock() + + return nil +} + +// RegisterTLSConfig registers a tls configuration to manager +// such that any changes to the keys may be reflected in +// the tls client CA pool +func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { + c.clientLock.RLock() + certPool, err := GenerateCACertPool(c.key, c.clients) + if err != nil { + return fmt.Errorf("CA pool generation error: %s", err) + } + c.clientLock.RUnlock() + + tlsConfig.ClientCAs = certPool + + c.configLock.Lock() + c.configs = append(c.configs, tlsConfig) + c.configLock.Unlock() + + return nil +} + +// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for +// libtrust identity authentication for the domain specified +func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if err := clients.RegisterTLSConfig(tlsConfig); err != nil { + return nil, err + } + + // Generate cert + ips, domains, err := parseAddr(addr) + if err != nil { + return nil, err + } + // add domain that it expects clients to use + domains = append(domains, domain) + x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + return tlsConfig, nil +} + +// NewCertAuthTLSConfig creates a tls.Config for the server to use for +// certificate authentication +func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + + // Verify client certificates against a CA? 
+ if caPath != "" { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + + return tlsConfig, nil +} + +func newTLSConfig() *tls.Config { + return &tls.Config{ + NextProtos: []string{"http/1.1"}, + // Avoid fallback on insecure SSL protocols + MinVersion: tls.VersionTLS10, + } +} + +// parseAddr parses an address into an array of IPs and domains +func parseAddr(addr string) ([]net.IP, []string, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, nil, err + } + var domains []string + var ips []net.IP + ip := net.ParseIP(host) + if ip != nil { + ips = []net.IP{ip} + } else { + domains = []string{host} + } + return ips, domains, nil +} diff --git a/vendor/github.com/containers/libtrust/rsa_key.go b/vendor/github.com/containers/libtrust/rsa_key.go new file mode 100644 index 000000000..dac4cacf2 --- /dev/null +++ b/vendor/github.com/containers/libtrust/rsa_key.go @@ -0,0 +1,427 @@ +package libtrust + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * RSA DSA PUBLIC KEY + */ + +// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. +type rsaPublicKey struct { + *rsa.PublicKey + extended map[string]interface{} +} + +func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { + return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} +} + +// KeyType returns the JWK key type for RSA keys, i.e., "RSA". +func (k *rsaPublicKey) KeyType() string { + return "RSA" +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *rsaPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *rsaPublicKey) String() string { + return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this Public Key. +// The alg parameter should be the name of the JWA digital signature algorithm +// which was used to produce the signature and should be supported by this +// public key. Returns a nil error if the signature is valid. +func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // Verify the signature of the given date, return non-nil error if valid. + sigAlg, err := rsaSignatureAlgorithmByName(alg) + if err != nil { + return fmt.Errorf("unable to verify Signature: %s", err) + } + + hasher := sigAlg.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) + if err != nil { + return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. 
The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *rsaPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) + jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *rsaPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract 'n', 'e', and 'kid' and check for + // consistency. + + // Get the modulus parameter N. + nB64Url, err := stringFromMap(jwk, "n") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + n, err := parseRSAModulusParam(nB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + // Get the public exponent E. + eB64Url, err := stringFromMap(jwk, "e") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + e, err := parseRSAPublicExponentParam(eB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + key := &rsaPublicKey{ + PublicKey: &rsa.PublicKey{N: n, E: e}, + } + + // Key ID is optional, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) + } + } + + if _, ok := jwk["d"]; ok { + return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") + } + + key.extended = jwk + + return key, nil +} + +/* + * RSA DSA PRIVATE KEY + */ + +// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. +type rsaPrivateKey struct { + rsaPublicKey + *rsa.PrivateKey +} + +func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { + return &rsaPrivateKey{ + *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), + cryptoPrivateKey, + } +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *rsaPrivateKey) PublicKey() PublicKey { + return &k.rsaPublicKey +} + +func (k *rsaPrivateKey) String() string { + return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the RSA private key. 
+// If the specified hashing algorithm is supported by this key, that hash
+// function is used to generate the signature; otherwise the default hashing
+// algorithm for this key is used. Returns the signature and the name of the
+// JWK signature algorithm used, e.g., "RS256", "RS384", "RS512".
+func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+	// Generate a signature of the data using the internal alg.
+	sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
+	hasher := sigAlg.HashID().New()
+
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+
+	alg = sigAlg.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey.
+func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *rsaPrivateKey) toMap() map[string]interface{} {
+	k.Precompute() // Make sure the precomputed values are stored.
+	jwk := k.rsaPublicKey.toMap()
+
+	jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
+	jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
+	jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
+	jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
+	jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
+	jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
+
+	otherPrimes := k.Primes[2:]
+
+	if len(otherPrimes) > 0 {
+		otherPrimesInfo := make([]interface{}, len(otherPrimes))
+		for i, r := range otherPrimes {
+			otherPrimeInfo := make(map[string]string, 3)
+			otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
+			crtVal := k.Precomputed.CRTValues[i]
+			otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
+			otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
+			otherPrimesInfo[i] = otherPrimeInfo
+		}
+		jwk["oth"] = otherPrimesInfo
+	}
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format.
+func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
+}
+
+func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
+	// The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
+	// only the private key exponent 'd' is REQUIRED, the others are just for
+	// signature/decryption optimizations and SHOULD be included when the JWK
+	// is produced. We MAY choose to accept a JWK which only includes 'd', but
+	// we choose not to accept it without the extra fields. Only the 'oth'
+	// field will be optional (for multi-prime keys).
+	privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
+	}
+	firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+	}
+
+	var oth interface{}
+	if _, ok := jwk["oth"]; ok {
+		oth = jwk["oth"]
+		delete(jwk, "oth")
+	}
+
+	// JWK key type (kty) has already been determined to be "RSA".
+	// Need to extract the public key information, then extract the private
+	// key values.
+	publicKey, err := rsaPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	privateKey := &rsa.PrivateKey{
+		PublicKey: *publicKey.PublicKey,
+		D:         privateExponent,
+		Primes:    []*big.Int{firstPrimeFactor, secondPrimeFactor},
+		Precomputed: rsa.PrecomputedValues{
+			Dp:   firstFactorCRT,
+			Dq:   secondFactorCRT,
+			Qinv: crtCoeff,
+		},
+	}
+
+	if oth != nil {
+		// Should be an array of more JSON objects.
+		otherPrimesInfo, ok := oth.([]interface{})
+		if !ok {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
+		}
+		numOtherPrimeFactors := len(otherPrimesInfo)
+		if numOtherPrimeFactors == 0 {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty")
+		}
+		otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
+		productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
+		crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
+
+		for i, val := range otherPrimesInfo {
+			otherPrimeinfo, ok := val.(map[string]interface{})
+			if !ok {
+				return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
+			}
+
+			otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+			}
+			otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+			}
+			otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+			}
+
+			// Take a pointer so the assignments below update the slice
+			// element rather than a local copy.
+			crtValue := &crtValues[i]
+			crtValue.Exp = otherFactorCRT
+			crtValue.Coeff = otherCrtCoeff
+			crtValue.R = productOfPrimes
+			otherPrimeFactors[i] = otherPrimeFactor
+			productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
+		}
+
+		privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
+		privateKey.Precomputed.CRTValues = crtValues
+	}
+
+	key := &rsaPrivateKey{
+		rsaPublicKey: *publicKey,
+		PrivateKey:   privateKey,
+	}
+
+	return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. +func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff --git a/vendor/github.com/containers/libtrust/util.go b/vendor/github.com/containers/libtrust/util.go new file mode 100644 index 000000000..a5a101d3f --- /dev/null +++ b/vendor/github.com/containers/libtrust/util.go @@ -0,0 +1,363 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/elliptic" + "crypto/tls" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +// LoadOrCreateTrustKey will load a PrivateKey from the specified path +func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { + if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { + return nil, err + } + + trustKey, err := LoadKeyFile(trustKeyPath) + if err == ErrKeyFileDoesNotExist { + trustKey, err = GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("error generating key: %s", err) + } + + if err := SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("error saving key file: %s", err) + } + + dir, file := filepath.Split(trustKeyPath) + if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { + return nil, fmt.Errorf("error saving public key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("error loading key file: %s", err) + } + return trustKey, nil +} + +// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity +// based authentication from the specified dockerUrl, the rootConfigPath and +// the server name to which it is connecting. +// If trustUnknownHosts is true it will automatically add the host to the +// known-hosts.json in rootConfigPath. 
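+//
+// A sketch of a typical call (the URL and path are assumptions):
+//
+//	tlsCfg, err := NewIdentityAuthTLSClientConfig("tcp://docker.example.com:2376", true, "/root/.docker", "docker.example.com")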
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url") + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { + if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) + } + } + } + + testConn.Close() + tlsConfig.InsecureSkipVerify = false + + return tlsConfig, nil +} + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
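+// For example (illustrative): "c3VyZQ" has length 6, and 6 mod 4 == 2, so
+// "==" is appended and "c3VyZQ==" is decoded.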
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + s = strings.Replace(s, "\n", "", -1) + s = strings.Replace(s, " ", "", -1) + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. 
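+	// For example (illustrative): e = 65537 is 0x00, 0x01, 0x00, 0x01 in
+	// big-endian order, so the leading zero octet is dropped and E
+	// serializes to the three octets 0x01, 0x00, 0x01.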
+	// E is supposed to be 65537 for performance and security reasons
+	// and is what golang's rsa package generates, but it might be
+	// different if imported from some other generator.
+	buf := make([]byte, 4)
+	binary.BigEndian.PutUint32(buf, uint32(e))
+	var i int
+	// Find the first non-zero octet within buf.
+	for i = 0; i < len(buf); i++ {
+		if buf[i] != 0 {
+			break
+		}
+	}
+	return buf[i:]
+}
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+	eBytes, err := joseBase64UrlDecode(eB64Url)
+	if err != nil {
+		return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+	// Only the minimum number of bytes were used to represent E, but
+	// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+	// to add zero padding if necessary.
+	byteLen := len(eBytes)
+	if byteLen > 4 {
+		return 0, fmt.Errorf("invalid public exponent: expected at most 4 octets, got %d", byteLen)
+	}
+	buf := make([]byte, 4-byteLen, 4)
+	eBytes = append(buf, eBytes...)
+
+	return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+	b64Url, err := stringFromMap(m, key)
+	if err != nil {
+		return nil, err
+	}
+
+	paramBytes, err := joseBase64UrlDecode(b64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+	pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+	for k, v := range headers {
+		switch val := v.(type) {
+		case string:
+			pemBlock.Headers[k] = val
+		case []string:
+			if k == "hosts" {
+				pemBlock.Headers[k] = strings.Join(val, ",")
+			} else {
+				// Reject other []string headers rather than silently
+				// dropping them.
+				return nil, fmt.Errorf("unable to encode PEM header %q: unsupported []string value", k)
+			}
+		default:
+			// Reject non-encodable types rather than silently dropping
+			// the header.
+			return nil, fmt.Errorf("unable to encode PEM header %q: unsupported type %T", k, v)
+		}
+	}
+
+	return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+	cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+	}
+
+	pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	addPEMHeadersToKey(pemBlock, pubKey)
+
+	return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+	for key, value := range pemBlock.Headers {
+		var safeVal interface{}
+		if key == "hosts" {
+			safeVal = strings.Split(value, ",")
+		} else {
+			safeVal = value
+		}
+		pubKey.AddExtendedField(key, safeVal)
+	}
+}
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
index c6c8fb40e..d5b6cbbea 100644
--- a/vendor/github.com/docker/docker/AUTHORS
+++ b/vendor/github.com/docker/docker/AUTHORS
@@ -4,6 +4,7 @@ Aanand Prasad Aaron Davidson Aaron Feng +Aaron Hnatiw Aaron Huslage Aaron L. Xu Aaron Lehmann
@@ -44,7 +45,7 @@ Ajey Charantimath ajneu Akash Gupta Akihiro Matsushima -Akihiro Suda +Akihiro Suda Akim Demaille Akira Koyasu Akshay Karle
@@ -81,6 +82,7 @@ Alexandre Garnier Alexandre González Alexandre Jomin Alexandru Sfirlogea +Alexei Margasov Alexey Guskov Alexey Kotlyarov Alexey Shamrin
@@ -118,6 +120,7 @@ Andreas Köhler Andreas Savvides Andreas Tiefenthaler Andrei Gherzan +Andrei Vagin Andrew C.
Bodine Andrew Clay Shafer Andrew Duckworth @@ -137,6 +140,7 @@ Andrew Po Andrew Weiss Andrew Williams Andrews Medina +Andrey Kolomentsev Andrey Petrov Andrey Stolbovsky André Martins @@ -151,6 +155,7 @@ Andy Wilson Anes Hasicic Anil Belur Anil Madhavapeddy +Ankit Jain Ankush Agarwal Anonmily Anran Qiao @@ -182,6 +187,7 @@ Asad Saeeduddin Asbjørn Enge averagehuman Avi Das +Avi Kivity Avi Miller Avi Vaid ayoshitake @@ -195,6 +201,7 @@ bdevloed Ben Bonnefoy Ben Firshman Ben Golub +Ben Gould Ben Hall Ben Sargent Ben Severson @@ -208,11 +215,13 @@ Benoit Chesneau Bernerd Schaefer Bernhard M. Wiedemann Bert Goethals +Bevisy Zhang Bharath Thiruveedula Bhiraj Butala Bhumika Bayani Bilal Amarni Bill Wang +Bily Zhang Bin Liu Bingshen Wang Blake Geno @@ -302,6 +311,7 @@ Chen Min Chen Mingjie Chen Qiu Cheng-mean Liu +Chengfei Shang Chengguang Xu chenyuzhu Chetan Birajdar @@ -354,6 +364,7 @@ Cody Roseborough Coenraad Loubser Colin Dunklau Colin Hebert +Colin Panisset Colin Rice Colin Walters Collin Guarino @@ -389,6 +400,7 @@ Dan Levy Dan McPherson Dan Stine Dan Williams +Dani Hodovic Dani Louca Daniel Antlinger Daniel Dao @@ -442,6 +454,7 @@ David Mackey David Mat David Mcanulty David McKay +David P Hilton David Pelaez David R. Jenni David Röthlisberger @@ -459,6 +472,7 @@ dcylabs Debayan De Deborah Gertrude Digges deed02392 +Deep Debroy Deng Guangxing Deni Bertovic Denis Defreyne @@ -483,6 +497,7 @@ Dieter Reuter Dillon Dixon Dima Stopel Dimitri John Ledkov +Dimitris Mandalidis Dimitris Rozakis Dimitry Andric Dinesh Subhraveti @@ -496,6 +511,7 @@ Dmitri Shuralyov Dmitry Demeshchuk Dmitry Gusev Dmitry Kononenko +Dmitry Sharshakov Dmitry Shyshkin Dmitry Smirnov Dmitry V. Krivenok @@ -586,6 +602,7 @@ Ewa Czechowska Eystein Måløy Stenberg ezbercih Ezra Silvera +Fabian Kramm Fabian Lauer Fabian Raetz Fabiano Rosas @@ -644,6 +661,7 @@ Frederik Loeffert Frederik Nordahl Jul Sabroe Freek Kalter Frieder Bluemle +Fu JinLin Félix Baylac-Jacqué Félix Cantournet Gabe Rosenhouse @@ -676,6 +694,7 @@ Ghislain Bourgeois Giampaolo Mancini Gianluca Borello Gildas Cuisinier +Giovan Isa Musthofa gissehel Giuseppe Mazzotta Gleb Fotengauer-Malinovskiy @@ -708,6 +727,7 @@ gwx296173 Günter Zöchbauer haikuoliu Hakan Özler +Hamish Hutchings Hans Kristian Flaatten Hans Rødtang Hao Shu Wei @@ -715,6 +735,7 @@ Hao Zhang <21521210@zju.edu.cn> Harald Albers Harley Laue Harold Cooper +Harrison Turton Harry Zhang Harshal Patil Harshal Patil @@ -726,6 +747,7 @@ Hector Castro Helen Xie Henning Sprang Hiroshi Hatake +Hiroyuki Sasagawa Hobofan Hollie Teal Hong Xu @@ -766,6 +788,7 @@ Ilya Khlopotov imre Fitos inglesp Ingo Gottwald +Innovimax Isaac Dupree Isabel Jimenez Isao Jonas @@ -800,6 +823,7 @@ James Mills James Nesbitt James Nugent James Turnbull +James Watkins-Harvey Jamie Hannaford Jamshid Afshar Jan Keromnes @@ -832,6 +856,7 @@ jaxgeller Jay Jay Jay Kamat +Jean Rouge Jean-Baptiste Barth Jean-Baptiste Dalido Jean-Christophe Berthon @@ -866,7 +891,9 @@ Jessica Frazelle Jezeniel Zapanta Jhon Honce Ji.Zhilong +Jian Liao Jian Zhang +Jiang Jinyang Jie Luo Jihyun Hwang Jilles Oldenbeuving @@ -877,14 +904,14 @@ Jim Perrin Jimmy Cuadra Jimmy Puckett Jimmy Song -jimmyxian Jinsoo Park +Jintao Zhang +Jiri Appl Jiri Popelka Jiuyue Ma Jiří Župka -jjy -jmzwcn Joao Fernandes +Joao Trindade Joe Beda Joe Doliner Joe Ferguson @@ -923,6 +950,7 @@ Jon Johnson Jon Surrell Jon Wedaman Jonas Pfenniger +Jonathan A. Schweder Jonathan A. 
Sternberg Jonathan Boulle Jonathan Camp @@ -943,7 +971,7 @@ Jordan Jennings Jordan Sissel Jorge Marin Jorit Kleine-Möllhoff -Jose Diaz-Gonzalez +Jose Diaz-Gonzalez Joseph Anthony Pasquale Holsten Joseph Hager Joseph Kern @@ -997,7 +1025,7 @@ kargakis Karl Grzeszczak Karol Duleba Karthik Karanth -Karthik Nayak +Karthik Nayak Kasper Fabæch Brandt Kate Heddleston Katie McLaughlin @@ -1053,11 +1081,13 @@ Krasimir Georgiev Kris-Mikael Krister Kristian Haugene Kristina Zabunova +Krystian Wojcicki Kun Zhang Kunal Kushwaha Kunal Tyagi Kyle Conroy Kyle Linden +Kyle Wuolle kyu Lachlan Coote Lai Jiangshan @@ -1095,6 +1125,8 @@ Liana Lo Liang Mingqiang Liang-Chi Hsieh Liao Qingwei +Lifubang +Lihua Tang Lily Guo limsy Lin Lu @@ -1113,7 +1145,7 @@ Lloyd Dewolf Lokesh Mandvekar longliqiang88 <394564827@qq.com> Lorenz Leutgeb -Lorenzo Fontana +Lorenzo Fontana Lotus Fenn Louis Opter Luca Favatella @@ -1123,6 +1155,7 @@ Luca-Bogdan Grigorescu Lucas Chan Lucas Chi Lucas Molas +Lucas Silvestre Luciano Mores Luis Martínez de Bartolomé Izquierdo Luiz Svoboda @@ -1171,6 +1204,7 @@ Marius Gundersen Marius Sturm Marius Voila Mark Allen +Mark Jeromin Mark McGranaghan Mark McKinstry Mark Milstein @@ -1221,6 +1255,7 @@ Matthias Klumpp Matthias Kühnle Matthias Rampke Matthieu Hauglustaine +Mattias Jernberg Mauricio Garavaglia mauriyouth Max Shytikov @@ -1229,6 +1264,8 @@ Maxim Ivanov Maxim Kulkin Maxim Treskin Maxime Petazzoni +Maximiliano Maccanti +Maxwell Meaglith Ma meejah Megan Kostick @@ -1302,6 +1339,7 @@ Mitch Capper Mizuki Urushida mlarcher Mohammad Banikazemi +Mohammad Nasirifar Mohammed Aaqib Ansari Mohit Soni Moorthy RS @@ -1326,6 +1364,7 @@ Nan Monnand Deng Naoki Orii Natalie Parker Natanael Copa +Natasha Jarus Nate Brennand Nate Eagleson Nate Jones @@ -1375,6 +1414,7 @@ Noah Treuhaft NobodyOnSE noducks Nolan Darilek +Noriki Nakamura nponeccop Nuutti Kotivuori nzwsch @@ -1386,8 +1426,11 @@ Ohad Schneider ohmystack Ole Reifschneider Oliver Neal +Oliver Reason Olivier Gambier Olle Jonsson +Olli Janatuinen +Omri Shiv Oriol Francès Oskar Niburski Otto Kekäläinen @@ -1443,6 +1486,7 @@ Peter Edge Peter Ericson Peter Esbensen Peter Jaffe +Peter Kang Peter Malmgren Peter Salvatore Peter Volpe @@ -1486,6 +1530,7 @@ Quentin Brossard Quentin Perez Quentin Tayssier r0n22 +Radostin Stoyanov Rafal Jeczalik Rafe Colton Raghavendra K T @@ -1499,6 +1544,7 @@ Ralph Bean Ramkumar Ramachandra Ramon Brooker Ramon van Alteren +RaviTeja Pothana Ray Tsang ReadmeCritic Recursive Madman @@ -1548,6 +1594,7 @@ Roel Van Nyen Roger Peppe Rohit Jnagal Rohit Kadam +Rohit Kapur Rojin George Roland Huß Roland Kammerer @@ -1557,6 +1604,9 @@ Roman Dudin Roman Strashkin Ron Smits Ron Williams +Rong Gao +Rong Zhang +Rongxiang Song root root root @@ -1568,6 +1618,7 @@ Rovanion Luckey Royce Remer Rozhnov Alexandr Rudolph Gottesheim +Rui Cao Rui Lopes Runshen Zhu Russ Magee @@ -1589,6 +1640,7 @@ Ryan Wallner Ryan Zhang ryancooper7 RyanDeng +Ryo Nakao Rémy Greinhofer s. rannou s00318865 @@ -1619,6 +1671,7 @@ Santhosh Manohar sapphiredev Sargun Dhillon Sascha Andres +Sascha Grunert Satnam Singh Satoshi Amemiya Satoshi Tagomori @@ -1645,6 +1698,7 @@ Serge Hallyn Sergey Alekseev Sergey Evstifeev Sergii Kabashniuk +Sergio Lopez Serhat Gülçiçek SeungUkLee Sevki Hasirci @@ -1674,6 +1728,7 @@ Sidhartha Mani sidharthamani Silas Sewell Silvan Jegen +Simão Reis Simei He Simon Eskildsen Simon Ferquel @@ -1700,7 +1755,7 @@ Stefan Berger Stefan J. Wernli Stefan Praszalowicz Stefan S. 
-Stefan Scherer +Stefan Scherer Stefan Staudenmeyer Stefan Weil Stephan Spindler @@ -1900,6 +1955,7 @@ Wassim Dhif Wayne Chang Wayne Song Weerasak Chongnguluam +Wei Fu Wei Wu Wei-Ting Kuo weipeng @@ -1929,11 +1985,16 @@ WiseTrem Wolfgang Powisch Wonjun Kim xamyzhao +Xian Chaobo Xianglin Gao Xianlu Bird +Xiao YongBiao XiaoBing Jiang +Xiaodong Zhang +Xiaoxi He Xiaoxu Chen Xiaoyu Zhang +xichengliudui <1693291525@qq.com> xiekeyang Ximo Guanter Gonzálbez Xinbo Weng @@ -1961,6 +2022,7 @@ Yihang Ho Ying Li Yohei Ueda Yong Tang +Yongxin Li Yongzhi Pan Yosef Fertel You-Sheng Yang (楊有勝) @@ -1971,10 +2033,12 @@ Yu Peng Yu-Ju Hong Yuan Sun Yuanhong Peng +Yue Zhang Yuhao Fang Yuichiro Kaneko Yunxiang Huang Yurii Rashkovskii +Yusuf Tarık Günaydın Yves Junqueira Zac Dover Zach Borboa @@ -1991,8 +2055,10 @@ ZhangHang zhangxianwei Zhenan Ye <21551168@zju.edu.cn> zhenghenghuo +Zhenhai Gao Zhenkun Bi Zhou Hao +Zhoulin Xie Zhu Guihua Zhu Kunjia Zhuoyun Wei diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index aa146cdae..1565e2af6 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion = "1.40" + DefaultVersion = "1.41" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 8652c368c..38ca5329e 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.40" +basePath: "/v1.41" info: title: "Docker Engine API" - version: "1.40" + version: "1.41" x-logo: url: "https://docs.docker.com/images/logo-docker-main.png" description: | @@ -49,8 +49,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.40) is used. - For example, calling `/info` is the same as calling `/v1.40/info`. Using the + If you omit the version-prefix, the current version of the API (v1.41) is used. + For example, calling `/info` is the same as calling `/v1.41/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -210,6 +210,43 @@ definitions: PathInContainer: "/dev/deviceName" CgroupPermissions: "mrw" + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. 
+ type: "object" + additionalProperties: + type: "string" + ThrottleDevice: type: "object" properties: @@ -421,10 +458,11 @@ definitions: items: type: "string" example: "c 13:* rwm" - DiskQuota: - description: "Disk limit (in bytes)." - type: "integer" - format: "int64" + DeviceRequests: + description: "a list of requests for devices to be sent to device drivers" + type: "array" + items: + $ref: "#/definitions/DeviceRequest" KernelMemory: description: "Kernel memory limit in bytes." type: "integer" @@ -665,6 +703,19 @@ definitions: description: "A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'" items: type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. Dns: type: "array" description: "A list of DNS servers for the container to use." @@ -1103,6 +1154,7 @@ definitions: type: "object" additionalProperties: type: "array" + x-nullable: true items: $ref: "#/definitions/PortBinding" example: @@ -1127,7 +1179,6 @@ definitions: PortBinding represents a binding between a host IP address and a host port. type: "object" - x-nullable: true properties: HostIp: description: "Host IP address that the container's port is mapped to." @@ -2831,6 +2882,18 @@ definitions: type: "object" additionalProperties: type: "string" + # This option is not used by Windows containers + Capabilities: + type: "array" + description: | + A list of kernel capabilities to be available for container (this overrides the default set). + items: + type: "string" + example: + - "CAP_NET_RAW" + - "CAP_SYS_ADMIN" + - "CAP_SYS_CHROOT" + - "CAP_SYSLOG" NetworkAttachmentSpec: description: | Read-only spec type for non-swarm containers attached to swarm overlay @@ -3767,7 +3830,7 @@ definitions: description: | The driver to use for managing cgroups. type: "string" - enum: ["cgroupfs", "systemd"] + enum: ["cgroupfs", "systemd", "none"] default: "cgroupfs" example: "cgroupfs" NEventsListener: @@ -3789,6 +3852,17 @@ definitions: or "Windows Server 2016 Datacenter" type: "string" example: "Alpine Linux v3.5" + OSVersion: + description: | + Version of the host's operating system + +
+ + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "16.04" OSType: description: | Generic type of the operating system of the host, as returned by the @@ -4002,7 +4076,7 @@ definitions: SecurityOptions: description: | List of security features that are enabled on the daemon, such as - apparmor, seccomp, SELinux, and user-namespaces (userns). + apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. Additional configuration options for each security feature may be present, and are included as a comma-separated list of key/value @@ -4015,6 +4089,7 @@ definitions: - "name=seccomp,profile=default" - "name=selinux" - "name=userns" + - "name=rootless" ProductLicense: description: | Reports a summary of the product license on the daemon. @@ -4569,9 +4644,9 @@ paths: parameters: - name: "name" in: "query" - description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`." type: "string" - pattern: "/?[a-zA-Z0-9_-]+" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - name: "body" in: "body" description: "Container to create" @@ -5135,15 +5210,15 @@ paths: Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. operationId: "ContainerLogs" responses: - 101: - description: "logs returned as a stream" - schema: - type: "string" - format: "binary" 200: - description: "logs returned as a string in response body" + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not upgrade the connection and does not + set Content-Type. schema: type: "string" + format: "binary" 404: description: "no such container" schema: @@ -5163,10 +5238,7 @@ paths: type: "string" - name: "follow" in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + description: "Keep connection after returning logs." type: "boolean" default: false - name: "stdout" @@ -5427,7 +5499,7 @@ paths: /containers/{id}/resize: post: summary: "Resize a container TTY" - description: "Resize the TTY for a container. You must restart the container for the resize to take effect." + description: "Resize the TTY for a container." operationId: "ContainerResize" consumes: - "application/octet-stream" @@ -6181,12 +6253,17 @@ paths: in: "query" description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." type: "string" + - name: "copyUIDGID" + in: "query" + description: "If “1”, “true”, then it will copy UID/GID maps to the dest file or dir" + type: "string" - name: "inputStream" in: "body" required: true description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." 
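The new `copyUIDGID` query parameter is surfaced on the Go client as the CopyUIDGID field of types.CopyToContainerOptions. A minimal sketch, assuming a container named "my-container" and a hypothetical local tar archive (the file name and destination path are made up for illustration):

    package main

    import (
    	"context"
    	"log"
    	"os"

    	"github.com/docker/docker/api/types"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewClientWithOpts(client.FromEnv)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Must be a tar archive, per the inputStream description above.
    	tarball, err := os.Open("bundle.tar")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer tarball.Close()
    	// CopyUIDGID: true sets the copyUIDGID query parameter.
    	err = cli.CopyToContainer(context.Background(), "my-container", "/dest",
    		tarball, types.CopyToContainerOptions{CopyUIDGID: true})
    	if err != nil {
    		log.Fatal(err)
    	}
    }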
schema: type: "string" + format: "binary" tags: ["Container"] /containers/prune: post: @@ -6459,6 +6536,11 @@ paths: description: "Target build stage" type: "string" default: "" + - name: "outputs" + in: "query" + description: "BuildKit output configuration" + type: "string" + default: "" responses: 200: description: "no error" @@ -9066,7 +9148,9 @@ paths: type: "string" RemoteAddrs: description: "Addresses of manager nodes already participating in the swarm." - type: "string" + type: "array" + items: + type: "string" JoinToken: description: "Secret token for joining this swarm." type: "string" @@ -9522,23 +9606,16 @@ paths: get: summary: "Get service logs" description: | - Get `stdout` and `stderr` logs from a service. + Get `stdout` and `stderr` logs from a service. See also [`/containers/{id}/logs`](#operation/ContainerLogs). - **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. + **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "ServiceLogs" - produces: - - "application/vnd.docker.raw-stream" - - "application/json" responses: - 101: - description: "logs returned as a stream" - schema: - type: "string" - format: "binary" 200: - description: "logs returned as a string in response body" + description: "logs returned as a stream in response body" schema: type: "string" + format: "binary" 404: description: "no such service" schema: @@ -9567,10 +9644,7 @@ paths: default: false - name: "follow" in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + description: "Keep connection after returning logs." type: "boolean" default: false - name: "stdout" @@ -9779,23 +9853,16 @@ paths: get: summary: "Get task logs" description: | - Get `stdout` and `stderr` logs from a task. + Get `stdout` and `stderr` logs from a task. See also [`/containers/{id}/logs`](#operation/ContainerLogs). - **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. + **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "TaskLogs" - produces: - - "application/vnd.docker.raw-stream" - - "application/json" responses: - 101: - description: "logs returned as a stream" - schema: - type: "string" - format: "binary" 200: - description: "logs returned as a string in response body" + description: "logs returned as a stream in response body" schema: type: "string" + format: "binary" 404: description: "no such task" schema: @@ -9824,10 +9891,7 @@ paths: default: false - name: "follow" in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + description: "Keep connection after returning logs." 
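With the 101-upgrade behavior dropped, both the container and service log endpoints now stream over a plain 200 response, and `follow` simply keeps the connection open. From the Go client this should be transparent, since ContainerLogs already returns an io.ReadCloser either way. A minimal sketch, assuming a running container named "my-container":

    package main

    import (
    	"context"
    	"io"
    	"os"

    	"github.com/docker/docker/api/types"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewClientWithOpts(client.FromEnv)
    	if err != nil {
    		panic(err)
    	}
    	// Follow keeps the connection open after the existing logs are
    	// returned, matching the reworded `follow` parameter above.
    	rc, err := cli.ContainerLogs(context.Background(), "my-container",
    		types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Follow: true})
    	if err != nil {
    		panic(err)
    	}
    	defer rc.Close()
    	// The body is the raw multiplexed stream documented for the attach endpoint.
    	io.Copy(os.Stdout, rc)
    }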
type: "boolean" default: false - name: "stdout" diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 3b698c2c2..4b9f50282 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -187,6 +187,15 @@ type ImageBuildOptions struct { // build request. The same identifier can be used to gracefully cancel the // build with the cancel request. BuildID string + // Outputs defines configurations for exporting build results. Only supported + // in BuildKit mode + Outputs []ImageBuildOutput +} + +// ImageBuildOutput defines configuration for exporting a build result +type ImageBuildOutput struct { + Type string + Attrs map[string]string } // BuilderVersion sets the version of underlying builder to use diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index 89ad08c23..f767195b9 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -54,7 +54,7 @@ type Config struct { Env []string // List of environment variable to set in the container Cmd strslice.StrSlice // Command to run when starting the container Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) Volumes map[string]struct{} // List of volumes (mounts) used for the container WorkingDir string // Current directory (PWD) in the command will be launched diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go index c909d6ca3..222d14100 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_changes.go +++ b/vendor/github.com/docker/docker/api/types/container/container_changes.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go index 49efa0f2c..1ec9c3728 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_create.go +++ b/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go index ba41edcf3..f8a606687 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_top.go +++ b/vendor/github.com/docker/docker/api/types/container/container_top.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // 
---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go index 7630ae54c..33addedf7 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_update.go +++ b/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go index 9e3910a6b..94b6a20e1 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index f4f5c09f8..654c88106 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -10,6 +10,29 @@ import ( "github.com/docker/go-units" ) +// CgroupnsMode represents the cgroup namespace mode of the container +type CgroupnsMode string + +// IsPrivate indicates whether the container uses its own private cgroup namespace +func (c CgroupnsMode) IsPrivate() bool { + return c == "private" +} + +// IsHost indicates whether the container shares the host's cgroup namespace +func (c CgroupnsMode) IsHost() bool { + return c == "host" +} + +// IsEmpty indicates whether the container cgroup namespace mode is unset +func (c CgroupnsMode) IsEmpty() bool { + return c == "" +} + +// Valid indicates whether the cgroup namespace mode is valid +func (c CgroupnsMode) Valid() bool { + return c.IsEmpty() || c.IsPrivate() || c.IsHost() +} + // Isolation represents the isolation technology of a container. The supported // values are platform specific type Isolation string @@ -244,6 +267,16 @@ func (n PidMode) Container() string { return "" } +// DeviceRequest represents a request for devices from a device driver. +// Used by GPU device drivers. +type DeviceRequest struct { + Driver string // Name of device driver + Count int // Number of devices to request (-1 = All) + DeviceIDs []string // List of device IDs as recognizable by the device driver + Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") + Options map[string]string // Options to pass onto the device driver +} + // DeviceMapping represents the device mapping between the host and the container. 
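The new DeviceRequest type is the API-side plumbing behind `docker run --gpus`. A sketch of a HostConfig requesting every GPU on the host; the "nvidia" driver name and "gpu" capability follow the swagger example earlier in this patch, and the snippet is illustrative rather than part of the change:

    package example

    import "github.com/docker/docker/api/types/container"

    // GPUHostConfig builds a HostConfig that asks the daemon for all GPUs,
    // roughly what the docker CLI constructs for `--gpus all`.
    func GPUHostConfig() *container.HostConfig {
    	return &container.HostConfig{
    		Resources: container.Resources{
    			DeviceRequests: []container.DeviceRequest{{
    				Driver:       "nvidia",            // from the swagger example; the CLI leaves this empty
    				Count:        -1,                  // -1 requests all matching devices
    				Capabilities: [][]string{{"gpu"}}, // OR list of AND lists, per the field comment
    			}},
    		},
    	}
    }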
type DeviceMapping struct { PathOnHost string @@ -327,7 +360,7 @@ type Resources struct { CpusetMems string // CpusetMems 0-2, 0,1 Devices []DeviceMapping // List of devices to map inside the container DeviceCgroupRules []string // List of rule to be added to the device cgroup - DiskQuota int64 // Disk limit (in bytes) + DeviceRequests []DeviceRequest // List of device requests for device drivers KernelMemory int64 // Kernel memory limit (in bytes) KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) @@ -371,9 +404,10 @@ type HostConfig struct { CapAdd strslice.StrSlice // List of kernel capabilities to add to the container CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set) - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for ExtraHosts []string // List of extra hosts GroupAdd []string // List of additional groups that the container process will run as IpcMode IpcMode // IPC namespace to use for the container diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go new file mode 100644 index 000000000..f84f034cd --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response_ext.go @@ -0,0 +1,6 @@ +package types + +// Error returns the error message +func (e ErrorResponse) Error() string { + return e.Message +} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index d8f19ae22..1f75403f7 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -5,7 +5,6 @@ package filters // import "github.com/docker/docker/api/types/filters" import ( "encoding/json" - "errors" "regexp" "strings" @@ -37,41 +36,6 @@ func NewArgs(initialArgs ...KeyValuePair) Args { return args } -// ParseFlag parses a key=value string and adds it to an Args. 
-// -// Deprecated: Use Args.Add() -func ParseFlag(arg string, prev Args) (Args, error) { - filters := prev - if len(arg) == 0 { - return filters, nil - } - - if !strings.Contains(arg, "=") { - return filters, ErrBadFormat - } - - f := strings.SplitN(arg, "=", 2) - - name := strings.ToLower(strings.TrimSpace(f[0])) - value := strings.TrimSpace(f[1]) - - filters.Add(name, value) - - return filters, nil -} - -// ErrBadFormat is an error returned when a filter is not in the form key=value -// -// Deprecated: this error will be removed in a future version -var ErrBadFormat = errors.New("bad format of filter (expected name=value)") - -// ToParam encodes the Args as args JSON encoded string -// -// Deprecated: use ToJSON -func ToParam(a Args) (string, error) { - return ToJSON(a) -} - // MarshalJSON returns a JSON byte representation of the Args func (args Args) MarshalJSON() ([]byte, error) { if len(args.fields) == 0 { @@ -107,13 +71,6 @@ func ToParamWithVersion(version string, a Args) (string, error) { return ToJSON(a) } -// FromParam decodes a JSON encoded string into Args -// -// Deprecated: use FromJSON -func FromParam(p string) (Args, error) { - return FromJSON(p) -} - // FromJSON decodes a JSON encoded string into Args func FromJSON(p string) (Args, error) { args := NewArgs() @@ -275,14 +232,6 @@ func (args Args) FuzzyMatch(key, source string) bool { return false } -// Include returns true if the key exists in the mapping -// -// Deprecated: use Contains -func (args Args) Include(field string) bool { - _, ok := args.fields[field] - return ok -} - // Contains returns true if the key exists in the mapping func (args Args) Contains(field string) bool { _, ok := args.fields[field] diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go index d6b354bcd..b5a7a0c49 100644 --- a/vendor/github.com/docker/docker/api/types/image/image_history.go +++ b/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -1,4 +1,4 @@ -package image +package image // import "github.com/docker/docker/api/types/image" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 48190c176..5bbedfcf6 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -67,10 +67,11 @@ type ContainerSpec struct { // The format of extra hosts on swarmkit is specified in: // http://man7.org/linux/man-pages/man5/hosts.5.html // IP_address canonical_hostname [aliases...] 
- Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` - Isolation container.Isolation `json:",omitempty"` - Sysctls map[string]string `json:",omitempty"` + Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` + Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` + Capabilities []string `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go index 1fdc9b043..e45045866 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: plugin.proto -// DO NOT EDIT! /* Package runtime is a generated protocol buffer package. @@ -38,6 +37,7 @@ type PluginSpec struct { Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` } func (m *PluginSpec) Reset() { *m = PluginSpec{} } @@ -73,6 +73,13 @@ func (m *PluginSpec) GetDisabled() bool { return false } +func (m *PluginSpec) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + // PluginPrivilege describes a permission the user has to accept // upon installing a plugin. 
type PluginPrivilege struct { @@ -160,6 +167,21 @@ func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { } i++ } + if len(m.Env) > 0 { + for _, s := range m.Env { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -208,24 +230,6 @@ func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -255,6 +259,12 @@ func (m *PluginSpec) Size() (n int) { if m.Disabled { n += 2 } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } return n } @@ -429,6 +439,35 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } } m.Disabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPlugin(dAtA[iNdEx:]) @@ -695,18 +734,21 @@ var ( func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } var fileDescriptorPlugin = []byte{ - // 196 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, - 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, - 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, - 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, - 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, - 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, - 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, - 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, - 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, - 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, - 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, - 0x0c, 0x01, 0x00, 0x00, + // 256 
bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, + 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, + 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, + 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, + 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, + 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, + 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, + 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, + 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, + 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9, + 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, + 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, + 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, + 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, + 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, + 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto index 6d63b7783..9ef169046 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -9,6 +9,7 @@ message PluginSpec { string remote = 2; repeated PluginPrivilege privileges = 3; bool disabled = 4; + repeated string env = 5; } // PluginPrivilege describes a permission the user has to accept diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index 484cd0be7..b25f99964 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -209,6 +209,8 @@ type Info struct { Managers int `json:",omitempty"` Cluster *ClusterInfo `json:",omitempty"` + + Warnings []string `json:",omitempty"` } // Peer represents a peer. 
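The magic numbers in the generated marshaling code above follow the standard protobuf wire format: a field's tag byte is (field_number << 3) | wire_type, so the new Env field (number 5, length-delimited wire type 2) is written as (5<<3)|2 = 42 = 0x2a, and each string is prefixed with its varint-encoded length. A quick sketch of the arithmetic, separate from the generated code:

    package main

    import "fmt"

    func main() {
    	const fieldNumber = 5 // PluginSpec.Env
    	const wireType = 2    // length-delimited (strings, bytes, messages)
    	tag := fieldNumber<<3 | wireType
    	fmt.Printf("tag byte: %#x\n", tag) // 0x2a, the constant written by MarshalTo above
    }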
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index ed555aad2..b13d9c4c7 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -146,7 +146,6 @@ type Commit struct { // GET "/info" type Info struct { ID string - Builder BuilderVersion Containers int ContainersRunning int ContainersPaused int @@ -178,6 +177,7 @@ type Info struct { NEventsListener int KernelVersion string OperatingSystem string + OSVersion string OSType string Architecture string IndexServerAddress string diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go index f12e48612..0c3772d3a 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volume_create.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_create.go @@ -1,4 +1,4 @@ -package volume +package volume // import "github.com/docker/docker/api/types/volume" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go index 020198f73..45c3c1c9a 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volume_list.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_list.go @@ -1,4 +1,4 @@ -package volume +package volume // import "github.com/docker/docker/api/types/volume" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go index 74df49508..3aae43e3d 100644 --- a/vendor/github.com/docker/docker/client/build_cancel.go +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -11,10 +11,6 @@ func (cli *Client) BuildCancel(ctx context.Context, id string) error { query.Set("id", id) serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) - if err != nil { - return err - } - defer ensureReaderClosed(serverResp) - - return nil + ensureReaderClosed(serverResp) + return err } diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go index 42bbf99ef..397d67cdc 100644 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -31,11 +31,11 @@ func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePru query.Set("filters", filters) serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return nil, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { return nil, fmt.Errorf("Error retrieving disk usage: %v", err) diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go index 2b73fb553..66d46dd16 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -18,11 +18,11 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options } resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + defer ensureReaderClosed(resp) if err != nil { return checkpoints, wrapResponseError(err, 
resp, "container", container) } err = json.NewDecoder(resp.body).Decode(&checkpoints) - ensureReaderClosed(resp) return checkpoints, err } diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index f174d3ce7..b63d4d6d4 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -81,13 +81,22 @@ type Client struct { customHTTPHeaders map[string]string // manualOverride is set to true when the version was set by users. manualOverride bool + + // negotiateVersion indicates if the client should automatically negotiate + // the API version to use when making requests. API version negotiation is + // performed on the first request, after which negotiated is set to "true" + // so that subsequent requests do not re-negotiate. + negotiateVersion bool + + // negotiated indicates that API version negotiation took place + negotiated bool } // CheckRedirect specifies the policy for dealing with redirect responses: // If the request is non-GET return `ErrRedirect`. Otherwise use the last response. // // Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . -// The Docker client (and by extension docker API client) can be made to to send a request +// The Docker client (and by extension docker API client) can be made to send a request // like POST /containers//start where what would normally be in the name section of the URL is empty. // This triggers an HTTP 301 from the daemon. // In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. @@ -107,7 +116,7 @@ func CheckRedirect(req *http.Request, via []*http.Request) error { // It won't send any version information if the version number is empty. It is // highly recommended that you set a version or your client may break if the // server is upgraded. -func NewClientWithOpts(ops ...func(*Client) error) (*Client, error) { +func NewClientWithOpts(ops ...Opt) (*Client, error) { client, err := defaultHTTPClient(DefaultDockerHost) if err != nil { return nil, err @@ -169,8 +178,11 @@ func (cli *Client) Close() error { // getAPIPath returns the versioned request path to call the api. // It appends the query parameters to the path if they are not empty. -func (cli *Client) getAPIPath(p string, query url.Values) string { +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { var apiPath string + if cli.negotiateVersion && !cli.negotiated { + cli.NegotiateAPIVersion(ctx) + } if cli.version != "" { v := strings.TrimPrefix(cli.version, "v") apiPath = path.Join(cli.basePath, "/v"+v, p) @@ -186,19 +198,31 @@ func (cli *Client) ClientVersion() string { } // NegotiateAPIVersion queries the API and updates the version to match the -// API version. Any errors are silently ignored. +// API version. Any errors are silently ignored. If a manual override is in place, +// either through the `DOCKER_API_VERSION` environment variable, or if the client +// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation +// will be performed. func (cli *Client) NegotiateAPIVersion(ctx context.Context) { - ping, _ := cli.Ping(ctx) - cli.NegotiateAPIVersionPing(ping) + if !cli.manualOverride { + ping, _ := cli.Ping(ctx) + cli.negotiateAPIVersionPing(ping) + } } // NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion -// if the ping version is less than the default version. 
+// if the ping version is less than the default version. If a manual override is +// in place, either through the `DOCKER_API_VERSION` environment variable, or if +// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no +// negotiation is performed. func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { - if cli.manualOverride { - return + if !cli.manualOverride { + cli.negotiateAPIVersionPing(p) } +} +// negotiateAPIVersionPing queries the API and updates the version to match the +// API version. Any errors are silently ignored. +func (cli *Client) negotiateAPIVersionPing(p types.Ping) { // try the latest version before versioning headers existed if p.APIVersion == "" { p.APIVersion = "1.24" @@ -213,6 +237,12 @@ func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { if versions.LessThan(p.APIVersion, cli.version) { cli.version = p.APIVersion } + + // Store the results, so that automatic API version negotiation (if enabled) + // won't be performed on the next request. + if cli.negotiateVersion { + cli.negotiated = true + } } // DaemonHost returns the host address used by the client diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go index c8b802ad3..ee7d411df 100644 --- a/vendor/github.com/docker/docker/client/config_create.go +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -15,11 +15,11 @@ func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (t return response, err } resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go index 4ac566ad8..7d0ce3e11 100644 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -18,10 +18,10 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C return swarm.Config{}, nil, err } resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + defer ensureReaderClosed(resp) if err != nil { return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) } - defer ensureReaderClosed(resp) body, err := ioutil.ReadAll(resp.body) if err != nil { diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go index 2b9d54606..565acc6e2 100644 --- a/vendor/github.com/docker/docker/client/config_list.go +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -27,12 +27,12 @@ func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptio } resp, err := cli.get(ctx, "/configs", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var configs []swarm.Config err = json.NewDecoder(resp.body).Decode(&configs) - ensureReaderClosed(resp) return configs, err } diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go index a96871e98..a708fcaec 100644 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -8,6 +8,6 @@ func (cli *Client) ConfigRemove(ctx context.Context, id string) error { return err } resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) - ensureReaderClosed(resp) 
+ defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "config", id) } diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go index 377a2ea68..2966e88c8 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -45,11 +45,11 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option var response types.IDResponse resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go index d706260ce..bb278bf7f 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -21,10 +21,10 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri urlStr := "/containers/" + containerID + "/archive" response, err := cli.head(ctx, urlStr, query, nil) + defer ensureReaderClosed(response) if err != nil { return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) } - defer ensureReaderClosed(response) return getContainerPathStatFromHeader(response.header) } @@ -45,11 +45,12 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str apiPath := "/containers/" + containerID + "/archive" response, err := cli.putRaw(ctx, apiPath, query, content, nil) + defer ensureReaderClosed(response) if err != nil { return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) } - defer ensureReaderClosed(response) + // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior if response.statusCode != http.StatusOK { return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) } @@ -69,6 +70,7 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) } + // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior if response.statusCode != http.StatusOK { return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) } diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index d269a6189..5b795e0c1 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "net/url" - "strings" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" @@ -43,14 +42,11 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config } serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + defer ensureReaderClosed(serverResp) if err != nil { - if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { - return response, objectNotFoundError{object: "image", id: config.Image} - } return response, err } err = 
json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, err } diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go index 3b7c90c96..29dac8491 100644 --- a/vendor/github.com/docker/docker/client/container_diff.go +++ b/vendor/github.com/docker/docker/client/container_diff.go @@ -13,11 +13,11 @@ func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]con var changes []container.ContainerChangeResponseItem serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + defer ensureReaderClosed(serverResp) if err != nil { return changes, err } err = json.NewDecoder(serverResp.body).Decode(&changes) - ensureReaderClosed(serverResp) return changes, err } diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index 535536b1e..e3ee755b7 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -16,11 +16,11 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co } resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go index e34bb16a2..c496bcffe 100644 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -16,10 +16,10 @@ func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (ty return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} } serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) } - defer ensureReaderClosed(serverResp) var response types.ContainerJSON err = json.NewDecoder(serverResp.body).Decode(&response) @@ -36,10 +36,10 @@ func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID stri query.Set("size", "1") } serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + defer ensureReaderClosed(serverResp) if err != nil { return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) } - defer ensureReaderClosed(serverResp) body, err := ioutil.ReadAll(serverResp.body) if err != nil { diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index 9c218e221..1e7a63a9c 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -45,12 +45,12 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis } resp, err := cli.get(ctx, "/containers/json", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var containers []types.Container err = json.NewDecoder(resp.body).Decode(&containers) - ensureReaderClosed(resp) return containers, err } diff --git a/vendor/github.com/docker/docker/client/container_prune.go 
b/vendor/github.com/docker/docker/client/container_prune.go index 14f88d93b..04383deaa 100644 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -23,10 +23,10 @@ func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Arg } serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return report, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { return report, fmt.Errorf("Error retrieving disk usage: %v", err) diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go index ab4cfc16f..df81461b8 100644 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -22,6 +22,6 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti } resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "container", containerID) } diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go index 9c9fce7a0..a5b78999b 100644 --- a/vendor/github.com/docker/docker/client/container_top.go +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -18,11 +18,11 @@ func (cli *Client) ContainerTop(ctx context.Context, containerID string, argumen } resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go index 14e7f23df..6917cf9fb 100644 --- a/vendor/github.com/docker/docker/client/container_update.go +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -11,12 +11,11 @@ import ( func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { var response container.ContainerUpdateOKBody serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + defer ensureReaderClosed(serverResp) if err != nil { return response, err } err = json.NewDecoder(serverResp.body).Decode(&response) - - ensureReaderClosed(serverResp) return response, err } diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go index 8eb30eb5d..354cd3693 100644 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -13,10 +13,10 @@ func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { var du types.DiskUsage serverResp, err := cli.get(ctx, "/system/df", nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return du, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { return du, fmt.Errorf("Error retrieving disk usage: %v", err) diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go index 7245bbeed..f4e3794cb 
100644 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -28,11 +28,11 @@ func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegist } resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + defer ensureReaderClosed(resp) if err != nil { return distributionInspect, err } err = json.NewDecoder(resp.body).Decode(&distributionInspect) - ensureReaderClosed(resp) return distributionInspect, err } diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index 0461af329..001c10288 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -5,6 +5,7 @@ import ( "net/http" "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) @@ -32,16 +33,19 @@ func ErrorConnectionFailed(host string) error { return errConnectionFailed{host: host} } +// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility type notFound interface { error - NotFound() bool // Is the error a NotFound error + NotFound() bool } // IsErrNotFound returns true if the error is a NotFound error, which is returned // by the API when some object is not found. func IsErrNotFound(err error) bool { - te, ok := err.(notFound) - return ok && te.NotFound() + if _, ok := err.(notFound); ok { + return ok + } + return errdefs.IsNotFound(err) } type objectNotFoundError struct { @@ -49,9 +53,7 @@ type objectNotFoundError struct { id string } -func (e objectNotFoundError) NotFound() bool { - return true -} +func (e objectNotFoundError) NotFound() {} func (e objectNotFoundError) Error() string { return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) @@ -64,7 +66,7 @@ func wrapResponseError(err error, resp serverResponse, object, id string) error case resp.statusCode == http.StatusNotFound: return objectNotFoundError{object: object, id: id} case resp.statusCode == http.StatusNotImplemented: - return notImplementedError{message: err.Error()} + return errdefs.NotImplemented(err) default: return err } @@ -83,8 +85,10 @@ func (u unauthorizedError) Error() string { // IsErrUnauthorized returns true if the error is caused // when a remote registry authentication fails func IsErrUnauthorized(err error) bool { - _, ok := err.(unauthorizedError) - return ok + if _, ok := err.(unauthorizedError); ok { + return ok + } + return errdefs.IsUnauthorized(err) } type pluginPermissionDenied struct { @@ -118,8 +122,10 @@ func (e notImplementedError) NotImplemented() bool { // This is returned by the API when a requested feature has not been // implemented. 
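The errdefs migration above keeps the old concrete error types working while delegating classification to the errdefs interfaces. A minimal caller-side sketch of what that buys, assuming an env-configured client and an invented image reference:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/docker/docker/client"
    	"github.com/docker/docker/errdefs"
    )

    func main() {
    	cli, err := client.NewClientWithOpts(client.FromEnv)
    	if err != nil {
    		panic(err)
    	}
    	// "no/such-image:latest" is an invented reference for illustration.
    	_, _, err = cli.ImageInspectWithRaw(context.Background(), "no/such-image:latest")
    	switch {
    	case client.IsErrNotFound(err):
    		// Matches the legacy notFound interface and errdefs.ErrNotFound alike.
    		fmt.Println("image does not exist")
    	case errdefs.IsUnauthorized(err):
    		fmt.Println("daemon or registry rejected the credentials")
    	case err != nil:
    		fmt.Println("unexpected error:", err)
    	}
    }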
func IsErrNotImplemented(err error) bool { - te, ok := err.(notImplementedError) - return ok && te.NotImplemented() + if _, ok := err.(notImplementedError); ok { + return ok + } + return errdefs.IsNotImplemented(err) } // NewVersionError returns an error if the APIVersion required diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 0ac8248f2..e9c9a752f 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -23,7 +23,7 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu return types.HijackedResponse{}, err } - apiPath := cli.getAPIPath(path, query) + apiPath := cli.getAPIPath(ctx, path, query) req, err := http.NewRequest("POST", apiPath, bodyEncoded) if err != nil { return types.HijackedResponse{}, err @@ -38,6 +38,17 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err } +// DialHijack returns a hijacked connection with negotiated protocol proto. +func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest("POST", url, nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + return cli.setupHijackConn(ctx, req, proto) +} + // fallbackDial is used when WithDialer() was not called. // See cli.Dialer(). func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index 9add3c10b..8fcf99503 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -134,5 +134,13 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur query.Set("buildid", options.BuildID) } query.Set("version", string(options.Version)) + + if options.Outputs != nil { + outputsJSON, err := json.Marshal(options.Outputs) + if err != nil { + return query, err + } + query.Set("outputs", string(outputsJSON)) + } return query, nil } diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go index 0151b9517..b5bea10d8 100644 --- a/vendor/github.com/docker/docker/client/image_history.go +++ b/vendor/github.com/docker/docker/client/image_history.go @@ -12,11 +12,11 @@ import ( func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { var history []image.HistoryResponseItem serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + defer ensureReaderClosed(serverResp) if err != nil { return history, err } err = json.NewDecoder(serverResp.body).Decode(&history) - ensureReaderClosed(serverResp) return history, err } diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go index 2f8f6d2f1..1eb8dce02 100644 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -15,10 +15,10 @@ func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (typ return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} } serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + defer 
ensureReaderClosed(serverResp) if err != nil { return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) } - defer ensureReaderClosed(serverResp) body, err := ioutil.ReadAll(serverResp.body) if err != nil { diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index 32fae27b3..4fa8c006b 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -35,11 +35,11 @@ func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions } serverResp, err := cli.get(ctx, "/images/json", query, nil) + defer ensureReaderClosed(serverResp) if err != nil { return images, err } err = json.NewDecoder(serverResp.body).Decode(&images) - ensureReaderClosed(serverResp) return images, err } diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go index 78ee3f6c4..56af6d7f9 100644 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -23,10 +23,10 @@ func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) ( } serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return report, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { return report, fmt.Errorf("Error retrieving disk usage: %v", err) diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index d97aacf8c..a23975591 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -3,12 +3,12 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" - "net/http" "net/url" "strings" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" ) // ImagePull requests the docker host to pull an image from a remote registry. @@ -35,7 +35,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.I } resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { return nil, privilegeErr diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index a15871c2b..49d412ee3 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -4,11 +4,11 @@ import ( "context" "errors" "io" - "net/http" "net/url" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" ) // ImagePush requests the docker host to push an image to a remote registry. 
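With sendRequest now wrapping HTTP failures into errdefs classes (see the request.go hunk later in this patch), the pull and push paths can key their credential retry off errdefs.IsUnauthorized(err) instead of peeking at resp.statusCode, which also behaves sanely when err is set and resp carries no status. A caller-side sketch; promptForAuth is a hypothetical helper:

    package example

    import (
    	"context"
    	"io"
    	"os"

    	"github.com/docker/docker/api/types"
    	"github.com/docker/docker/client"
    )

    // promptForAuth is a placeholder: a real implementation would collect
    // credentials and return a base64-encoded types.AuthConfig.
    func promptForAuth() (string, error) { return "", nil }

    func pullWithRetry(ctx context.Context, cli *client.Client, ref string) error {
    	rc, err := cli.ImagePull(ctx, ref, types.ImagePullOptions{
    		// Invoked only when the first attempt comes back as an errdefs
    		// "unauthorized" error; the pull is retried with the new header.
    		PrivilegeFunc: promptForAuth,
    	})
    	if err != nil {
    		return err
    	}
    	defer rc.Close()
    	_, err = io.Copy(os.Stdout, rc) // JSON progress stream
    	return err
    }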
@@ -36,7 +36,7 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options types.Im query.Set("tag", tag) resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { return nil, privilegeErr diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index 45d6e6f0d..84a41af0f 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -21,11 +21,11 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type var dels []types.ImageDeleteResponseItem resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + defer ensureReaderClosed(resp) if err != nil { return dels, wrapResponseError(err, resp, "image", imageID) } err = json.NewDecoder(resp.body).Decode(&dels) - ensureReaderClosed(resp) return dels, err } diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index 176de3c58..82955a747 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -4,12 +4,12 @@ import ( "context" "encoding/json" "fmt" - "net/http" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" ) // ImageSearch makes the docker host to search by a term in a remote registry. @@ -29,7 +29,8 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I } resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + defer ensureReaderClosed(resp) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { return results, privilegeErr @@ -41,7 +42,6 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I } err = json.NewDecoder(resp.body).Decode(&results) - ensureReaderClosed(resp) return results, err } diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go index 121f256ab..c856704e2 100644 --- a/vendor/github.com/docker/docker/client/info.go +++ b/vendor/github.com/docker/docker/client/info.go @@ -13,10 +13,10 @@ import ( func (cli *Client) Info(ctx context.Context) (types.Info, error) { var info types.Info serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + defer ensureReaderClosed(serverResp) if err != nil { return info, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { return info, fmt.Errorf("Error reading remote info: %v", err) diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index d190f8e58..cde64be4b 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -38,7 +38,7 @@ type CommonAPIClient interface { ServerVersion(ctx context.Context) (types.Version, error) NegotiateAPIVersion(ctx context.Context) NegotiateAPIVersionPing(types.Ping) - DialSession(ctx 
context.Context, proto string, meta map[string][]string) (net.Conn, error) + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) Dialer() func(context.Context) (net.Conn, error) Close() error } diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go index 7d6618190..f05852063 100644 --- a/vendor/github.com/docker/docker/client/login.go +++ b/vendor/github.com/docker/docker/client/login.go @@ -3,7 +3,6 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" - "net/http" "net/url" "github.com/docker/docker/api/types" @@ -14,16 +13,13 @@ import ( // It returns unauthorizedError when the authentication fails. func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + defer ensureReaderClosed(resp) - if resp.statusCode == http.StatusUnauthorized { - return registry.AuthenticateOKBody{}, unauthorizedError{err} - } if err != nil { return registry.AuthenticateOKBody{}, err } var response registry.AuthenticateOKBody err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go index 41da2ac61..278d9383a 100644 --- a/vendor/github.com/docker/docker/client/network_create.go +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -15,11 +15,11 @@ func (cli *Client) NetworkCreate(ctx context.Context, name string, options types } var response types.NetworkCreateResponse serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + defer ensureReaderClosed(serverResp) if err != nil { return response, err } - json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) + err = json.NewDecoder(serverResp.body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go index 025f6d875..89a05b302 100644 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -34,10 +34,10 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, query.Set("scope", options.Scope) } resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) + defer ensureReaderClosed(resp) if err != nil { return networkResource, nil, wrapResponseError(err, resp, "network", networkID) } - defer ensureReaderClosed(resp) body, err := ioutil.ReadAll(resp.body) if err != nil { diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go index f16b2f562..7130c1364 100644 --- a/vendor/github.com/docker/docker/client/network_list.go +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -22,10 +22,10 @@ func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOpt } var networkResources []types.NetworkResource resp, err := cli.get(ctx, "/networks", query, nil) + defer ensureReaderClosed(resp) if err != nil { return networkResources, err } err = json.NewDecoder(resp.body).Decode(&networkResources) - ensureReaderClosed(resp) return networkResources, err } diff --git a/vendor/github.com/docker/docker/client/network_prune.go 
b/vendor/github.com/docker/docker/client/network_prune.go index 6418b8b60..cebb18821 100644 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -23,10 +23,10 @@ func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) } serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return report, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { return report, fmt.Errorf("Error retrieving network prune report: %v", err) diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go index 12741437b..e71b16d86 100644 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -5,6 +5,6 @@ import "context" // NetworkRemove removes an existent network from the docker host. func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "network", networkID) } diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go index 593b2e9f0..d296c9fdd 100644 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -15,10 +15,10 @@ func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} } serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) } - defer ensureReaderClosed(serverResp) body, err := ioutil.ReadAll(serverResp.body) if err != nil { diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go index 9883f6fc5..c212906bc 100644 --- a/vendor/github.com/docker/docker/client/node_list.go +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -25,12 +25,12 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) } resp, err := cli.get(ctx, "/nodes", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var nodes []swarm.Node err = json.NewDecoder(resp.body).Decode(&nodes) - ensureReaderClosed(resp) return nodes, err } diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go index e7a750571..03ab87809 100644 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -15,6 +15,6 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types. 
} resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "node", nodeID) } diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go index 12eb25b18..6f77f0955 100644 --- a/vendor/github.com/docker/docker/client/options.go +++ b/vendor/github.com/docker/docker/client/options.go @@ -6,12 +6,16 @@ import ( "net/http" "os" "path/filepath" + "time" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" ) +// Opt is a configuration option to initialize a client +type Opt func(*Client) error + // FromEnv configures the client with values from environment variables. // // Supported environment variables: @@ -45,8 +49,9 @@ func FromEnv(c *Client) error { } if version := os.Getenv("DOCKER_API_VERSION"); version != "" { - c.version = version - c.manualOverride = true + if err := WithVersion(version)(c); err != nil { + return err + } } return nil } @@ -54,13 +59,13 @@ func FromEnv(c *Client) error { // WithDialer applies the dialer.DialContext to the client transport. This can be // used to set the Timeout and KeepAlive settings of the client. // Deprecated: use WithDialContext -func WithDialer(dialer *net.Dialer) func(*Client) error { +func WithDialer(dialer *net.Dialer) Opt { return WithDialContext(dialer.DialContext) } // WithDialContext applies the dialer to the client transport. This can be // used to set the Timeout and KeepAlive settings of the client. -func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) func(*Client) error { +func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { return func(c *Client) error { if transport, ok := c.client.Transport.(*http.Transport); ok { transport.DialContext = dialContext @@ -71,7 +76,7 @@ func WithDialContext(dialContext func(ctx context.Context, network, addr string) } // WithHost overrides the client host with the specified one. 
-func WithHost(host string) func(*Client) error { +func WithHost(host string) Opt { return func(c *Client) error { hostURL, err := ParseHostURL(host) if err != nil { @@ -89,7 +94,7 @@ func WithHost(host string) func(*Client) error { } // WithHTTPClient overrides the client http client with the specified one -func WithHTTPClient(client *http.Client) func(*Client) error { +func WithHTTPClient(client *http.Client) Opt { return func(c *Client) error { if client != nil { c.client = client @@ -98,8 +103,16 @@ func WithHTTPClient(client *http.Client) func(*Client) error { } } +// WithTimeout configures the time limit for requests made by the HTTP client +func WithTimeout(timeout time.Duration) Opt { + return func(c *Client) error { + c.client.Timeout = timeout + return nil + } +} + // WithHTTPHeaders overrides the client default http headers -func WithHTTPHeaders(headers map[string]string) func(*Client) error { +func WithHTTPHeaders(headers map[string]string) Opt { return func(c *Client) error { c.customHTTPHeaders = headers return nil @@ -107,7 +120,7 @@ func WithHTTPHeaders(headers map[string]string) func(*Client) error { } // WithScheme overrides the client scheme with the specified one -func WithScheme(scheme string) func(*Client) error { +func WithScheme(scheme string) Opt { return func(c *Client) error { c.scheme = scheme return nil @@ -115,7 +128,7 @@ func WithScheme(scheme string) func(*Client) error { } // WithTLSClientConfig applies a tls config to the client transport. -func WithTLSClientConfig(cacertPath, certPath, keyPath string) func(*Client) error { +func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { return func(c *Client) error { opts := tlsconfig.Options{ CAFile: cacertPath, @@ -135,10 +148,25 @@ func WithTLSClientConfig(cacertPath, certPath, keyPath string) func(*Client) err } } -// WithVersion overrides the client version with the specified one -func WithVersion(version string) func(*Client) error { +// WithVersion overrides the client version with the specified one. If an empty +// version is specified, the value will be ignored to allow version negotiation. +func WithVersion(version string) Opt { + return func(c *Client) error { + if version != "" { + c.version = version + c.manualOverride = true + } + return nil + } +} + +// WithAPIVersionNegotiation enables automatic API version negotiation for the client. +// With this option enabled, the client automatically negotiates the API version +// to use when making requests. API version negotiation is performed on the first +// request; subsequent requests will not re-negotiate. +func WithAPIVersionNegotiation() Opt { return func(c *Client) error { - c.version = version + c.negotiateVersion = true return nil } } diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index 0ebb6b752..90f39ec14 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -6,6 +6,7 @@ import ( "path" "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" ) // Ping pings the server and returns the value of the "Docker-Experimental", @@ -14,6 +15,10 @@ import ( // by the daemon. 
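Per the new doc comments above, an empty WithVersion is ignored and WithAPIVersionNegotiation defers version selection to the first request, which goes through the unversioned /_ping endpoint handled below. A construction sketch assuming a caller with an optional version override:

    package example

    import (
    	"time"

    	"github.com/docker/docker/client"
    )

    // versionOverride may be empty, e.g. the raw value of DOCKER_API_VERSION.
    func newClient(versionOverride string) (*client.Client, error) {
    	return client.NewClientWithOpts(
    		client.FromEnv,
    		client.WithTimeout(30*time.Second),
    		client.WithVersion(versionOverride), // no-op when empty
    		client.WithAPIVersionNegotiation(),  // negotiated on first request
    	)
    }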
func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { var ping types.Ping + + // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() + // because ping requests are used during API version negotiation, so we want + // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping req, err := cli.buildRequest("HEAD", path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err @@ -26,6 +31,8 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { // Server handled the request, so parse the response return parsePingResponse(cli, serverResp) } + } else if IsErrConnectionFailed(err) { + return ping, err } req, err = cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil) @@ -33,17 +40,18 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { return ping, err } serverResp, err = cli.doRequest(ctx, req) + defer ensureReaderClosed(serverResp) if err != nil { return ping, err } - defer ensureReaderClosed(serverResp) return parsePingResponse(cli, serverResp) } func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { var ping types.Ping if resp.header == nil { - return ping, cli.checkResponseErr(resp) + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) } ping.APIVersion = resp.header.Get("API-Version") ping.OSType = resp.header.Get("OSType") @@ -53,5 +61,6 @@ func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { if bv := resp.header.Get("Builder-Version"); bv != "" { ping.BuilderVersion = types.BuilderVersion(bv) } - return ping, cli.checkResponseErr(resp) + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) } diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go index 4591db50f..b95dbaf68 100644 --- a/vendor/github.com/docker/docker/client/plugin_create.go +++ b/vendor/github.com/docker/docker/client/plugin_create.go @@ -18,9 +18,6 @@ func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, cr query.Set("name", createOptions.RepoName) resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) - if err != nil { - return err - } ensureReaderClosed(resp) return err } diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go index 0ab7beaee..81b89732b 100644 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -15,11 +15,11 @@ func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*type return nil, nil, objectNotFoundError{object: "plugin", id: name} } resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, nil, wrapResponseError(err, resp, "plugin", name) } - defer ensureReaderClosed(resp) body, err := ioutil.ReadAll(resp.body) if err != nil { return nil, nil, err diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go index 13baa40a9..012afe61c 100644 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -4,11 +4,11 @@ import ( "context" "encoding/json" "io" - "net/http" "net/url" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" 
"github.com/pkg/errors" ) @@ -78,7 +78,7 @@ func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileg func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { // todo: do inspect before to check existing name before checking privileges newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go index ade1051a9..8285cecd6 100644 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -22,11 +22,11 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P query.Set("filters", filterJSON) } resp, err := cli.get(ctx, "/plugins", query, nil) + defer ensureReaderClosed(resp) if err != nil { return plugins, wrapResponseError(err, resp, "plugin", "") } err = json.NewDecoder(resp.body).Decode(&plugins) - ensureReaderClosed(resp) return plugins, err } diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go index 8563bab0d..51ca1040d 100644 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -15,6 +15,6 @@ func (cli *Client) PluginRemove(ctx context.Context, name string, options types. } resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "plugin", name) } diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index 52ed12446..3078335e2 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -15,6 +15,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) @@ -114,15 +115,16 @@ func (cli *Client) buildRequest(method, path string, body io.Reader, headers hea } func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { - req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) + req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) if err != nil { return serverResponse{}, err } resp, err := cli.doRequest(ctx, req) if err != nil { - return resp, err + return resp, errdefs.FromStatusCode(err, resp.statusCode) } - return resp, cli.checkResponseErr(resp) + err = cli.checkResponseErr(resp) + return resp, errdefs.FromStatusCode(err, resp.statusCode) } func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go index 09fae82f2..fd5b91413 100644 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -15,11 +15,11 @@ func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (t return 
response, err } resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index e8322f458..d093916c9 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -18,10 +18,10 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} } resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + defer ensureReaderClosed(resp) if err != nil { return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) } - defer ensureReaderClosed(resp) body, err := ioutil.ReadAll(resp.body) if err != nil { diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go index f6bf7ba47..a0289c9f4 100644 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -27,12 +27,12 @@ func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptio } resp, err := cli.get(ctx, "/secrets", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var secrets []swarm.Secret err = json.NewDecoder(resp.body).Decode(&secrets) - ensureReaderClosed(resp) return secrets, err } diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index e9d521829..c16f55580 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -8,6 +8,6 @@ func (cli *Client) SecretRemove(ctx context.Context, id string) error { return err } resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "secret", id) } diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index 8fadda4a9..620fc6cff 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -72,6 +72,7 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, var response types.ServiceCreateResponse resp, err := cli.post(ctx, "/services/create", nil, service, headers) + defer ensureReaderClosed(resp) if err != nil { return response, err } @@ -82,7 +83,6 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) } - ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go index de6aa22de..2801483b8 100644 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -20,10 +20,10 @@ func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, query := url.Values{} query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + defer 
ensureReaderClosed(serverResp) if err != nil { return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) } - defer ensureReaderClosed(serverResp) body, err := ioutil.ReadAll(serverResp.body) if err != nil { diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go index 7d53e2b9b..64d35e715 100644 --- a/vendor/github.com/docker/docker/client/service_list.go +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -24,12 +24,12 @@ func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOpt } resp, err := cli.get(ctx, "/services", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var services []swarm.Service err = json.NewDecoder(resp.body).Decode(&services) - ensureReaderClosed(resp) return services, err } diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go index fe3421bec..953a2adf5 100644 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -5,6 +5,6 @@ import "context" // ServiceRemove kills and removes a service. func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "service", serviceID) } diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index 3c21214f6..cd0f59e21 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -79,6 +79,7 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version var response types.ServiceUpdateResponse resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + defer ensureReaderClosed(resp) if err != nil { return response, err } @@ -89,6 +90,5 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) } - ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/docker/docker/client/session.go b/vendor/github.com/docker/docker/client/session.go deleted file mode 100644 index df199f3d0..000000000 --- a/vendor/github.com/docker/docker/client/session.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net" - "net/http" -) - -// DialSession returns a connection that can be used communication with daemon -func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { - req, err := http.NewRequest("POST", "/session", nil) - if err != nil { - return nil, err - } - req = cli.addHeaders(req, meta) - - return cli.setupHijackConn(ctx, req, proto) -} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go index 0c50c01a8..19f59dd58 100644 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -10,12 +10,12 @@ import ( // SwarmGetUnlockKey retrieves the swarm's unlock key. 
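SwarmGetUnlockKey below follows the same shape as the dozens of hunks above: defer the close immediately after the request, before the error check. As an illustration only (exampleGet and exampleBody are invented names), the pattern inside the client package is:

    // ensureReaderClosed tolerates the zero-value serverResponse, so it can
    // be deferred before the error check; draining and closing the body on
    // every path lets the HTTP transport reuse the connection.
    func (cli *Client) exampleGet(ctx context.Context) (exampleBody, error) {
    	var out exampleBody
    	resp, err := cli.get(ctx, "/example", nil, nil)
    	defer ensureReaderClosed(resp) // safe even when err != nil
    	if err != nil {
    		return out, err
    	}
    	err = json.NewDecoder(resp.body).Decode(&out)
    	return out, err
    }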
func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return types.SwarmUnlockKeyResponse{}, err } var response types.SwarmUnlockKeyResponse err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, err } diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go index 742ca0f04..da3c1637e 100644 --- a/vendor/github.com/docker/docker/client/swarm_init.go +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -10,12 +10,12 @@ import ( // SwarmInit initializes the swarm. func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + defer ensureReaderClosed(serverResp) if err != nil { return "", err } var response string err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, err } diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go index cfaabb25b..b52b67a88 100644 --- a/vendor/github.com/docker/docker/client/swarm_inspect.go +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -10,12 +10,12 @@ import ( // SwarmInspect inspects the swarm. func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { serverResp, err := cli.get(ctx, "/swarm", nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return swarm.Swarm{}, err } var response swarm.Swarm err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, err } diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go index e1c0a736d..44d40ba5a 100644 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -15,10 +15,10 @@ func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} } serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) } - defer ensureReaderClosed(serverResp) body, err := ioutil.ReadAll(serverResp.body) if err != nil { diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go index 42d20c1b8..4869b4449 100644 --- a/vendor/github.com/docker/docker/client/task_list.go +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -24,12 +24,12 @@ func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) } resp, err := cli.get(ctx, "/tasks", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var tasks []swarm.Task err = json.NewDecoder(resp.body).Decode(&tasks) - ensureReaderClosed(resp) return tasks, err } diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go index 1989f6d6d..8f17ff4e8 100644 --- a/vendor/github.com/docker/docker/client/version.go +++ b/vendor/github.com/docker/docker/client/version.go @@ -10,12 +10,12 @@ import ( // ServerVersion returns information of the docker client and server host. 
func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { resp, err := cli.get(ctx, "/version", nil, nil) + defer ensureReaderClosed(resp) if err != nil { return types.Version{}, err } var server types.Version err = json.NewDecoder(resp.body).Decode(&server) - ensureReaderClosed(resp) return server, err } diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go index f1f6fcdc4..92761b3c6 100644 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -12,10 +12,10 @@ import ( func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { var volume types.Volume resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + defer ensureReaderClosed(resp) if err != nil { return volume, err } err = json.NewDecoder(resp.body).Decode(&volume) - ensureReaderClosed(resp) return volume, err } diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go index f840682d2..e20b2c67c 100644 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -23,10 +23,10 @@ func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (t var volume types.Volume resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + defer ensureReaderClosed(resp) if err != nil { return volume, nil, wrapResponseError(err, resp, "volume", volumeID) } - defer ensureReaderClosed(resp) body, err := ioutil.ReadAll(resp.body) if err != nil { diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go index 284554d67..2380d5638 100644 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -22,11 +22,11 @@ func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumet query.Set("filters", filterJSON) } resp, err := cli.get(ctx, "/volumes", query, nil) + defer ensureReaderClosed(resp) if err != nil { return volumes, err } err = json.NewDecoder(resp.body).Decode(&volumes) - ensureReaderClosed(resp) return volumes, err } diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go index 70041efed..6e324708f 100644 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -23,10 +23,10 @@ func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) } serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return report, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { return report, fmt.Errorf("Error retrieving volume prune report: %v", err) diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go index fc5a71d33..79decdafa 100644 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -16,6 +16,6 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool } } resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) 
return wrapResponseError(err, resp, "volume", volumeID) } diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go index e6a2275b2..61e7456b4 100644 --- a/vendor/github.com/docker/docker/errdefs/defs.go +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -43,11 +43,6 @@ type ErrNotModified interface { NotModified() } -// ErrAlreadyExists is a special case of ErrConflict which signals that the desired object already exists -type ErrAlreadyExists interface { - AlreadyExists() -} - // ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. type ErrNotImplemented interface { NotImplemented() diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go index a28881caf..c9916e013 100644 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -130,22 +130,6 @@ func NotModified(err error) error { return errNotModified{err} } -type errAlreadyExists struct{ error } - -func (errAlreadyExists) AlreadyExists() {} - -func (e errAlreadyExists) Cause() error { - return e.error -} - -// AlreadyExists is a helper to create an error of the class with the same name from any error type -func AlreadyExists(err error) error { - if err == nil || IsAlreadyExists(err) { - return err - } - return errAlreadyExists{err} -} - type errNotImplemented struct{ error } func (errNotImplemented) NotImplemented() {} diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go new file mode 100644 index 000000000..ac9bf6d33 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -0,0 +1,172 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import ( + "fmt" + "net/http" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// GetHTTPErrorStatusCode retrieves status code from error message. +func GetHTTPErrorStatusCode(err error) int { + if err == nil { + logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") + return http.StatusInternalServerError + } + + var statusCode int + + // Stop right there + // Are you sure you should be adding a new error class here? Do one of the existing ones work? + + // Note that the below functions are already checking the error causal chain for matches. 
+ switch { + case IsNotFound(err): + statusCode = http.StatusNotFound + case IsInvalidParameter(err): + statusCode = http.StatusBadRequest + case IsConflict(err): + statusCode = http.StatusConflict + case IsUnauthorized(err): + statusCode = http.StatusUnauthorized + case IsUnavailable(err): + statusCode = http.StatusServiceUnavailable + case IsForbidden(err): + statusCode = http.StatusForbidden + case IsNotModified(err): + statusCode = http.StatusNotModified + case IsNotImplemented(err): + statusCode = http.StatusNotImplemented + case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err): + statusCode = http.StatusInternalServerError + default: + statusCode = statusCodeFromGRPCError(err) + if statusCode != http.StatusInternalServerError { + return statusCode + } + statusCode = statusCodeFromDistributionError(err) + if statusCode != http.StatusInternalServerError { + return statusCode + } + if e, ok := err.(causer); ok { + return GetHTTPErrorStatusCode(e.Cause()) + } + + logrus.WithFields(logrus.Fields{ + "module": "api", + "error_type": fmt.Sprintf("%T", err), + }).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err) + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + return statusCode +} + +// FromStatusCode creates an errdef error, based on the provided HTTP status-code +func FromStatusCode(err error, statusCode int) error { + if err == nil { + return err + } + switch statusCode { + case http.StatusNotFound: + err = NotFound(err) + case http.StatusBadRequest: + err = InvalidParameter(err) + case http.StatusConflict: + err = Conflict(err) + case http.StatusUnauthorized: + err = Unauthorized(err) + case http.StatusServiceUnavailable: + err = Unavailable(err) + case http.StatusForbidden: + err = Forbidden(err) + case http.StatusNotModified: + err = NotModified(err) + case http.StatusNotImplemented: + err = NotImplemented(err) + case http.StatusInternalServerError: + if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { + err = System(err) + } + default: + logrus.WithFields(logrus.Fields{ + "module": "api", + "status_code": fmt.Sprintf("%d", statusCode), + }).Debugf("FIXME: Got an status-code for which error does not match any expected type!!!: %d", statusCode) + + switch { + case statusCode >= 200 && statusCode < 400: + // it's a client error + case statusCode >= 400 && statusCode < 500: + err = InvalidParameter(err) + case statusCode >= 500 && statusCode < 600: + err = System(err) + default: + err = Unknown(err) + } + } + return err +} + +// statusCodeFromGRPCError returns status code according to gRPC error +func statusCodeFromGRPCError(err error) int { + switch status.Code(err) { + case codes.InvalidArgument: // code 3 + return http.StatusBadRequest + case codes.NotFound: // code 5 + return http.StatusNotFound + case codes.AlreadyExists: // code 6 + return http.StatusConflict + case codes.PermissionDenied: // code 7 + return http.StatusForbidden + case codes.FailedPrecondition: // code 9 + return http.StatusBadRequest + case codes.Unauthenticated: // code 16 + return http.StatusUnauthorized + case codes.OutOfRange: // code 11 + return http.StatusBadRequest + case codes.Unimplemented: // code 12 + return http.StatusNotImplemented + case codes.Unavailable: // code 14 + return http.StatusServiceUnavailable + default: + if e, ok := err.(causer); ok { + return statusCodeFromGRPCError(e.Cause()) + } + // codes.Canceled(1) + // codes.Unknown(2) + 
// codes.DeadlineExceeded(4) + // codes.ResourceExhausted(8) + // codes.Aborted(10) + // codes.Internal(13) + // codes.DataLoss(15) + return http.StatusInternalServerError + } +} + +// statusCodeFromDistributionError returns status code according to registry errcode +// code is loosely based on errcode.ServeJSON() in docker/distribution +func statusCodeFromDistributionError(err error) int { + switch errs := err.(type) { + case errcode.Errors: + if len(errs) < 1 { + return http.StatusInternalServerError + } + if _, ok := errs[0].(errcode.ErrorCoder); ok { + return statusCodeFromDistributionError(errs[0]) + } + case errcode.ErrorCoder: + return errs.ErrorCode().Descriptor().HTTPStatusCode + default: + if e, ok := err.(causer); ok { + return statusCodeFromDistributionError(e.Cause()) + } + } + return http.StatusInternalServerError +} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go index e0513331b..3abf07d0c 100644 --- a/vendor/github.com/docker/docker/errdefs/is.go +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -15,7 +15,6 @@ func getImplementer(err error) error { ErrForbidden, ErrSystem, ErrNotModified, - ErrAlreadyExists, ErrNotImplemented, ErrCancelled, ErrDeadline, @@ -77,12 +76,6 @@ func IsNotModified(err error) bool { return ok } -// IsAlreadyExists returns if the passed in error is a AlreadyExists error -func IsAlreadyExists(err error) bool { - _, ok := getImplementer(err).(ErrAlreadyExists) - return ok -} - // IsNotImplemented returns if the passed in error is an ErrNotImplemented func IsNotImplemented(err error) bool { _, ok := getImplementer(err).(ErrNotImplemented) diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go index 230422eac..b3af7a422 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -4,7 +4,6 @@ import ( "bufio" "fmt" "os" - "sort" "strconv" "strings" ) @@ -203,8 +202,6 @@ func (i *IdentityMapping) GIDs() []IDMap { func createIDMap(subidRanges ranges) []IDMap { idMap := []IDMap{} - // sort the ranges by lowest ID first - sort.Sort(subidRanges) containerID := 0 for _, idrange := range subidRanges { idMap = append(idMap, IDMap{ diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go index d4bbf3c9d..87514b643 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -128,8 +128,9 @@ func (bp *BytesPipe) Read(p []byte) (n int, err error) { bp.mu.Lock() if bp.bufLen == 0 { if bp.closeErr != nil { + err := bp.closeErr bp.mu.Unlock() - return 0, bp.closeErr + return 0, err } bp.wait.Wait() if bp.bufLen == 0 && bp.closeErr != nil { diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go index 4afd63c42..be0631c63 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mount.go +++ b/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -102,13 +102,13 @@ func Mounted(mountpoint string) (bool, error) { // specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See // flags.go for supported option flags. 
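A usage sketch with invented paths: flag options such as "bind" and "ro" become mount(2) flag bits, and with this fix any remaining data options are handed straight to the raw mount call instead of being re-parsed by ForceMount:

    package example

    import "github.com/docker/docker/pkg/mount"

    // bindMountReadOnly performs a read-only bind mount; src and dst are
    // assumptions for illustration.
    func bindMountReadOnly(src, dst string) error {
    	return mount.Mount(src, dst, "none", "bind,ro")
    }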
func Mount(device, target, mType, options string) error { - flag, _ := parseOptions(options) + flag, data := parseOptions(options) if flag&REMOUNT != REMOUNT { if mounted, err := Mounted(target); err != nil || mounted { return err } } - return ForceMount(device, target, mType, options) + return mount(device, target, mType, uintptr(flag), data) } // ForceMount will mount a filesystem according to the specified configuration, diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go index 36c89dc1a..307b93459 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -13,8 +13,7 @@ import ( "unsafe" ) -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from -// bind mounts. +//parseMountTable returns information about mounted filesystems func parseMountTable(filter FilterFunc) ([]*Info, error) { var rawEntries *C.struct_statfs @@ -37,7 +36,7 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) { if filter != nil { // filter out entries we're not interested in - skip, stop = filter(p) + skip, stop = filter(&mountinfo) if skip { continue } diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go index c1dba01fc..fe6e3ddba 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -7,16 +7,21 @@ import ( "os" "strconv" "strings" + + "github.com/pkg/errors" ) func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { s := bufio.NewScanner(r) out := []*Info{} + var err error for s.Scan() { - if err := s.Err(); err != nil { + if err = s.Err(); err != nil { return nil, err } /* + See http://man7.org/linux/man-pages/man5/proc.5.html + 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) @@ -52,8 +57,15 @@ func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { p.Major, _ = strconv.Atoi(mm[0]) p.Minor, _ = strconv.Atoi(mm[1]) - p.Root = fields[3] - p.Mountpoint = fields[4] + p.Root, err = strconv.Unquote(`"` + fields[3] + `"`) + if err != nil { + return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote root field", fields[3]) + } + + p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`) + if err != nil { + return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote mount point field", fields[4]) + } p.Opts = fields[5] var skip, stop bool diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go index 8a100f0bc..db3882874 100644 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -3,49 +3,49 @@ package mount // import "github.com/docker/docker/pkg/mount" // MakeShared ensures a mounted filesystem has the SHARED mount option enabled. // See the supported options in flags.go for further reference. func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") + return ensureMountedAs(mountPoint, SHARED) } // MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. // See the supported options in flags.go for further reference. 
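For example, a caller that wants recursive shared propagation now exercises the RSHARED constant rather than the re-parsed string "rshared" (the path below is an assumption for illustration):

    package example

    import "github.com/docker/docker/pkg/mount"

    func shareStorage() error {
    	// Now implemented as mount("", mnt, "none", uintptr(RSHARED), "")
    	// inside the package instead of re-parsing the option string.
    	return mount.MakeRShared("/var/lib/containers/storage")
    }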
func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") + return ensureMountedAs(mountPoint, RSHARED) } // MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. // See the supported options in flags.go for further reference. func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") + return ensureMountedAs(mountPoint, PRIVATE) } // MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option // enabled. See the supported options in flags.go for further reference. func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") + return ensureMountedAs(mountPoint, RPRIVATE) } // MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. // See the supported options in flags.go for further reference. func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") + return ensureMountedAs(mountPoint, SLAVE) } // MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. // See the supported options in flags.go for further reference. func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") + return ensureMountedAs(mountPoint, RSLAVE) } // MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option // enabled. See the supported options in flags.go for further reference. func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") + return ensureMountedAs(mountPoint, UNBINDABLE) } // MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount // option enabled. See the supported options in flags.go for further reference. func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") + return ensureMountedAs(mountPoint, RUNBINDABLE) } // MakeMount ensures that the file or directory given is a mount point, @@ -59,13 +59,13 @@ func MakeMount(mnt string) error { return nil } - return Mount(mnt, mnt, "none", "bind") + return mount(mnt, mnt, "none", uintptr(BIND), "") } -func ensureMountedAs(mountPoint, options string) error { - if err := MakeMount(mountPoint); err != nil { +func ensureMountedAs(mnt string, flags int) error { + if err := MakeMount(mnt); err != nil { return err } - return ForceMount("", mountPoint, "none", options) + return mount("", mnt, "none", uintptr(flags), "") } diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go index 4c6a93d4c..3d68800eb 100644 --- a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -14,13 +14,14 @@ var ( "amazing", "angry", "awesome", + "beautiful", "blissful", "bold", "boring", "brave", + "busy", "charming", "clever", - "cocky", "cool", "compassionate", "competent", @@ -39,16 +40,19 @@ var ( "elegant", "eloquent", "epic", + "exciting", "fervent", "festive", "flamboyant", "focused", "friendly", "frosty", + "funny", "gallant", "gifted", "goofy", "gracious", + "great", "happy", "hardcore", "heuristic", @@ -56,6 +60,8 @@ var ( "hungry", "infallible", "inspiring", + "interesting", + "intelligent", "jolly", "jovial", "keen", @@ -69,6 +75,7 @@ var ( "musing", "naughty", "nervous", + "nice", "nifty", "nostalgic", "objective", @@ -90,6 +97,7 @@ var ( "silly", "sleepy", "stoic", + "strange", "stupefied", "suspicious", "sweet", @@ -193,6 +201,9 @@ var 
( // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose "bose", + // Katherine Louise Bouman is an imaging scientist and Assistant Professor of Computer Science at the California Institute of Technology. She researches computational methods for imaging, and developed an algorithm that made possible the first visualization of a black hole using the Event Horizon Telescope. - https://en.wikipedia.org/wiki/Katie_Bouman + "bouman", + // Evelyn Boyd Granville - She was one of the first African-American women to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville "boyd", @@ -693,6 +704,9 @@ var ( // Mildred Sanderson - American mathematician best known for Sanderson's theorem concerning modular invariants. https://en.wikipedia.org/wiki/Mildred_Sanderson "sanderson", + // Satoshi Nakamoto is the name used by the unknown person or group of people who developed bitcoin, authored the bitcoin white paper, and created and deployed bitcoin's original reference implementation. https://en.wikipedia.org/wiki/Satoshi_Nakamoto + "satoshi", + // Adi Shamir - Israeli cryptographer whose numerous inventions and contributions to cryptography include the Feige Fiat Shamir identification scheme, the Rivest Shamir Adleman (RSA) public-key cryptosystem, the Shamir's secret sharing scheme, the breaking of the Merkle-Hellman cryptosystem, the TWINKLE and TWIRL factoring devices and the discovery of differential cryptanalysis (with Eli Biham). https://en.wikipedia.org/wiki/Adi_Shamir "shamir", diff --git a/vendor/github.com/docker/docker/pkg/system/args_windows.go b/vendor/github.com/docker/docker/pkg/system/args_windows.go new file mode 100644 index 000000000..b7c9487a0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/args_windows.go @@ -0,0 +1,16 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "strings", + + "golang.org/x/sys/windows" +) + +// EscapeArgs makes a Windows-style escaped command line from a set of arguments +func EscapeArgs(args []string) string { + escapedArgs := make([]string, len(args)) + for i, a := range args { + escapedArgs[i] = windows.EscapeArg(a) + } + return strings.Join(escapedArgs, " ") +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go index a1f6013f1..3049ff38a 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -18,8 +18,6 @@ import ( const ( // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" - // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System - SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" ) // MkdirAllWithACL is a wrapper for MkdirAll that creates a directory diff --git a/vendor/github.com/docker/docker/pkg/system/init_unix.go b/vendor/github.com/docker/docker/pkg/system/init_unix.go index 4996a67c1..c2bb0f4cc 100644 --- a/vendor/github.com/docker/docker/pkg/system/init_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/init_unix.go @@ -5,3 +5,8 @@ package system // import "github.com/docker/docker/pkg/system" // InitLCOW
does nothing since LCOW is a Windows-only feature func InitLCOW(experimental bool) { } + +// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported. +func ContainerdRuntimeSupported(_ bool, _ string) bool { + return true +} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go index 4910ff69d..f303aa906 100644 --- a/vendor/github.com/docker/docker/pkg/system/init_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -1,12 +1,41 @@ package system // import "github.com/docker/docker/pkg/system" -// lcowSupported determines if Linux Containers on Windows are supported. -var lcowSupported = false +import ( + "os" -// InitLCOW sets whether LCOW is supported or not + "github.com/Microsoft/hcsshim/osversion" + "github.com/sirupsen/logrus" +) + +var ( + // lcowSupported determines if Linux Containers on Windows are supported. + lcowSupported = false + + // containerdRuntimeSupported determines if ContainerD should be the runtime. + // As of March 2019, this is an experimental feature. + containerdRuntimeSupported = false +) + +// InitLCOW sets whether LCOW is supported or not. Requires RS5+ func InitLCOW(experimental bool) { v := GetOSVersion() - if experimental && v.Build >= 16299 { + if experimental && v.Build >= osversion.RS5 { lcowSupported = true } } + +// InitContainerdRuntime sets whether to use ContainerD for runtime +// on Windows. This is an experimental feature still in development, and +// also requires an environment variable to be set (so as not to turn the +// feature on simply from being experimental, which would also imply LCOW). +func InitContainerdRuntime(experimental bool, cdPath string) { + if experimental && len(cdPath) > 0 && len(os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME")) > 0 { + logrus.Warnf("Using ContainerD runtime. This feature is experimental") + containerdRuntimeSupported = true + } +} + +// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported.
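Two gates are at work above: InitLCOW now keys off the named osversion.RS5 constant (build 17763) instead of the bare 16299 (RS3), and the containerd runtime additionally requires an explicit environment opt-in. A portable sketch of the same checks, with the build number hard-coded for illustration:

    package main

    import (
    	"fmt"
    	"os"
    )

    const rs5 = 17763 // Windows 10 1809 / Server 2019, aka osversion.RS5

    // lcowSupported mirrors the gate in InitLCOW: experimental mode plus a
    // minimum Windows build.
    func lcowSupported(experimental bool, build int) bool {
    	return experimental && build >= rs5
    }

    // containerdSupported mirrors InitContainerdRuntime: experimental mode,
    // a configured containerd path, and an explicit env-var opt-in.
    func containerdSupported(experimental bool, cdPath string) bool {
    	return experimental && cdPath != "" &&
    		os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME") != ""
    }

    func main() {
    	fmt.Println(lcowSupported(true, 17763))
    	fmt.Println(containerdSupported(true, `C:\containerd\containerd.exe`))
    }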
+func ContainerdRuntimeSupported() bool { + return containerdRuntimeSupported +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json index 7a3a99ae1..250a03e13 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/default.json +++ b/vendor/github.com/docker/docker/profiles/seccomp/default.json @@ -162,6 +162,7 @@ "ioctl", "io_destroy", "io_getevents", + "io_pgetevents", "ioprio_get", "ioprio_set", "io_setup", @@ -595,7 +596,7 @@ "args": [ { "index": 0, - "value": 2080505856, + "value": 2114060288, "valueTwo": 0, "op": "SCMP_CMP_MASKED_EQ" } @@ -620,7 +621,7 @@ "args": [ { "index": 1, - "value": 2080505856, + "value": 2114060288, "valueTwo": 0, "op": "SCMP_CMP_MASKED_EQ" } diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go index 077674940..53333f43e 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go @@ -155,6 +155,7 @@ func DefaultProfile() *types.Seccomp { "ioctl", "io_destroy", "io_getevents", + "io_pgetevents", "ioprio_get", "ioprio_set", "io_setup", @@ -517,7 +518,7 @@ func DefaultProfile() *types.Seccomp { Args: []*types.Arg{ { Index: 0, - Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET, + Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP, ValueTwo: 0, Op: types.OpMaskedEqual, }, @@ -535,7 +536,7 @@ func DefaultProfile() *types.Seccomp { Args: []*types.Arg{ { Index: 1, - Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET, + Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP, ValueTwo: 0, Op: types.OpMaskedEqual, }, diff --git a/vendor/github.com/docker/go-metrics/go.mod b/vendor/github.com/docker/go-metrics/go.mod new file mode 100644 index 000000000..7e328f0cf --- /dev/null +++ b/vendor/github.com/docker/go-metrics/go.mod @@ -0,0 +1,5 @@ +module github.com/docker/go-metrics + +go 1.11 + +require github.com/prometheus/client_golang v1.1.0 diff --git a/vendor/github.com/docker/go-metrics/go.sum b/vendor/github.com/docker/go-metrics/go.sum new file mode 100644 index 000000000..b8fb9d079 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/go.sum @@ -0,0 +1,67 @@ +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt 
v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/libtrust/CONTRIBUTING.md deleted file mode 100644 index 05be0f8ab..000000000 --- a/vendor/github.com/docker/libtrust/CONTRIBUTING.md +++ /dev/null @@ -1,13 +0,0 @@ -# Contributing to libtrust - -Want to hack on libtrust? Awesome! Here are instructions to get you -started. - -libtrust is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read -[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). - -Happy hacking! diff --git a/vendor/github.com/docker/libtrust/LICENSE b/vendor/github.com/docker/libtrust/LICENSE deleted file mode 100644 index 27448585a..000000000 --- a/vendor/github.com/docker/libtrust/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/libtrust/MAINTAINERS deleted file mode 100644 index 9768175fe..000000000 --- a/vendor/github.com/docker/libtrust/MAINTAINERS +++ /dev/null @@ -1,3 +0,0 @@ -Solomon Hykes -Josh Hawn (github: jlhawn) -Derek McGowan (github: dmcgowan) diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md deleted file mode 100644 index dcffb31ae..000000000 --- a/vendor/github.com/docker/libtrust/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# libtrust - -> **WARNING** this library is no longer actively developed, and will be integrated -> in the [docker/distribution][https://www.github.com/docker/distribution] -> repository in future. - -Libtrust is library for managing authentication and authorization using public key cryptography. - -Authentication is handled using the identity attached to the public key. -Libtrust provides multiple methods to prove possession of the private key associated with an identity. - - TLS x509 certificates - - Signature verification - - Key Challenge - -Authorization and access control is managed through a distributed trust graph. -Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. - -## Copyright and license - -Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license. -Docs released under Creative commons. - diff --git a/vendor/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/libtrust/certificates.go deleted file mode 100644 index 3dcca33cb..000000000 --- a/vendor/github.com/docker/libtrust/certificates.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io/ioutil" - "math/big" - "net" - "time" -) - -type certTemplateInfo struct { - commonName string - domains []string - ipAddresses []net.IP - isCA bool - clientAuth bool - serverAuth bool -} - -func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { - // Generate a certificate template which is valid from the past week to - // 10 years from now. The usage of the certificate depends on the - // specified fields in the given certTempInfo object. 
- var ( - keyUsage x509.KeyUsage - extKeyUsage []x509.ExtKeyUsage - ) - - if info.isCA { - keyUsage = x509.KeyUsageCertSign - } - - if info.clientAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) - } - - if info.serverAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) - } - - return &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: info.commonName, - }, - NotBefore: time.Now().Add(-time.Hour * 24 * 7), - NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), - DNSNames: info.domains, - IPAddresses: info.ipAddresses, - IsCA: info.isCA, - KeyUsage: keyUsage, - ExtKeyUsage: extKeyUsage, - BasicConstraintsValid: info.isCA, - } -} - -func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { - pubCertTemplate := generateCertTemplate(subInfo) - privCertTemplate := generateCertTemplate(issInfo) - - certDER, err := x509.CreateCertificate( - rand.Reader, pubCertTemplate, privCertTemplate, - pub.CryptoPublicKey(), priv.CryptoPrivateKey(), - ) - if err != nil { - return nil, fmt.Errorf("failed to create certificate: %s", err) - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, fmt.Errorf("failed to parse certificate: %s", err) - } - - return -} - -// GenerateSelfSignedServerCert creates a self-signed certificate for the -// given key which is to be used for TLS servers with the given domains and -// IP addresses. -func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - domains: domains, - ipAddresses: ipAddresses, - serverAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateSelfSignedClientCert creates a self-signed certificate for the -// given key which is to be used for TLS clients. -func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - clientAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateCACert creates a certificate which can be used as a trusted -// certificate authority. -func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { - subjectInfo := &certTemplateInfo{ - commonName: trustedKey.KeyID(), - isCA: true, - } - issuerInfo := &certTemplateInfo{ - commonName: signer.KeyID(), - } - - return generateCert(trustedKey, signer, subjectInfo, issuerInfo) -} - -// GenerateCACertPool creates a certificate authority pool to be used for a -// TLS configuration. Any self-signed certificates issued by the specified -// trusted keys will be verified during a TLS handshake -func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { - certPool := x509.NewCertPool() - - for _, trustedKey := range trustedKeys { - cert, err := GenerateCACert(signer, trustedKey) - if err != nil { - return nil, fmt.Errorf("failed to generate CA certificate: %s", err) - } - - certPool.AddCert(cert) - } - - return certPool, nil -} - -// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". 
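The bundle loader below relies on pem.Decode returning the undecoded remainder of its input, so a simple loop walks every block in the file. A self-contained sketch of the same loop; "bundle.pem" is a placeholder path:

    package main

    import (
    	"crypto/x509"
    	"encoding/pem"
    	"fmt"
    	"io/ioutil"
    	"log"
    )

    func main() {
    	// The file holds one or more PEM "CERTIFICATE" blocks, as
    	// LoadCertificateBundle expects.
    	data, err := ioutil.ReadFile("bundle.pem")
    	if err != nil {
    		log.Fatal(err)
    	}
    	var certs []*x509.Certificate
    	for block, rest := pem.Decode(data); block != nil; block, rest = pem.Decode(rest) {
    		if block.Type != "CERTIFICATE" {
    			log.Fatalf("invalid pem block type: %s", block.Type)
    		}
    		cert, err := x509.ParseCertificate(block.Bytes)
    		if err != nil {
    			log.Fatal(err)
    		}
    		certs = append(certs, cert)
    	}
    	fmt.Printf("loaded %d certificates\n", len(certs))
    }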
-func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - certificates := []*x509.Certificate{} - var block *pem.Block - block, b = pem.Decode(b) - for ; block != nil; block, b = pem.Decode(b) { - if block.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } else { - return nil, fmt.Errorf("invalid pem block type: %s", block.Type) - } - } - - return certificates, nil -} - -// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". -func LoadCertificatePool(filename string) (*x509.CertPool, error) { - certs, err := LoadCertificateBundle(filename) - if err != nil { - return nil, err - } - pool := x509.NewCertPool() - for _, cert := range certs { - pool.AddCert(cert) - } - return pool, nil -} diff --git a/vendor/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/libtrust/doc.go deleted file mode 100644 index ec5d2159c..000000000 --- a/vendor/github.com/docker/libtrust/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package libtrust provides an interface for managing authentication and -authorization using public key cryptography. Authentication is handled -using the identity attached to the public key and verified through TLS -x509 certificates, a key challenge, or signature. Authorization and -access control is managed through a trust graph distributed between -both remote trust servers and locally cached and managed data. -*/ -package libtrust diff --git a/vendor/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/libtrust/ec_key.go deleted file mode 100644 index 00bbe4b3c..000000000 --- a/vendor/github.com/docker/libtrust/ec_key.go +++ /dev/null @@ -1,428 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" -) - -/* - * EC DSA PUBLIC KEY - */ - -// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital -// signature algorithms. -type ecPublicKey struct { - *ecdsa.PublicKey - curveName string - signatureAlgorithm *signatureAlgorithm - extended map[string]interface{} -} - -func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { - curve := cryptoPublicKey.Curve - - switch { - case curve == elliptic.P256(): - return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil - case curve == elliptic.P384(): - return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil - case curve == elliptic.P521(): - return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil - default: - return nil, errors.New("unsupported elliptic curve") - } -} - -// KeyType returns the key type for elliptic curve keys, i.e., "EC". -func (k *ecPublicKey) KeyType() string { - return "EC" -} - -// CurveName returns the elliptic curve identifier. -// Possible values are "P-256", "P-384", and "P-521". -func (k *ecPublicKey) CurveName() string { - return k.curveName -} - -// KeyID returns a distinct identifier which is unique to this Public Key. 
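The certificate helpers in certificates.go above take a general subject/issuer pair; collapsing both roles into one template gives the usual self-signed case. A runnable sketch using only the standard library, where the common name and serial number are arbitrary:

    package main

    import (
    	"crypto/ecdsa"
    	"crypto/elliptic"
    	"crypto/rand"
    	"crypto/x509"
    	"crypto/x509/pkix"
    	"fmt"
    	"math/big"
    	"time"
    )

    func main() {
    	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    	if err != nil {
    		panic(err)
    	}
    	// Same validity window as generateCertTemplate: one week in the
    	// past (clock-skew slack) to ten years out.
    	tmpl := &x509.Certificate{
    		SerialNumber: big.NewInt(1),
    		Subject:      pkix.Name{CommonName: "example-key-id"},
    		NotBefore:    time.Now().Add(-time.Hour * 24 * 7),
    		NotAfter:     time.Now().Add(time.Hour * 24 * 365 * 10),
    		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
    	}
    	// Passing tmpl as both subject and issuer yields a self-signed
    	// certificate, the degenerate case of generateCert above.
    	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
    	if err != nil {
    		panic(err)
    	}
    	cert, err := x509.ParseCertificate(der)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(cert.Subject.CommonName, "valid until", cert.NotAfter.Format(time.RFC3339))
    }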
-func (k *ecPublicKey) KeyID() string { - return keyIDFromCryptoKey(k) -} - -func (k *ecPublicKey) String() string { - return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) -} - -// Verify verifyies the signature of the data in the io.Reader using this -// PublicKey. The alg parameter should identify the digital signature -// algorithm which was used to produce the signature and should be supported -// by this public key. Returns a nil error if the signature is valid. -func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { - // For EC keys there is only one supported signature algorithm depending - // on the curve parameters. - if k.signatureAlgorithm.HeaderParam() != alg { - return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) - } - - // signature is the concatenation of (r, s), base64Url encoded. - sigLength := len(signature) - expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) - if sigLength != expectedOctetLength { - return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) - } - - rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] - r := new(big.Int).SetBytes(rBytes) - s := new(big.Int).SetBytes(sBytes) - - hasher := k.signatureAlgorithm.HashID().New() - _, err := io.Copy(hasher, data) - if err != nil { - return fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - if !ecdsa.Verify(k.PublicKey, hash, r, s) { - return errors.New("invalid signature") - } - - return nil -} - -// CryptoPublicKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { - return k.PublicKey -} - -func (k *ecPublicKey) toMap() map[string]interface{} { - jwk := make(map[string]interface{}) - for k, v := range k.extended { - jwk[k] = v - } - jwk["kty"] = k.KeyType() - jwk["kid"] = k.KeyID() - jwk["crv"] = k.CurveName() - - xBytes := k.X.Bytes() - yBytes := k.Y.Bytes() - octetLength := (k.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output so that x, y are each - // *octetLength* bytes long. - xBuf := make([]byte, octetLength-len(xBytes), octetLength) - yBuf := make([]byte, octetLength-len(yBytes), octetLength) - xBuf = append(xBuf, xBytes...) - yBuf = append(yBuf, yBytes...) - - jwk["x"] = joseBase64UrlEncode(xBuf) - jwk["y"] = joseBase64UrlEncode(yBuf) - - return jwk -} - -// MarshalJSON serializes this Public Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Public Key to DER-encoded PKIX format. -func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) - } - k.extended["kid"] = k.KeyID() // For display purposes. 
- return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *ecPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { - // JWK key type (kty) has already been determined to be "EC". - // Need to extract 'crv', 'x', 'y', and 'kid' and check for - // consistency. - - // Get the curve identifier value. - crv, err := stringFromMap(jwk, "crv") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) - } - - var ( - curve elliptic.Curve - sigAlg *signatureAlgorithm - ) - - switch { - case crv == "P-256": - curve = elliptic.P256() - sigAlg = es256 - case crv == "P-384": - curve = elliptic.P384() - sigAlg = es384 - case crv == "P-521": - curve = elliptic.P521() - sigAlg = es512 - default: - return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) - } - - // Get the X and Y coordinates for the public key point. - xB64Url, err := stringFromMap(jwk, "x") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - x, err := parseECCoordinate(xB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - - yB64Url, err := stringFromMap(jwk, "y") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - y, err := parseECCoordinate(yB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - - key := &ecPublicKey{ - PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, - curveName: crv, signatureAlgorithm: sigAlg, - } - - // Key ID is optional too, but if it exists, it should match the key. - _, ok := jwk["kid"] - if ok { - kid, err := stringFromMap(jwk, "kid") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) - } - if kid != key.KeyID() { - return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) - } - } - - key.extended = jwk - - return key, nil -} - -/* - * EC DSA PRIVATE KEY - */ - -// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature -// algorithms. -type ecPrivateKey struct { - ecPublicKey - *ecdsa.PrivateKey -} - -func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { - publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) - if err != nil { - return nil, err - } - - return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil -} - -// PublicKey returns the Public Key data associated with this Private Key. -func (k *ecPrivateKey) PublicKey() PublicKey { - return &k.ecPublicKey -} - -func (k *ecPrivateKey) String() string { - return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) -} - -// Sign signs the data read from the io.Reader using a signature algorithm supported -// by the elliptic curve private key. If the specified hashing algorithm is -// supported by this key, that hash function is used to generate the signature -// otherwise the the default hashing algorithm for this key is used. Returns -// the signature and the name of the JWK signature algorithm used, e.g., -// "ES256", "ES384", "ES512". -func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { - // Generate a signature of the data using the internal alg. 
- // The given hashId is only a suggestion, and since EC keys only support - // on signature/hash algorithm given the curve name, we disregard it for - // the elliptic curve JWK signature implementation. - hasher := k.signatureAlgorithm.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return nil, "", fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash) - if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) - } - rBytes, sBytes := r.Bytes(), s.Bytes() - octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output - rBuf := make([]byte, octetLength-len(rBytes), octetLength) - sBuf := make([]byte, octetLength-len(sBytes), octetLength) - - rBuf = append(rBuf, rBytes...) - sBuf = append(sBuf, sBytes...) - - signature = append(rBuf, sBuf...) - alg = k.signatureAlgorithm.HeaderParam() - - return -} - -// CryptoPrivateKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey { - return k.PrivateKey -} - -func (k *ecPrivateKey) toMap() map[string]interface{} { - jwk := k.ecPublicKey.toMap() - - dBytes := k.D.Bytes() - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. - n := k.ecPublicKey.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - // Create a buffer with the necessary zero-padding. - dBuf := make([]byte, octetLength-len(dBytes), octetLength) - dBuf = append(dBuf, dBytes...) - - jwk["d"] = joseBase64UrlEncode(dBuf) - - return jwk -} - -// MarshalJSON serializes this Private Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Private Key to DER-encoded PKIX format. -func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err) - } - k.extended["keyID"] = k.KeyID() // For display purposes. - return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) -} - -func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { - dB64Url, err := stringFromMap(jwk, "d") - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key: %s", err) - } - - // JWK key type (kty) has already been determined to be "EC". - // Need to extract the public key information, then extract the private - // key value 'd'. 
- publicKey, err := ecPublicKeyFromMap(jwk) - if err != nil { - return nil, err - } - - d, err := parseECPrivateParam(dB64Url, publicKey.Curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) - } - - key := &ecPrivateKey{ - ecPublicKey: *publicKey, - PrivateKey: &ecdsa.PrivateKey{ - PublicKey: *publicKey.PublicKey, - D: d, - }, - } - - return key, nil -} - -/* - * Key Generation Functions. - */ - -func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { - k = new(ecPrivateKey) - k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) - if err != nil { - return nil, err - } - - k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey - k.extended = make(map[string]interface{}) - - return -} - -// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. -func GenerateECP256PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-256 key: %s", err) - } - - k.curveName = "P-256" - k.signatureAlgorithm = es256 - - return k, nil -} - -// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384. -func GenerateECP384PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P384()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-384 key: %s", err) - } - - k.curveName = "P-384" - k.signatureAlgorithm = es384 - - return k, nil -} - -// GenerateECP521PrivateKey generates aß key pair using elliptic curve P-521. -func GenerateECP521PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P521()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-521 key: %s", err) - } - - k.curveName = "P-521" - k.signatureAlgorithm = es512 - - return k, nil -} diff --git a/vendor/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/libtrust/filter.go deleted file mode 100644 index 5b2b4fca6..000000000 --- a/vendor/github.com/docker/libtrust/filter.go +++ /dev/null @@ -1,50 +0,0 @@ -package libtrust - -import ( - "path/filepath" -) - -// FilterByHosts filters the list of PublicKeys to only those which contain a -// 'hosts' pattern which matches the given host. If *includeEmpty* is true, -// then keys which do not specify any hosts are also returned. 
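FilterByHosts matches its 'hosts' patterns with filepath.Match, so entries may be shell-style globs rather than literal hostnames. A quick illustration of those semantics:

    package main

    import (
    	"fmt"
    	"path/filepath"
    )

    func main() {
    	host := "registry.example.com"
    	for _, pattern := range []string{"*.example.com", "registry.example.com", "*.other.org"} {
    		// Match reports whether the glob covers the host; '*' does not
    		// cross separator characters, which is fine for hostnames.
    		ok, err := filepath.Match(pattern, host)
    		if err != nil {
    			panic(err)
    		}
    		fmt.Printf("%-22s -> %v\n", pattern, ok)
    	}
    }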
-func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { - filtered := make([]PublicKey, 0, len(keys)) - - for _, pubKey := range keys { - var hosts []string - switch v := pubKey.GetExtendedField("hosts").(type) { - case []string: - hosts = v - case []interface{}: - for _, value := range v { - h, ok := value.(string) - if !ok { - continue - } - hosts = append(hosts, h) - } - } - - if len(hosts) == 0 { - if includeEmpty { - filtered = append(filtered, pubKey) - } - continue - } - - // Check if any hosts match pattern - for _, hostPattern := range hosts { - match, err := filepath.Match(hostPattern, host) - if err != nil { - return nil, err - } - - if match { - filtered = append(filtered, pubKey) - continue - } - } - } - - return filtered, nil -} diff --git a/vendor/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/libtrust/hash.go deleted file mode 100644 index a2df787dd..000000000 --- a/vendor/github.com/docker/libtrust/hash.go +++ /dev/null @@ -1,56 +0,0 @@ -package libtrust - -import ( - "crypto" - _ "crypto/sha256" // Registrer SHA224 and SHA256 - _ "crypto/sha512" // Registrer SHA384 and SHA512 - "fmt" -) - -type signatureAlgorithm struct { - algHeaderParam string - hashID crypto.Hash -} - -func (h *signatureAlgorithm) HeaderParam() string { - return h.algHeaderParam -} - -func (h *signatureAlgorithm) HashID() crypto.Hash { - return h.hashID -} - -var ( - rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} - rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} - rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} - es256 = &signatureAlgorithm{"ES256", crypto.SHA256} - es384 = &signatureAlgorithm{"ES384", crypto.SHA384} - es512 = &signatureAlgorithm{"ES512", crypto.SHA512} -) - -func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { - switch { - case alg == "RS256": - return rs256, nil - case alg == "RS384": - return rs384, nil - case alg == "RS512": - return rs512, nil - default: - return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) - } -} - -func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { - switch { - case hashID == crypto.SHA512: - return rs512 - case hashID == crypto.SHA384: - return rs384 - case hashID == crypto.SHA256: - fallthrough - default: - return rs256 - } -} diff --git a/vendor/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/libtrust/jsonsign.go deleted file mode 100644 index cb2ca9a76..000000000 --- a/vendor/github.com/docker/libtrust/jsonsign.go +++ /dev/null @@ -1,657 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "sort" - "time" - "unicode" -) - -var ( - // ErrInvalidSignContent is used when the content to be signed is invalid. - ErrInvalidSignContent = errors.New("invalid sign content") - - // ErrInvalidJSONContent is used when invalid json is encountered. - ErrInvalidJSONContent = errors.New("invalid json content") - - // ErrMissingSignatureKey is used when the specified signature key - // does not exist in the JSON content. 
- ErrMissingSignatureKey = errors.New("missing signature key") -) - -type jsHeader struct { - JWK PublicKey `json:"jwk,omitempty"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c,omitempty"` -} - -type jsSignature struct { - Header jsHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected,omitempty"` -} - -type jsSignaturesSorted []jsSignature - -func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } -func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } - -func (jsbkid jsSignaturesSorted) Less(i, j int) bool { - ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() - si, sj := jsbkid[i].Signature, jsbkid[j].Signature - - if ki == kj { - return si < sj - } - - return ki < kj -} - -type signKey struct { - PrivateKey - Chain []*x509.Certificate -} - -// JSONSignature represents a signature of a json object. -type JSONSignature struct { - payload string - signatures []jsSignature - indent string - formatLength int - formatTail []byte -} - -func newJSONSignature() *JSONSignature { - return &JSONSignature{ - signatures: make([]jsSignature, 0, 1), - } -} - -// Payload returns the encoded payload of the signature. This -// payload should not be signed directly -func (js *JSONSignature) Payload() ([]byte, error) { - return joseBase64UrlDecode(js.payload) -} - -func (js *JSONSignature) protectedHeader() (string, error) { - protected := map[string]interface{}{ - "formatLength": js.formatLength, - "formatTail": joseBase64UrlEncode(js.formatTail), - "time": time.Now().UTC().Format(time.RFC3339), - } - protectedBytes, err := json.Marshal(protected) - if err != nil { - return "", err - } - - return joseBase64UrlEncode(protectedBytes), nil -} - -func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { - buf := make([]byte, len(js.payload)+len(protectedHeader)+1) - copy(buf, protectedHeader) - buf[len(protectedHeader)] = '.' - copy(buf[len(protectedHeader)+1:], js.payload) - return buf, nil -} - -// Sign adds a signature using the given private key. -func (js *JSONSignature) Sign(key PrivateKey) error { - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - js.signatures = append(js.signatures, jsSignature{ - Header: jsHeader{ - JWK: key.PublicKey(), - Algorithm: algorithm, - }, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// SignWithChain adds a signature using the given private key -// and setting the x509 chain. The public key of the first element -// in the chain must be the public key corresponding with the sign key. 
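The signing input assembled by signBytes above is the standard JWS construction: the protected header and the payload, each base64url-encoded, joined by a dot. For illustration, with joseB64 standing in for libtrust's joseBase64UrlEncode:

    package main

    import (
    	"encoding/base64"
    	"fmt"
    )

    // joseB64 stands in for libtrust's joseBase64UrlEncode: unpadded
    // base64url encoding, per the JOSE specs.
    func joseB64(b []byte) string {
    	return base64.RawURLEncoding.EncodeToString(b)
    }

    func main() {
    	// For the payload {"name":"example"}, formatLength is 17 and the
    	// tail is "}" ("fQ" once encoded); the real protected header also
    	// carries a time field.
    	protected := joseB64([]byte(`{"formatLength":17,"formatTail":"fQ"}`))
    	payload := joseB64([]byte(`{"name":"example"}`))
    	// The bytes handed to the signer are <protected> "." <payload>,
    	// exactly what signBytes assembles.
    	fmt.Println(protected + "." + payload)
    }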
-func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { - // Ensure key.Chain[0] is public key for key - //key.Chain.PublicKey - //key.PublicKey().CryptoPublicKey() - - // Verify chain - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - header := jsHeader{ - Chain: make([]string, len(chain)), - Algorithm: algorithm, - } - - for i, cert := range chain { - header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) - } - - js.signatures = append(js.signatures, jsSignature{ - Header: header, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// Verify verifies all the signatures and returns the list of -// public keys used to sign. Any x509 chains are not checked. -func (js *JSONSignature) Verify() ([]PublicKey, error) { - keys := make([]PublicKey, len(js.signatures)) - for i, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - } else if signature.Header.JWK != nil { - publicKey = signature.Header.JWK - } else { - return nil, errors.New("missing public key") - } - - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - - keys[i] = publicKey - } - return keys, nil -} - -// VerifyChains verifies all the signatures and the chains associated -// with each signature and returns the list of verified chains. -// Signatures without an x509 chain are not checked. -func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { - chains := make([][]*x509.Certificate, 0, len(js.signatures)) - for _, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - intermediates := x509.NewCertPool() - if len(signature.Header.Chain) > 1 { - intermediateChain := signature.Header.Chain[1:] - for i := range intermediateChain { - certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) - if err != nil { - return nil, err - } - intermediate, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - intermediates.AddCert(intermediate) - } - } - - verifyOptions := x509.VerifyOptions{ - Intermediates: intermediates, - Roots: ca, - } - - verifiedChains, err := cert.Verify(verifyOptions) - if err != nil { - return nil, err - } - chains = append(chains, verifiedChains...) 
- - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - } - - } - return chains, nil -} - -// JWS returns JSON serialized JWS according to -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 -func (js *JSONSignature) JWS() ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("missing signature") - } - - sort.Sort(jsSignaturesSorted(js.signatures)) - - jsonMap := map[string]interface{}{ - "payload": js.payload, - "signatures": js.signatures, - } - - return json.MarshalIndent(jsonMap, "", " ") -} - -func notSpace(r rune) bool { - return !unicode.IsSpace(r) -} - -func detectJSONIndent(jsonContent []byte) (indent string) { - if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { - quoteIndex := bytes.IndexRune(jsonContent[1:], '"') - if quoteIndex > 0 { - indent = string(jsonContent[2 : quoteIndex+1]) - } - } - return -} - -type jsParsedHeader struct { - JWK json.RawMessage `json:"jwk"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c"` -} - -type jsParsedSignature struct { - Header jsParsedHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected"` -} - -// ParseJWS parses a JWS serialized JSON object into a JSON Signature. -func ParseJWS(content []byte) (*JSONSignature, error) { - type jsParsed struct { - Payload string `json:"payload"` - Signatures []jsParsedSignature `json:"signatures"` - } - parsed := &jsParsed{} - err := json.Unmarshal(content, parsed) - if err != nil { - return nil, err - } - if len(parsed.Signatures) == 0 { - return nil, errors.New("missing signatures") - } - payload, err := joseBase64UrlDecode(parsed.Payload) - if err != nil { - return nil, err - } - - js, err := NewJSONSignature(payload) - if err != nil { - return nil, err - } - js.signatures = make([]jsSignature, len(parsed.Signatures)) - for i, signature := range parsed.Signatures { - header := jsHeader{ - Algorithm: signature.Header.Algorithm, - } - if signature.Header.Chain != nil { - header.Chain = signature.Header.Chain - } - if signature.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) - if err != nil { - return nil, err - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signature.Signature, - Protected: signature.Protected, - } - } - - return js, nil -} - -// NewJSONSignature returns a new unsigned JWS from a json byte array. -// JSONSignature will need to be signed before serializing or storing. -// Optionally, one or more signatures can be provided as byte buffers, -// containing serialized JWS signatures, to assemble a fully signed JWS -// package. It is the caller's responsibility to ensure uniqueness of the -// provided signatures.
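Before NewJSONSignature itself, here is a minimal, untested sketch of how the pieces in this file compose: wrap a payload, sign, serialize, and verify. It assumes GenerateECP256PrivateKey from the same package and elides error handling.

```go
// Illustrative sketch only, not part of the vendored source.
package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, _ := libtrust.GenerateECP256PrivateKey()

	// Wrap a JSON payload, attach one signature, and serialize as JWS.
	js, _ := libtrust.NewJSONSignature([]byte(`{"name": "example"}`))
	_ = js.Sign(key)
	jws, _ := js.JWS()

	// Parse it back and recover the signing public keys.
	parsed, _ := libtrust.ParseJWS(jws)
	keys, _ := parsed.Verify()
	fmt.Println(keys[0].KeyID())
}
```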
-func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { - var dataMap map[string]interface{} - err := json.Unmarshal(content, &dataMap) - if err != nil { - return nil, err - } - - js := newJSONSignature() - js.indent = detectJSONIndent(content) - - js.payload = joseBase64UrlEncode(content) - - // Find trailing } and whitespace, put in protected header - closeIndex := bytes.LastIndexFunc(content, notSpace) - if content[closeIndex] != '}' { - return nil, ErrInvalidJSONContent - } - lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) - if content[lastRuneIndex] == ',' { - return nil, ErrInvalidJSONContent - } - js.formatLength = lastRuneIndex + 1 - js.formatTail = content[js.formatLength:] - - if len(signatures) > 0 { - for _, signature := range signatures { - var parsedJSig jsParsedSignature - - if err := json.Unmarshal(signature, &parsedJSig); err != nil { - return nil, err - } - - // TODO(stevvooe): A lot of the code below is repeated in - // ParseJWS. It will require more refactoring to fix that. - jsig := jsSignature{ - Header: jsHeader{ - Algorithm: parsedJSig.Header.Algorithm, - }, - Signature: parsedJSig.Signature, - Protected: parsedJSig.Protected, - } - - if parsedJSig.Header.Chain != nil { - jsig.Header.Chain = parsedJSig.Header.Chain - } - - if parsedJSig.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) - if err != nil { - return nil, err - } - jsig.Header.JWK = publicKey - } - - js.signatures = append(js.signatures, jsig) - } - } - - return js, nil -} - -// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or -// struct. JWS will need to be signed before serializing or storing. -func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { - switch content.(type) { - case map[string]interface{}: - case struct{}: - default: - return nil, errors.New("invalid data type") - } - - js := newJSONSignature() - js.indent = " " - - payload, err := json.MarshalIndent(content, "", js.indent) - if err != nil { - return nil, err - } - js.payload = joseBase64UrlEncode(payload) - - // Remove '\n}' from formatted section, put in protected header - js.formatLength = len(payload) - 2 - js.formatTail = payload[js.formatLength:] - - return js, nil -} - -func readIntFromMap(key string, m map[string]interface{}) (int, bool) { - value, ok := m[key] - if !ok { - return 0, false - } - switch v := value.(type) { - case int: - return v, true - case float64: - return int(v), true - default: - return 0, false - } -} - -func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { - value, ok := m[key] - if !ok { - return "", false - } - v, ok = value.(string) - return -} - -// ParsePrettySignature parses a formatted signature into a -// JSON signature. If the signatures are missing the format information -// an error is thrown. The formatted signature must be created by -// the same method as format signature. 
-func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { - var contentMap map[string]json.RawMessage - err := json.Unmarshal(content, &contentMap) - if err != nil { - return nil, fmt.Errorf("error unmarshalling content: %s", err) - } - sigMessage, ok := contentMap[signatureKey] - if !ok { - return nil, ErrMissingSignatureKey - } - - var signatureBlocks []jsParsedSignature - err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) - if err != nil { - return nil, fmt.Errorf("error unmarshalling signatures: %s", err) - } - - js := newJSONSignature() - js.signatures = make([]jsSignature, len(signatureBlocks)) - - for i, signatureBlock := range signatureBlocks { - protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) - if err != nil { - return nil, fmt.Errorf("base64 decode error: %s", err) - } - var protectedHeader map[string]interface{} - err = json.Unmarshal(protectedBytes, &protectedHeader) - if err != nil { - return nil, fmt.Errorf("error unmarshalling protected header: %s", err) - } - - formatLength, ok := readIntFromMap("formatLength", protectedHeader) - if !ok { - return nil, errors.New("missing formatted length") - } - encodedTail, ok := readStringFromMap("formatTail", protectedHeader) - if !ok { - return nil, errors.New("missing formatted tail") - } - formatTail, err := joseBase64UrlDecode(encodedTail) - if err != nil { - return nil, fmt.Errorf("base64 decode error on tail: %s", err) - } - if js.formatLength == 0 { - js.formatLength = formatLength - } else if js.formatLength != formatLength { - return nil, errors.New("conflicting format length") - } - if len(js.formatTail) == 0 { - js.formatTail = formatTail - } else if bytes.Compare(js.formatTail, formatTail) != 0 { - return nil, errors.New("conflicting format tail") - } - - header := jsHeader{ - Algorithm: signatureBlock.Header.Algorithm, - Chain: signatureBlock.Header.Chain, - } - if signatureBlock.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) - if err != nil { - return nil, fmt.Errorf("error unmarshalling public key: %s", err) - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signatureBlock.Signature, - Protected: signatureBlock.Protected, - } - } - if js.formatLength > len(content) { - return nil, errors.New("invalid format length") - } - formatted := make([]byte, js.formatLength+len(js.formatTail)) - copy(formatted, content[:js.formatLength]) - copy(formatted[js.formatLength:], js.formatTail) - js.indent = detectJSONIndent(formatted) - js.payload = joseBase64UrlEncode(formatted) - - return js, nil -} - -// PrettySignature formats a json signature into an easy to read -// single json serialized object. 
-func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("no signatures") - } - payload, err := joseBase64UrlDecode(js.payload) - if err != nil { - return nil, err - } - payload = payload[:js.formatLength] - - sort.Sort(jsSignaturesSorted(js.signatures)) - - var marshalled []byte - var marshallErr error - if js.indent != "" { - marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) - } else { - marshalled, marshallErr = json.Marshal(js.signatures) - } - if marshallErr != nil { - return nil, marshallErr - } - - buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) - buf.Write(payload) - buf.WriteByte(',') - if js.indent != "" { - buf.WriteByte('\n') - buf.WriteString(js.indent) - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\": ") - buf.Write(marshalled) - buf.WriteByte('\n') - } else { - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\":") - buf.Write(marshalled) - } - buf.WriteByte('}') - - return buf.Bytes(), nil -} - -// Signatures provides the signatures on this JWS as opaque blobs, sorted by -// keyID. These blobs can be stored and reassembled with payloads. Internally, -// they are simply marshaled json web signatures but implementations should -// not rely on this. -func (js *JSONSignature) Signatures() ([][]byte, error) { - sort.Sort(jsSignaturesSorted(js.signatures)) - - var sb [][]byte - for _, jsig := range js.signatures { - p, err := json.Marshal(jsig) - if err != nil { - return nil, err - } - - sb = append(sb, p) - } - - return sb, nil -} - -// Merge combines the signatures from one or more other signatures into the -// method receiver. If the payloads differ for any argument, an error will be -// returned and the receiver will not be modified. -func (js *JSONSignature) Merge(others ...*JSONSignature) error { - merged := js.signatures - for _, other := range others { - if js.payload != other.payload { - return fmt.Errorf("payloads differ from merge target") - } - merged = append(merged, other.signatures...) - } - - js.signatures = merged - return nil -} diff --git a/vendor/github.com/docker/libtrust/key.go b/vendor/github.com/docker/libtrust/key.go deleted file mode 100644 index 73642db2a..000000000 --- a/vendor/github.com/docker/libtrust/key.go +++ /dev/null @@ -1,253 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" -) - -// PublicKey is a generic interface for a Public Key. -type PublicKey interface { - // KeyType returns the key type for this key. For elliptic curve keys, - // this value should be "EC". For RSA keys, this value should be "RSA". - KeyType() string - // KeyID returns a distinct identifier which is unique to this Public Key. - // The format generated by this library is a base32 encoding of a 240 bit - // hash of the public key data divided into 12 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - KeyID() string - // Verify verifies the signature of the data in the io.Reader using this - // Public Key. The alg parameter should identify the digital signature - // algorithm which was used to produce the signature and should be - // supported by this public key. Returns a nil error if the signature - // is valid.
- Verify(data io.Reader, alg string, signature []byte) error - // CryptoPublicKey returns the internal object which can be used as a - // crypto.PublicKey for use with other standard library operations. The type - // is either *rsa.PublicKey or *ecdsa.PublicKey - CryptoPublicKey() crypto.PublicKey - // These public keys can be serialized to the standard JSON encoding for - // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web - // Algorithms. - MarshalJSON() ([]byte, error) - // These keys can also be serialized to the standard PEM encoding. - PEMBlock() (*pem.Block, error) - // The string representation of a key is its key type and ID. - String() string - AddExtendedField(string, interface{}) - GetExtendedField(string) interface{} -} - -// PrivateKey is a generic interface for a Private Key. -type PrivateKey interface { - // A PrivateKey contains all fields and methods of a PublicKey of the - // same type. The MarshalJSON method also outputs the private key as a - // JSON Web Key, and the PEMBlock method outputs the private key as a - // PEM block. - PublicKey - // PublicKey returns the PublicKey associated with this PrivateKey. - PublicKey() PublicKey - // Sign signs the data read from the io.Reader using a signature algorithm - // supported by the private key. If the specified hashing algorithm is - // supported by this key, that hash function is used to generate the - // signature; otherwise the default hashing algorithm for this key is - // used. Returns the signature and identifier of the algorithm used. - Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) - // CryptoPrivateKey returns the internal object which can be used as a - // crypto.PrivateKey for use with other standard library operations. The - // type is either *rsa.PrivateKey or *ecdsa.PrivateKey - CryptoPrivateKey() crypto.PrivateKey -} - -// FromCryptoPublicKey returns a libtrust PublicKey representation of the given -// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) { - switch cryptoPublicKey := cryptoPublicKey.(type) { - case *ecdsa.PublicKey: - return fromECPublicKey(cryptoPublicKey) - case *rsa.PublicKey: - return fromRSAPublicKey(cryptoPublicKey), nil - default: - return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey) - } -} - -// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given -// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) { - switch cryptoPrivateKey := cryptoPrivateKey.(type) { - case *ecdsa.PrivateKey: - return fromECPrivateKey(cryptoPrivateKey) - case *rsa.PrivateKey: - return fromRSAPrivateKey(cryptoPrivateKey), nil - default: - return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey) - } -} - -// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust -// PublicKey or an error if there is a problem with the encoding.
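Before the parser itself, a minimal sketch of the round trip between PEMBlock and UnmarshalPublicKeyPEM; it assumes GenerateECP256PrivateKey from the same package and elides error handling.

```go
// Illustrative sketch only, not part of the vendored source.
package main

import (
	"encoding/pem"
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, _ := libtrust.GenerateECP256PrivateKey()

	block, _ := key.PublicKey().PEMBlock() // *pem.Block of type "PUBLIC KEY"
	data := pem.EncodeToMemory(block)

	pub, _ := libtrust.UnmarshalPublicKeyPEM(data)
	fmt.Println(pub.KeyID())
}
```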
-func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - return pubKeyFromPEMBlock(pemBlock) -} - -// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of -// PEM blocks appended one after the other and returns a slice of PublicKey -// objects that it finds. -func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { - pubKeys := []PublicKey{} - - for { - var pemBlock *pem.Block - pemBlock, data = pem.Decode(data) - if pemBlock == nil { - break - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - pubKey, err := pubKeyFromPEMBlock(pemBlock) - if err != nil { - return nil, err - } - - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust -// PrivateKey or an error if there is a problem with the encoding. -func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } - - var key PrivateKey - - switch { - case pemBlock.Type == "RSA PRIVATE KEY": - rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) - } - key = fromRSAPrivateKey(rsaPrivateKey) - case pemBlock.Type == "EC PRIVATE KEY": - ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) - } - key, err = fromECPrivateKey(ecPrivateKey) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) - } - - addPEMHeadersToKey(pemBlock, key.PublicKey()) - - return key, nil -} - -// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic -// Public Key to be used with libtrust. -func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Public Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Public Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC public key. - return ecPublicKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA public key. - return rsaPublicKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Public Key type not supported: %q\n", kty, - ) - } -} - -// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set -// and returns a slice of Public Key objects. 
-func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { - rawKeys, err := loadJSONKeySetRaw(data) - if err != nil { - return nil, err - } - - pubKeys := make([]PublicKey, 0, len(rawKeys)) - - for _, rawKey := range rawKeys { - pubKey, err := UnmarshalPublicKeyJWK(rawKey) - if err != nil { - return nil, err - } - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic -// Private Key to be used with libtrust. -func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Private Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Private Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC private key. - return ecPrivateKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA private key. - return rsaPrivateKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Private Key type not supported: %q\n", kty, - ) - } -} diff --git a/vendor/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/libtrust/key_files.go deleted file mode 100644 index c526de545..000000000 --- a/vendor/github.com/docker/libtrust/key_files.go +++ /dev/null @@ -1,255 +0,0 @@ -package libtrust - -import ( - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "os" - "strings" -) - -var ( - // ErrKeyFileDoesNotExist indicates that the private key file does not exist. - ErrKeyFileDoesNotExist = errors.New("key file does not exist") -) - -func readKeyFileBytes(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if os.IsNotExist(err) { - err = ErrKeyFileDoesNotExist - } else { - err = fmt.Errorf("unable to read key file %s: %s", filename, err) - } - - return nil, err - } - - return data, nil -} - -/* - Loading and Saving of Public and Private Keys in either PEM or JWK format. -*/ - -// LoadKeyFile opens the given filename and attempts to read a Private Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). -func LoadKeyFile(filename string) (PrivateKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PrivateKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPrivateKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key JWK: %s", err) - } - } else { - key, err = UnmarshalPrivateKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key PEM: %s", err) - } - } - - return key, nil -} - -// LoadPublicKeyFile opens the given filename and attempts to read a Public Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). 
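Before the loader itself, a small sketch of the extension rule shared by SaveKey, LoadKeyFile and LoadPublicKeyFile: a ".json" or ".jwk" suffix selects the JWK format, anything else selects PEM. The paths are hypothetical and error handling is elided.

```go
// Illustrative sketch only, not part of the vendored source.
package main

import "github.com/docker/libtrust"

func main() {
	key, _ := libtrust.GenerateECP256PrivateKey()

	_ = libtrust.SaveKey("/tmp/trust-key.jwk", key) // written as a JSON Web Key
	_ = libtrust.SaveKey("/tmp/trust-key.pem", key) // written as a PEM block

	jwkKey, _ := libtrust.LoadKeyFile("/tmp/trust-key.jwk") // read back as JWK
	pemKey, _ := libtrust.LoadKeyFile("/tmp/trust-key.pem") // read back as PEM
	_, _ = jwkKey, pemKey
}
```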
-func LoadPublicKeyFile(filename string) (PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PublicKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPublicKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key JWK: %s", err) - } - } else { - key, err = UnmarshalPublicKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key PEM: %s", err) - } - } - - return key, nil -} - -// SaveKey saves the given key to a file using the provided filename. -// This process will overwrite any existing file at the provided location. -func SaveKey(filename string, key PrivateKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode private key JWK: %s", err) - } - } else { - // Encode in PEM format. - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode private key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) - if err != nil { - return fmt.Errorf("unable to write private key file %s: %s", filename, err) - } - - return nil -} - -// SavePublicKey saves the given public key to the file. -func SavePublicKey(filename string, key PublicKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode public key JWK: %s", err) - } - } else { - // Encode in PEM format. - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode public key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write public key file %s: %s", filename, err) - } - - return nil -} - -// Public Key Set files - -type jwkSet struct { - Keys []json.RawMessage `json:"keys"` -} - -// LoadKeySetFile loads a key set -func LoadKeySetFile(filename string) ([]PublicKey, error) { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return loadJSONKeySetFile(filename) - } - - // Must be a PEM format file - return loadPEMKeySetFile(filename) -} - -func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { - if len(data) == 0 { - // This is okay, just return an empty slice. 
- return []json.RawMessage{}, nil - } - - keySet := jwkSet{} - - err := json.Unmarshal(data, &keySet) - if err != nil { - return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) - } - - return keySet.Keys, nil -} - -func loadJSONKeySetFile(filename string) ([]PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyJWKSet(contents) -} - -func loadPEMKeySetFile(filename string) ([]PublicKey, error) { - data, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyPEMBundle(data) -} - -// AddKeySetFile adds a key to a key set -func AddKeySetFile(filename string, key PublicKey) error { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return addKeySetJSONFile(filename, key) - } - - // Must be a PEM format file - return addKeySetPEMFile(filename, key) -} - -func addKeySetJSONFile(filename string, key PublicKey) error { - encodedKey, err := json.Marshal(key) - if err != nil { - return fmt.Errorf("unable to encode trusted client key: %s", err) - } - - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return err - } - - rawEntries, err := loadJSONKeySetRaw(contents) - if err != nil { - return err - } - - rawEntries = append(rawEntries, json.RawMessage(encodedKey)) - entriesWrapper := jwkSet{Keys: rawEntries} - - encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ") - if err != nil { - return fmt.Errorf("unable to encode trusted client keys: %s", err) - } - - err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) - } - - return nil -} - -func addKeySetPEMFile(filename string, key PublicKey) error { - // Encode to PEM, open file for appending, write PEM. - file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err) - } - defer file.Close() - - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode trusted key: %s", err) - } - - _, err = file.Write(pem.EncodeToMemory(pemBlock)) - if err != nil { - return fmt.Errorf("unable to write trusted keys file: %s", err) - } - - return nil -} diff --git a/vendor/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/libtrust/key_manager.go deleted file mode 100644 index 9a98ae357..000000000 --- a/vendor/github.com/docker/libtrust/key_manager.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "os" - "path" - "sync" -) - -// ClientKeyManager manages client keys on the filesystem -type ClientKeyManager struct { - key PrivateKey - clientFile string - clientDir string - - clientLock sync.RWMutex - clients []PublicKey - - configLock sync.Mutex - configs []*tls.Config -} - -// NewClientKeyManager loads a new manager from a set of key files, -// managed by the given private key.
-func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { - m := &ClientKeyManager{ - key: trustKey, - clientFile: clientFile, - clientDir: clientDir, - } - if err := m.loadKeys(); err != nil { - return nil, err - } - // TODO Start watching file and directory - - return m, nil -} - -func (c *ClientKeyManager) loadKeys() (err error) { - // Load authorized keys file - var clients []PublicKey - if c.clientFile != "" { - clients, err = LoadKeySetFile(c.clientFile) - if err != nil { - return fmt.Errorf("unable to load authorized keys: %s", err) - } - } - - // Add clients from authorized keys directory - files, err := ioutil.ReadDir(c.clientDir) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("unable to open authorized keys directory: %s", err) - } - for _, f := range files { - if !f.IsDir() { - publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) - if err != nil { - return fmt.Errorf("unable to load authorized key file: %s", err) - } - clients = append(clients, publicKey) - } - } - - c.clientLock.Lock() - c.clients = clients - c.clientLock.Unlock() - - return nil -} - -// RegisterTLSConfig registers a tls configuration to manager -// such that any changes to the keys may be reflected in -// the tls client CA pool -func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { - c.clientLock.RLock() - certPool, err := GenerateCACertPool(c.key, c.clients) - if err != nil { - return fmt.Errorf("CA pool generation error: %s", err) - } - c.clientLock.RUnlock() - - tlsConfig.ClientCAs = certPool - - c.configLock.Lock() - c.configs = append(c.configs, tlsConfig) - c.configLock.Unlock() - - return nil -} - -// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for -// libtrust identity authentication for the domain specified -func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - if err := clients.RegisterTLSConfig(tlsConfig); err != nil { - return nil, err - } - - // Generate cert - ips, domains, err := parseAddr(addr) - if err != nil { - return nil, err - } - // add domain that it expects clients to use - domains = append(domains, domain) - x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - return tlsConfig, nil -} - -// NewCertAuthTLSConfig creates a tls.Config for the server to use for -// certificate authentication -func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - cert, err := tls.LoadX509KeyPair(certPath, keyPath) - if err != nil { - return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - - // Verify client certificates against a CA? 
- if caPath != "" { - certPool := x509.NewCertPool() - file, err := ioutil.ReadFile(caPath) - if err != nil { - return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) - } - certPool.AppendCertsFromPEM(file) - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = certPool - } - - return tlsConfig, nil -} - -func newTLSConfig() *tls.Config { - return &tls.Config{ - NextProtos: []string{"http/1.1"}, - // Avoid fallback on insecure SSL protocols - MinVersion: tls.VersionTLS10, - } -} - -// parseAddr parses an address into an array of IPs and domains -func parseAddr(addr string) ([]net.IP, []string, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, nil, err - } - var domains []string - var ips []net.IP - ip := net.ParseIP(host) - if ip != nil { - ips = []net.IP{ip} - } else { - domains = []string{host} - } - return ips, domains, nil -} diff --git a/vendor/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/libtrust/rsa_key.go deleted file mode 100644 index dac4cacf2..000000000 --- a/vendor/github.com/docker/libtrust/rsa_key.go +++ /dev/null @@ -1,427 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" -) - -/* - * RSA DSA PUBLIC KEY - */ - -// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. -type rsaPublicKey struct { - *rsa.PublicKey - extended map[string]interface{} -} - -func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { - return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} -} - -// KeyType returns the JWK key type for RSA keys, i.e., "RSA". -func (k *rsaPublicKey) KeyType() string { - return "RSA" -} - -// KeyID returns a distinct identifier which is unique to this Public Key. -func (k *rsaPublicKey) KeyID() string { - return keyIDFromCryptoKey(k) -} - -func (k *rsaPublicKey) String() string { - return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) -} - -// Verify verifies the signature of the data in the io.Reader using this Public Key. -// The alg parameter should be the name of the JWA digital signature algorithm -// which was used to produce the signature and should be supported by this -// public key. Returns a nil error if the signature is valid. -func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { - // Verify the signature of the given data, returning a non-nil error if it is not valid. - sigAlg, err := rsaSignatureAlgorithmByName(alg) - if err != nil { - return fmt.Errorf("unable to verify Signature: %s", err) - } - - hasher := sigAlg.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) - if err != nil { - return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) - } - - return nil -} - -// CryptoPublicKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations.
The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return k.PublicKey -} - -func (k *rsaPublicKey) toMap() map[string]interface{} { - jwk := make(map[string]interface{}) - for k, v := range k.extended { - jwk[k] = v - } - jwk["kty"] = k.KeyType() - jwk["kid"] = k.KeyID() - jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) - jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) - - return jwk -} - -// MarshalJSON serializes this Public Key using the JWK JSON serialization format for -// RSA keys. -func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Public Key to DER-encoded PKIX format. -func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) - } - k.extended["kid"] = k.KeyID() // For display purposes. - return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *rsaPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { - // JWK key type (kty) has already been determined to be "RSA". - // Need to extract 'n', 'e', and 'kid' and check for - // consistency. - - // Get the modulus parameter N. - nB64Url, err := stringFromMap(jwk, "n") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) - } - - n, err := parseRSAModulusParam(nB64Url) - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) - } - - // Get the public exponent E. - eB64Url, err := stringFromMap(jwk, "e") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) - } - - e, err := parseRSAPublicExponentParam(eB64Url) - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) - } - - key := &rsaPublicKey{ - PublicKey: &rsa.PublicKey{N: n, E: e}, - } - - // Key ID is optional, but if it exists, it should match the key. - _, ok := jwk["kid"] - if ok { - kid, err := stringFromMap(jwk, "kid") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) - } - if kid != key.KeyID() { - return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) - } - } - - if _, ok := jwk["d"]; ok { - return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") - } - - key.extended = jwk - - return key, nil -} - -/* - * RSA DSA PRIVATE KEY - */ - -// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. -type rsaPrivateKey struct { - rsaPublicKey - *rsa.PrivateKey -} - -func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { - return &rsaPrivateKey{ - *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), - cryptoPrivateKey, - } -} - -// PublicKey returns the Public Key data associated with this Private Key. -func (k *rsaPrivateKey) PublicKey() PublicKey { - return &k.rsaPublicKey -} - -func (k *rsaPrivateKey) String() string { - return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) -} - -// Sign signs the data read from the io.Reader using a signature algorithm supported -// by the RSA private key. 
If the specified hashing algorithm is supported by -// this key, that hash function is used to generate the signature; otherwise the -// default hashing algorithm for this key is used. Returns the signature -// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384", -// "RS512". -func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { - // Generate a signature of the data using the internal alg. - sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID) - hasher := sigAlg.HashID().New() - - _, err = io.Copy(hasher, data) - if err != nil { - return nil, "", fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash) - if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) - } - - alg = sigAlg.HeaderParam() - - return -} - -// CryptoPrivateKey returns the internal object which can be used as a -// crypto.PrivateKey for use with other standard library operations. The type -// is either *rsa.PrivateKey or *ecdsa.PrivateKey -func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey { - return k.PrivateKey -} - -func (k *rsaPrivateKey) toMap() map[string]interface{} { - k.Precompute() // Make sure the precomputed values are stored. - jwk := k.rsaPublicKey.toMap() - - jwk["d"] = joseBase64UrlEncode(k.D.Bytes()) - jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes()) - jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes()) - jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes()) - jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes()) - jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes()) - - otherPrimes := k.Primes[2:] - - if len(otherPrimes) > 0 { - otherPrimesInfo := make([]interface{}, len(otherPrimes)) - for i, r := range otherPrimes { - otherPrimeInfo := make(map[string]string, 3) - otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes()) - crtVal := k.Precomputed.CRTValues[i] - otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes()) - otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes()) - otherPrimesInfo[i] = otherPrimeInfo - } - jwk["oth"] = otherPrimesInfo - } - - return jwk -} - -// MarshalJSON serializes this Private Key using the JWK JSON serialization format for -// RSA keys. -func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Private Key to DER-encoded PKCS1 format. -func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) { - derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey) - k.extended["keyID"] = k.KeyID() // For display purposes. - return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended) -} - -func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) { - // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that - // only the private key exponent 'd' is REQUIRED, the others are just for - // signature/decryption optimizations and SHOULD be included when the JWK - // is produced. We MAY choose to accept a JWK which only includes 'd', but - // we choose not to accept it without the extra - // fields. Only the 'oth' field will be optional (for multi-prime keys).
- privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) - } - firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) - } - - var oth interface{} - if _, ok := jwk["oth"]; ok { - oth = jwk["oth"] - delete(jwk, "oth") - } - - // JWK key type (kty) has already been determined to be "RSA". - // Need to extract the public key information, then extract the private - // key values. - publicKey, err := rsaPublicKeyFromMap(jwk) - if err != nil { - return nil, err - } - - privateKey := &rsa.PrivateKey{ - PublicKey: *publicKey.PublicKey, - D: privateExponent, - Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor}, - Precomputed: rsa.PrecomputedValues{ - Dp: firstFactorCRT, - Dq: secondFactorCRT, - Qinv: crtCoeff, - }, - } - - if oth != nil { - // Should be an array of more JSON objects. - otherPrimesInfo, ok := oth.([]interface{}) - if !ok { - return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") - } - numOtherPrimeFactors := len(otherPrimesInfo) - if numOtherPrimeFactors == 0 { - return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty") - } - otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) - productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) - crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) - - for i, val := range otherPrimesInfo { - otherPrimeinfo, ok := val.(map[string]interface{}) - if !ok { - return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") - } - - otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) - } - - crtValue := &crtValues[i] // take a pointer so the assignments below are retained - crtValue.Exp = otherFactorCRT - crtValue.Coeff = otherCrtCoeff - crtValue.R = productOfPrimes - otherPrimeFactors[i] = otherPrimeFactor - productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) - } - - privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) - privateKey.Precomputed.CRTValues = crtValues - } - - key := &rsaPrivateKey{ - rsaPublicKey: *publicKey, - PrivateKey: privateKey, - } - - return key, nil -} - -/* - * Key Generation Functions.
- */ - -func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { - k = new(rsaPrivateKey) - k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, err - } - - k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey - k.extended = make(map[string]interface{}) - - return -} - -// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. -func GenerateRSA2048PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(2048) - if err != nil { - return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) - } - - return k, nil -} - -// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. -func GenerateRSA3072PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(3072) - if err != nil { - return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) - } - - return k, nil -} - -// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. -func GenerateRSA4096PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(4096) - if err != nil { - return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) - } - - return k, nil -} diff --git a/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go deleted file mode 100644 index a5a101d3f..000000000 --- a/vendor/github.com/docker/libtrust/util.go +++ /dev/null @@ -1,363 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/elliptic" - "crypto/tls" - "crypto/x509" - "encoding/base32" - "encoding/base64" - "encoding/binary" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net/url" - "os" - "path/filepath" - "strings" - "time" -) - -// LoadOrCreateTrustKey will load a PrivateKey from the specified path -func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { - if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { - return nil, err - } - - trustKey, err := LoadKeyFile(trustKeyPath) - if err == ErrKeyFileDoesNotExist { - trustKey, err = GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("error generating key: %s", err) - } - - if err := SaveKey(trustKeyPath, trustKey); err != nil { - return nil, fmt.Errorf("error saving key file: %s", err) - } - - dir, file := filepath.Split(trustKeyPath) - if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { - return nil, fmt.Errorf("error saving public key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("error loading key file: %s", err) - } - return trustKey, nil -} - -// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity -// based authentication from the specified dockerUrl, the rootConfigPath and -// the server name to which it is connecting. -// If trustUnknownHosts is true it will automatically add the host to the -// known-hosts.json in rootConfigPath. 
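Before the function itself, a hedged usage sketch for NewIdentityAuthTLSClientConfig; the endpoint and config directory are hypothetical, and a call like this dials the host during setup, so it only succeeds against a live daemon.

```go
// Illustrative sketch only, not part of the vendored source.
package main

import "github.com/docker/libtrust"

func main() {
	tlsConfig, err := libtrust.NewIdentityAuthTLSClientConfig(
		"tcp://192.0.2.10:2376", // dockerUrl
		true,                    // trustUnknownHosts: record new hosts in known-hosts.json
		"/root/.docker/trust",   // rootConfigPath holding key.json and known-hosts.json
		"192.0.2.10",            // serverName checked against the server certificate
	)
	_, _ = tlsConfig, err
}
```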
-func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - trustKeyPath := filepath.Join(rootConfigPath, "key.json") - knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") - - u, err := url.Parse(dockerUrl) - if err != nil { - return nil, fmt.Errorf("unable to parse machine url") - } - - if u.Scheme == "unix" { - return nil, nil - } - - addr := u.Host - proto := "tcp" - - trustKey, err := LoadOrCreateTrustKey(trustKeyPath) - if err != nil { - return nil, fmt.Errorf("unable to load trust key: %s", err) - } - - knownHosts, err := LoadKeySetFile(knownHostsPath) - if err != nil { - return nil, fmt.Errorf("could not load trusted hosts file: %s", err) - } - - allowedHosts, err := FilterByHosts(knownHosts, addr, false) - if err != nil { - return nil, fmt.Errorf("error filtering hosts: %s", err) - } - - certPool, err := GenerateCACertPool(trustKey, allowedHosts) - if err != nil { - return nil, fmt.Errorf("Could not create CA pool: %s", err) - } - - tlsConfig.ServerName = serverName - tlsConfig.RootCAs = certPool - - x509Cert, err := GenerateSelfSignedClientCert(trustKey) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - tlsConfig.InsecureSkipVerify = true - - testConn, err := tls.Dial(proto, addr, tlsConfig) - if err != nil { - return nil, fmt.Errorf("tls Handshake error: %s", err) - } - - opts := x509.VerifyOptions{ - Roots: tlsConfig.RootCAs, - CurrentTime: time.Now(), - DNSName: tlsConfig.ServerName, - Intermediates: x509.NewCertPool(), - } - - certs := testConn.ConnectionState().PeerCertificates - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - - if _, err := certs[0].Verify(opts); err != nil { - if _, ok := err.(x509.UnknownAuthorityError); ok { - if trustUnknownHosts { - pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) - if err != nil { - return nil, fmt.Errorf("error extracting public key from cert: %s", err) - } - - pubKey.AddExtendedField("hosts", []string{addr}) - - if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { - return nil, fmt.Errorf("error adding machine to known hosts: %s", err) - } - } else { - return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) - } - } - } - - testConn.Close() - tlsConfig.InsecureSkipVerify = false - - return tlsConfig, nil -} - -// joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters omitted in accordance -// with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlEncode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification. 
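The padding convention described here can be demonstrated with the standard library alone; a self-contained sketch of what the encode and decode helpers do:

```go
// Illustrative sketch only: JOSE base64url strips '=' padding on encode
// and restores it on decode.
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	raw := []byte{0xfb, 0x01}

	enc := strings.TrimRight(base64.URLEncoding.EncodeToString(raw), "=")
	fmt.Println(enc) // "-wE": URL-safe alphabet, no trailing '='

	// Re-pad to a multiple of four characters before decoding.
	if m := len(enc) % 4; m != 0 {
		enc += strings.Repeat("=", 4-m)
	}
	dec, _ := base64.URLEncoding.DecodeString(enc)
	fmt.Printf("%x\n", dec) // "fb01"
}
```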
-// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - s = strings.Replace(s, "\n", "", -1) - s = strings.Replace(s, " ", "", -1) - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -} - -func keyIDEncode(b []byte) string { - s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") - var buf bytes.Buffer - var i int - for i = 0; i < len(s)/4-1; i++ { - start := i * 4 - end := start + 4 - buf.WriteString(s[start:end] + ":") - } - buf.WriteString(s[i*4:]) - return buf.String() -} - -func keyIDFromCryptoKey(pubKey PublicKey) string { - // Generate and return a 'libtrust' fingerprint of the public key. - // For an RSA key this should be: - // SHA256(DER encoded ASN1) - // Then truncated to 240 bits and encoded into 12 base32 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) - if err != nil { - return "" - } - hasher := crypto.SHA256.New() - hasher.Write(derBytes) - return keyIDEncode(hasher.Sum(nil)[:30]) -} - -func stringFromMap(m map[string]interface{}, key string) (string, error) { - val, ok := m[key] - if !ok { - return "", fmt.Errorf("%q value not specified", key) - } - - str, ok := val.(string) - if !ok { - return "", fmt.Errorf("%q value must be a string", key) - } - delete(m, key) - - return str, nil -} - -func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { - curveByteLen := (curve.Params().BitSize + 7) >> 3 - - cBytes, err := joseBase64UrlDecode(cB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - cByteLength := len(cBytes) - if cByteLength != curveByteLen { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) - } - return new(big.Int).SetBytes(cBytes), nil -} - -func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { - dBytes, err := joseBase64UrlDecode(dB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. - n := curve.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - dByteLength := len(dBytes) - - if dByteLength != octetLength { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) - } - - return new(big.Int).SetBytes(dBytes), nil -} - -func parseRSAModulusParam(nB64Url string) (*big.Int, error) { - nBytes, err := joseBase64UrlDecode(nB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(nBytes), nil -} - -func serializeRSAPublicExponentParam(e int) []byte { - // We MUST use the minimum number of octets to represent E. 
- // E is supposed to be 65537 for performance and security reasons - // and is what golang's rsa package generates, but it might be - // different if imported from some other generator. - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, uint32(e)) - var i int - for i = 0; i < 3; i++ { - if buf[i] != 0 { - break - } - } - return buf[i:] -} - -func parseRSAPublicExponentParam(eB64Url string) (int, error) { - eBytes, err := joseBase64UrlDecode(eB64Url) - if err != nil { - return 0, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - // Only the minimum number of bytes were used to represent E, but - // binary.BigEndian.Uint32 expects at least 4 bytes, so we need - // to add zero padding if necessary. - byteLen := len(eBytes) - buf := make([]byte, 4-byteLen, 4) - eBytes = append(buf, eBytes...) - - return int(binary.BigEndian.Uint32(eBytes)), nil -} - -func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) { - b64Url, err := stringFromMap(m, key) - if err != nil { - return nil, err - } - - paramBytes, err := joseBase64UrlDecode(b64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(paramBytes), nil -} - -func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) { - pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}} - for k, v := range headers { - switch val := v.(type) { - case string: - pemBlock.Headers[k] = val - case []string: - if k == "hosts" { - pemBlock.Headers[k] = strings.Join(val, ",") - } else { - // Return error, non-encodable type - } - default: - // Return error, non-encodable type - } - } - - return pemBlock, nil -} - -func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) { - cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err) - } - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - return nil, err - } - - addPEMHeadersToKey(pemBlock, pubKey) - - return pubKey, nil -} - -func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) { - for key, value := range pemBlock.Headers { - var safeVal interface{} - if key == "hosts" { - safeVal = strings.Split(value, ",") - } else { - safeVal = value - } - pubKey.AddExtendedField(key, safeVal) - } -} diff --git a/vendor/github.com/fsouza/go-dockerclient/.travis.yml b/vendor/github.com/fsouza/go-dockerclient/.travis.yml index 74e54aa77..a02ed3f24 100644 --- a/vendor/github.com/fsouza/go-dockerclient/.travis.yml +++ b/vendor/github.com/fsouza/go-dockerclient/.travis.yml @@ -3,21 +3,21 @@ language: go go: - 1.11.x - 1.12.x + - 1.13rc1 os: - linux - osx - windows env: matrix: - - GOARCH=amd64 DEP_TOOL=mod GO111MODULE=on - - GOARCH=386 DEP_TOOL=mod GO111MODULE=on - - GOARCH=amd64 DEP_TOOL=dep GO111MODULE=off - - GOARCH=386 DEP_TOOL=dep GO111MODULE=off + - GOARCH=amd64 + - GOARCH=386 global: - GOPROXY=https://proxy.golang.org + - GO111MODULE=on install: - travis-scripts/win-setup.bash - - make testdeps DEP_TOOL=${DEP_TOOL} + - make testdeps script: - travis_wait 25 travis-scripts/run-tests.bash services: @@ -26,8 +26,8 @@ matrix: fast_finish: true exclude: - os: osx - env: GOARCH=386 DEP_TOOL=dep GO111MODULE=off + env: GOARCH=386 - os: osx - env: GOARCH=386 DEP_TOOL=mod GO111MODULE=on + env: GOARCH=386 allow_failures: - os: windows diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS
b/vendor/github.com/fsouza/go-dockerclient/AUTHORS index 4e9f7b8e8..a8ae99976 100644 --- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS +++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS @@ -15,6 +15,7 @@ Andrews Medina Andrey Sibiryov Andy Goldstein Anirudh Aithal +Antoine Brechon Antonio Murdaca Artem Sidorenko Arthur Rodrigues @@ -51,6 +52,7 @@ Damien Lespiau Damon Wang Dan Williams Daniel, Dao Quang Minh +Daniel Black Daniel Garcia Daniel Hess Daniel Hiltgen @@ -185,6 +187,7 @@ Tim Schindler Timothy St. Clair Tobi Knaup Tom Wilkie +Tomas Knappek Tonic ttyh061 upccup diff --git a/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml b/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml deleted file mode 100644 index 4be9ee73a..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml +++ /dev/null @@ -1,23 +0,0 @@ -[[constraint]] - name = "github.com/Microsoft/go-winio" - version = "v0.4.11" - -[[constraint]] - branch = "master" - name = "github.com/docker/docker" - -[[constraint]] - name = "github.com/docker/go-units" - version = "v0.3.3" - -[[constraint]] - name = "github.com/google/go-cmp" - version = "v0.2.0" - -[[constraint]] - name = "github.com/gorilla/mux" - version = "v1.6.2 - v1.7.0" - -[[override]] - name = "github.com/docker/libnetwork" - revision = "19279f0492417475b6bfbd0aa529f73e8f178fb5" diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile index be71a3c26..858adec1b 100644 --- a/vendor/github.com/fsouza/go-dockerclient/Makefile +++ b/vendor/github.com/fsouza/go-dockerclient/Makefile @@ -7,8 +7,6 @@ test \ integration -DEP_TOOL ?= mod - all: test staticcheck: @@ -23,12 +21,7 @@ fmt: gofumpt -s -w . testdeps: -ifeq ($(DEP_TOOL), dep) - GO111MODULE=off go get -u github.com/golang/dep/cmd/dep - dep ensure -v -else go mod download -endif pretest: staticcheck fmtcheck diff --git a/vendor/github.com/fsouza/go-dockerclient/README.md b/vendor/github.com/fsouza/go-dockerclient/README.md index 501b967f7..f310ccc92 100644 --- a/vendor/github.com/fsouza/go-dockerclient/README.md +++ b/vendor/github.com/fsouza/go-dockerclient/README.md @@ -26,6 +26,12 @@ feature may get implemented/merged. For new projects, using the official SDK is probably more appropriate as go-dockerclient lags behind the official SDK. +When using the official SDK, keep in mind that because of how its +dependencies are organized, you may need some extra steps in order to be able +to import it in your projects (see +[#784](https://github.com/fsouza/go-dockerclient/issues/784) and +[moby/moby#28269](https://github.com/moby/moby/issues/28269)). + ## Example ```go @@ -85,7 +91,8 @@ func main() { If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables `DOCKER_HOST`, -`DOCKER_TLS_VERIFY`, `DOCKER_CERT_PATH`, you can use NewClientFromEnv. +`DOCKER_TLS_VERIFY`, `DOCKER_CERT_PATH`, `DOCKER_API_VERSION`, you can use +NewClientFromEnv. ```go @@ -118,12 +125,13 @@ Commited code must pass: Running ``make test`` will check all of these. You can reformat the code with ``make fmt``. -## Vendoring / Modules +## Modules + +go-dockerclient supports Go modules. -go-dockerclient supports [dep](https://github.com/golang/dep/) for vendoring -and can also be installed as a module. If you're using dep or Go modules, you -should be able to pick go-dockerclient releases and get the proper -dependencies.
+If you're using dep, you can check the [releases +page](https://github.com/fsouza/go-dockerclient/releases) for the latest +release fully compatible with dep. With other vendoring tools, users might need to specify go-dockerclient's dependencies manually. diff --git a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml index 6f49a8b84..793d88b7a 100644 --- a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml +++ b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml @@ -5,20 +5,12 @@ clone_folder: c:\gopath\src\github.com\fsouza\go-dockerclient environment: GOPATH: c:\gopath GOPROXY: https://proxy.golang.org + GO111MODULE: on SKIP_FMT_CHECK: 1 matrix: - - GOVERSION: &go111 "1.11.10" - DEP_TOOL: dep - GO111MODULE: off - - GOVERSION: &go112 "1.12.5" - DEP_TOOL: dep - GO111MODULE: off - - GOVERSION: *go111 - DEP_TOOL: mod - GO111MODULE: on - - GOVERSION: *go112 - DEP_TOOL: mod - GO111MODULE: on + - GOVERSION: "1.11.13" + - GOVERSION: "1.12.9" + - GOVERSION: "1.13rc1" install: - choco install make - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% @@ -26,8 +18,8 @@ install: - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.zip - 7z x go%GOVERSION%.windows-amd64.zip -y -oC:\ > NUL build_script: - - make testdeps DEP_TOOL=%DEP_TOOL% + - make testdeps test_script: - - make pretest gotest DEP_TOOL=%DEP_TOOL% + - make pretest gotest matrix: fast_finish: true diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go index e7de97701..0062e5c5c 100644 --- a/vendor/github.com/fsouza/go-dockerclient/auth.go +++ b/vendor/github.com/fsouza/go-dockerclient/auth.go @@ -93,9 +93,11 @@ func NewAuthConfigurationsFromFile(path string) (*AuthConfigurations, error) { func cfgPaths(dockerConfigEnv string, homeEnv string) []string { var paths []string if dockerConfigEnv != "" { + paths = append(paths, path.Join(dockerConfigEnv, "plaintext-passwords.json")) paths = append(paths, path.Join(dockerConfigEnv, "config.json")) } if homeEnv != "" { + paths = append(paths, path.Join(homeEnv, ".docker", "plaintext-passwords.json")) paths = append(paths, path.Join(homeEnv, ".docker", "config.json")) paths = append(paths, path.Join(homeEnv, ".dockercfg")) } diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go index 9953e3253..6f394bfc1 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client.go +++ b/vendor/github.com/fsouza/go-dockerclient/client.go @@ -262,12 +262,14 @@ func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString stri } // NewClientFromEnv returns a Client instance ready for communication created from -// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH. +// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH, +// and DOCKER_API_VERSION. // // See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. // See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. 
+// See https://github.com/moby/moby/blob/28d7dba41d0c0d9c7f0dafcc79d3c59f2b3f5dc3/client/options.go#L51 func NewClientFromEnv() (*Client, error) { - client, err := NewVersionedClientFromEnv("") + client, err := NewVersionedClientFromEnv(os.Getenv("DOCKER_API_VERSION")) if err != nil { return nil, err } @@ -527,7 +529,20 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error return err } } - req, err := http.NewRequest(method, c.getURL(path), streamOptions.in) + return c.streamUrl(method, c.getURL(path), streamOptions) +} + +func (c *Client) streamUrl(method, url string, streamOptions streamOptions) error { + if (method == "POST" || method == "PUT") && streamOptions.in == nil { + streamOptions.in = bytes.NewReader(nil) + } + if !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { + err := c.checkAPIVersion() + if err != nil { + return err + } + } + req, err := http.NewRequest(method, url, streamOptions.in) if err != nil { return err } @@ -858,6 +873,28 @@ func (c *Client) getURL(path string) string { return fmt.Sprintf("%s%s", urlStr, path) } +func (c *Client) getPath(basepath string, opts interface{}) (string, error) { + urlStr := strings.TrimRight(c.endpointURL.String(), "/") + if c.endpointURL.Scheme == unixProtocol || c.endpointURL.Scheme == namedPipeProtocol { + urlStr = "" + } + queryStr, requiredAPIVersion := queryStringVersion(opts) + + if c.requestedAPIVersion != nil { + if c.requestedAPIVersion.GreaterThanOrEqualTo(requiredAPIVersion) { + return fmt.Sprintf("%s/v%s%s?%s", urlStr, c.requestedAPIVersion, basepath, queryStr), nil + } else { + return "", fmt.Errorf("API %s requires version %s, requested version %s is insufficient", + basepath, requiredAPIVersion, c.requestedAPIVersion) + } + } + if requiredAPIVersion != nil { + return fmt.Sprintf("%s/v%s%s?%s", urlStr, requiredAPIVersion, basepath, queryStr), nil + } else { + return fmt.Sprintf("%s%s?%s", urlStr, basepath, queryStr), nil + } +} + // getFakeNativeURL returns the URL needed to make an HTTP request over a UNIX // domain socket to the given path. 
func (c *Client) getFakeNativeURL(path string) string { @@ -874,17 +911,18 @@ func (c *Client) getFakeNativeURL(path string) string { return fmt.Sprintf("%s%s", urlStr, path) } -func queryString(opts interface{}) string { +func queryStringVersion(opts interface{}) (string, APIVersion) { if opts == nil { - return "" + return "", nil } value := reflect.ValueOf(opts) if value.Kind() == reflect.Ptr { value = value.Elem() } if value.Kind() != reflect.Struct { - return "" + return "", nil } + var apiVersion APIVersion = nil items := url.Values(map[string][]string{}) for i := 0; i < value.NumField(); i++ { field := value.Type().Field(i) @@ -897,53 +935,80 @@ func queryString(opts interface{}) string { } else if key == "-" { continue } - addQueryStringValue(items, key, value.Field(i)) + if addQueryStringValue(items, key, value.Field(i)) { + verstr := field.Tag.Get("ver") + if verstr != "" { + ver, _ := NewAPIVersion(verstr) + if apiVersion == nil { + apiVersion = ver + } else if ver.GreaterThan(apiVersion) { + apiVersion = ver + } + } + } } - return items.Encode() + return items.Encode(), apiVersion } -func addQueryStringValue(items url.Values, key string, v reflect.Value) { +func queryString(opts interface{}) string { + s, _ := queryStringVersion(opts) + return s +} + +func addQueryStringValue(items url.Values, key string, v reflect.Value) bool { switch v.Kind() { case reflect.Bool: if v.Bool() { items.Add(key, "1") + return true } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: if v.Int() > 0 { items.Add(key, strconv.FormatInt(v.Int(), 10)) + return true } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: if v.Uint() > 0 { items.Add(key, strconv.FormatUint(v.Uint(), 10)) + return true } case reflect.Float32, reflect.Float64: if v.Float() > 0 { items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64)) + return true } case reflect.String: if v.String() != "" { items.Add(key, v.String()) + return true } case reflect.Ptr: if !v.IsNil() { if b, err := json.Marshal(v.Interface()); err == nil { items.Add(key, string(b)) + return true } } case reflect.Map: if len(v.MapKeys()) > 0 { if b, err := json.Marshal(v.Interface()); err == nil { items.Add(key, string(b)) + return true } } case reflect.Array, reflect.Slice: vLen := v.Len() + var valuesAdded int if vLen > 0 { for i := 0; i < vLen; i++ { - addQueryStringValue(items, key, v.Index(i)) + if addQueryStringValue(items, key, v.Index(i)) { + valuesAdded += 1 + } } } + return valuesAdded > 0 } + return false } // Error represents failures in the API. It represents a failure from the API. 
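Note on the client.go changes above: addQueryStringValue now reports whether a field actually contributed a query parameter, and queryStringVersion uses that to collect the highest `ver` struct tag among the fields that were really set, so a request only insists on a newer remote API when a version-gated option (such as PullImageOptions.Platform, ver:"1.32") is in use. A rough standalone sketch of that idea — requiredVersion, versionLess, and PullOpts are made-up names for illustration, not go-dockerclient API:

```go
// Sketch only: mimics the `ver` tag mechanism from the diff above.
// The real logic lives in the unexported queryStringVersion and
// addQueryStringValue helpers.
package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// PullOpts mirrors the shape of PullImageOptions: a field tagged ver:"1.32"
// should only raise the required remote API version when it is actually set.
type PullOpts struct {
	Repository string `qs:"fromImage"`
	Tag        string
	Platform   string `ver:"1.32"`
}

// requiredVersion returns the highest `ver` tag among the string fields
// carrying a non-empty value, or "" if the request has no version floor.
func requiredVersion(opts interface{}) string {
	v := reflect.ValueOf(opts)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	max := ""
	for i := 0; i < v.NumField(); i++ {
		ver := v.Type().Field(i).Tag.Get("ver")
		if ver == "" || v.Field(i).String() == "" {
			continue // unset fields must not bump the version
		}
		if max == "" || versionLess(max, ver) {
			max = ver
		}
	}
	return max
}

// versionLess compares dotted versions numerically, so "1.9" < "1.32".
func versionLess(a, b string) bool {
	as, bs := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(as) && i < len(bs); i++ {
		ai, _ := strconv.Atoi(as[i])
		bi, _ := strconv.Atoi(bs[i])
		if ai != bi {
			return ai < bi
		}
	}
	return len(as) < len(bs)
}

func main() {
	fmt.Println(requiredVersion(&PullOpts{Repository: "alpine"}))                    // "" (no floor)
	fmt.Println(requiredVersion(&PullOpts{Repository: "alpine", Platform: "arm64"})) // "1.32"
}
```

Tying the version floor to fields that were actually set keeps older daemons working for callers that never touch the new options.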
diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go index e40c9c2e4..898646fbf 100644 --- a/vendor/github.com/fsouza/go-dockerclient/container.go +++ b/vendor/github.com/fsouza/go-dockerclient/container.go @@ -53,6 +53,7 @@ type APIMount struct { Mode string `json:"Mode,omitempty" yaml:"Mode,omitempty" toml:"Mode,omitempty"` RW bool `json:"RW,omitempty" yaml:"RW,omitempty" toml:"RW,omitempty"` Propagation string `json:"Propagation,omitempty" yaml:"Propagation,omitempty" toml:"Propagation,omitempty"` + Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"` } // APIContainers represents each container in the list returned by diff --git a/vendor/github.com/fsouza/go-dockerclient/go.mod b/vendor/github.com/fsouza/go-dockerclient/go.mod index b4e75afc8..90183e9d2 100644 --- a/vendor/github.com/fsouza/go-dockerclient/go.mod +++ b/vendor/github.com/fsouza/go-dockerclient/go.mod @@ -4,23 +4,23 @@ go 1.11 require ( github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 - github.com/Microsoft/go-winio v0.4.12 + github.com/Microsoft/go-winio v0.4.14 + github.com/Microsoft/hcsshim v0.8.6 // indirect github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect - github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16 + github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.4.0 github.com/gogo/protobuf v1.2.1 // indirect github.com/golang/protobuf v1.3.0 // indirect - github.com/google/go-cmp v0.3.0 - github.com/gorilla/mux v1.7.2 + github.com/google/go-cmp v0.3.1 + github.com/gorilla/mux v1.7.3 github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd github.com/opencontainers/go-digest v1.0.0-rc1 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect github.com/opencontainers/runc v0.1.1 // indirect - github.com/pkg/errors v0.8.1 // indirect - github.com/sirupsen/logrus v1.3.0 // indirect - golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 - golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect - golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa + golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 + golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542 + google.golang.org/grpc v1.22.0 // indirect gotest.tools v2.2.0+incompatible // indirect ) diff --git a/vendor/github.com/fsouza/go-dockerclient/go.sum b/vendor/github.com/fsouza/go-dockerclient/go.sum index bc93f2dae..00c823418 100644 --- a/vendor/github.com/fsouza/go-dockerclient/go.sum +++ b/vendor/github.com/fsouza/go-dockerclient/go.sum @@ -1,27 +1,36 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc= -github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/hcsshim 
v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16 h1:dmUn0SuGx7unKFwxyeQ/oLUHhEfZosEDrpmYM+6MTuc= -github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b h1:+Ga+YpCDpcY1fln6GI0fiiirpqHGcob5/Vk3oKNuGdU= +github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/gorilla/mux v1.7.1 h1:Dw4jY2nghMMRsh1ol8dv1axHkDwMQK2DHerMNJsIpJU= -github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= -github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd h1:anPrsicrIi2ColgWTVPk+TrN42hJIWlfPHSBP9S0ZkM= github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd/go.mod h1:3LVOLeyx9XVvwPgrt2be44XgSqndprz1G18rSk8KD84= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -38,23 +47,39 @@ 
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sirupsen/logrus v1.3.0 h1:hI/7Q+DtNZ2kINb6qt/lS+IyXnHQe9e90POfeewL/ME= -github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa h1:lqti/xP+yD/6zH5TqEwx2MilNIJY5Vbc6Qr8J3qyPIQ= -golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542 h1:6ZQFf1D2YYDDI7eSwW8adlkkavTB9sw5I24FVtEvNUQ= +golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/fsouza/go-dockerclient/image.go b/vendor/github.com/fsouza/go-dockerclient/image.go index f9e1c6f04..31b6c53f4 100644 --- a/vendor/github.com/fsouza/go-dockerclient/image.go +++ b/vendor/github.com/fsouza/go-dockerclient/image.go @@ -88,7 +88,7 @@ var ( // InputStream are provided in BuildImageOptions ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream") - // ErrMustSpecifyNames is the error rreturned when the Names field on + // ErrMustSpecifyNames is the error returned when the Names field on // ExportImagesOptions is nil or empty ErrMustSpecifyNames = errors.New("must specify at least one name to export") ) @@ -288,6 +288,7 @@ func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error type PullImageOptions struct { Repository string `qs:"fromImage"` Tag string + Platform string `ver:"1.32"` // Only required for Docker Engine 1.9 or 1.10 w/ Remote API < 1.21 // and Docker Engine < 1.9 @@ -318,12 +319,15 @@ func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error opts.Repository = parts[0] opts.Tag = parts[1] } - return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) + return c.createImage(&opts, headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) } -func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration, context context.Context) error { - path := "/images/create?" 
+ qs - return c.stream("POST", path, streamOptions{ +func (c *Client) createImage(opts interface{}, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration, context context.Context) error { + url, err := c.getPath("/images/create", opts) + if err != nil { + return err + } + return c.streamUrl("POST", url, streamOptions{ setRawTerminal: true, headers: headers, in: in, @@ -394,7 +398,29 @@ func (c *Client) ExportImages(opts ExportImagesOptions) error { if opts.Names == nil || len(opts.Names) == 0 { return ErrMustSpecifyNames } - return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{ + // API < 1.25 allows multiple name values + // 1.25 says name must be a comma separated list + var err error + var exporturl string + if c.requestedAPIVersion.GreaterThanOrEqualTo(apiVersion125) { + var str string = opts.Names[0] + for _, val := range opts.Names[1:] { + str += "," + val + } + exporturl, err = c.getPath("/images/get", ExportImagesOptions{ + Names: []string{str}, + OutputStream: opts.OutputStream, + InactivityTimeout: opts.InactivityTimeout, + Context: opts.Context, + + }) + } else { + exporturl, err = c.getPath("/images/get", &opts) + } + if err != nil { + return err + } + return c.streamUrl("GET", exporturl, streamOptions{ setRawTerminal: true, stdout: opts.OutputStream, inactivityTimeout: opts.InactivityTimeout, @@ -435,7 +461,7 @@ func (c *Client) ImportImage(opts ImportImageOptions) error { opts.InputStream = f opts.Source = "-" } - return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) + return c.createImage(&opts, nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) } // BuildImageOptions present the set of informations available for building an @@ -609,7 +635,7 @@ func isURL(u string) bool { } func headersWithAuth(auths ...registryAuth) (map[string]string, error) { - var headers = make(map[string]string) + headers := make(map[string]string) for _, auth := range auths { if auth.isEmpty() { diff --git a/vendor/github.com/fsouza/go-dockerclient/network.go b/vendor/github.com/fsouza/go-dockerclient/network.go index 8c03b9ae6..2331e08bf 100644 --- a/vendor/github.com/fsouza/go-dockerclient/network.go +++ b/vendor/github.com/fsouza/go-dockerclient/network.go @@ -114,15 +114,26 @@ func (c *Client) NetworkInfo(id string) (*Network, error) { type CreateNetworkOptions struct { Name string `json:"Name" yaml:"Name" toml:"Name"` Driver string `json:"Driver" yaml:"Driver" toml:"Driver"` + Scope string `json:"Scope" yaml:"Scope" toml:"Scope"` IPAM *IPAMOptions `json:"IPAM,omitempty" yaml:"IPAM" toml:"IPAM"` + ConfigFrom *NetworkConfigFrom `json:"ConfigFrom,omitempty" yaml:"ConfigFrom" toml:"ConfigFrom"` Options map[string]interface{} `json:"Options" yaml:"Options" toml:"Options"` Labels map[string]string `json:"Labels" yaml:"Labels" toml:"Labels"` CheckDuplicate bool `json:"CheckDuplicate" yaml:"CheckDuplicate" toml:"CheckDuplicate"` Internal bool `json:"Internal" yaml:"Internal" toml:"Internal"` EnableIPv6 bool `json:"EnableIPv6" yaml:"EnableIPv6" toml:"EnableIPv6"` + Attachable bool `json:"Attachable" yaml:"Attachable" toml:"Attachable"` + ConfigOnly bool `json:"ConfigOnly" yaml:"ConfigOnly" toml:"ConfigOnly"` + Ingress bool `json:"Ingress" yaml:"Ingress" toml:"Ingress"` Context context.Context `json:"-"` } +// NetworkConfigFrom is used in network creation for specifying the source of a +// network configuration. 
+type NetworkConfigFrom struct { + Network string `json:"Network" yaml:"Network" toml:"Network"` +} + // IPAMOptions controls IP Address Management when creating a network // // See https://goo.gl/T8kRVH for more details. @@ -225,6 +236,7 @@ type EndpointConfig struct { GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"` GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"` MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` + DriverOpts map[string]string `json:"DriverOpts,omitempty" yaml:"DriverOpts,omitempty" toml:"DriverOpts,omitempty"` } // EndpointIPAMConfig represents IPAM configurations for an diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index 79668ff5c..a4b8c0cd3 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -38,7 +38,6 @@ package proto import ( "fmt" "log" - "os" "reflect" "sort" "strconv" @@ -194,7 +193,7 @@ func (p *Properties) Parse(s string) { // "bytes,49,opt,name=foo,def=hello!" fields := strings.Split(s, ",") // breaks def=, but handled below. if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + log.Printf("proto: tag has too few fields: %q", s) return } @@ -214,7 +213,7 @@ func (p *Properties) Parse(s string) { p.WireType = WireBytes // no numeric converter for non-numeric types default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + log.Printf("proto: tag has unknown wire type: %q", s) return } diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 000000000..70276e8f5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,141 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. 
+func Is(any *any.Any, pb proto.Message) bool { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. + if any == nil { + return false + } + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 000000000..78ee52334 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto + +package any + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. 
Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_b53526c13ae22eb4, []int{0} +} + +func (*Any) XXX_WellKnownType() string { return "Any" } + +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } + +var fileDescriptor_b53526c13ae22eb4 = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 
0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, + 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, + 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, + 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 000000000..493294255 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,154 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... 
+// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. 
+ bytes value = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 000000000..c0d595da7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 000000000..26d1ca2fb --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) * time.Nanosecond + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 000000000..0d681ee21 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,161 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package duration + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_23597b2ebd7ac6c5, []int{0} +} + +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } + +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, + 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, + 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, + 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 000000000..975fce41a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds.
For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 000000000..8da0df01a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,132 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. 
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
+	ts := &tspb.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
+// Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *tspb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 000000000..31cd846de
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+package timestamp
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
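Stepping back to the helpers in `ptypes/timestamp.go` above: a short sketch (arbitrary values, not part of the vendored code) showing the round trip, the `[0, 1e9)` nanos constraint, and the RFC 3339 formatting:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	tspb "github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
	// time.Time -> proto and back. TimestampProto validates the result.
	ts, err := ptypes.TimestampProto(time.Date(2019, 1, 2, 3, 4, 5, 0, time.UTC))
	if err != nil {
		panic(err)
	}
	t, err := ptypes.Timestamp(ts)
	fmt.Println(t, err) // 2019-01-02 03:04:05 +0000 UTC <nil>

	// TimestampString formats valid values as RFC 3339.
	fmt.Println(ptypes.TimestampString(ts)) // 2019-01-02T03:04:05Z

	// Unlike Duration, the nanos field must be non-negative: it counts
	// forward from the (possibly negative) seconds value.
	bad := &tspb.Timestamp{Seconds: 0, Nanos: -1}
	if _, err := ptypes.Timestamp(bad); err != nil {
		fmt.Println("invalid:", err) // nanos not in range [0, 1e9)
	}
}
```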
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. 
In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_292007bbfe81227e, []int{0} +} + +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } + +var fileDescriptor_292007bbfe81227e = []byte{ + // 191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, + 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, + 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, + 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, + 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 
0x50, 0xb5, 0x7a, + 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 000000000..eafb3fa03 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,135 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. 
+// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ int32 nanos = 2; +} diff --git a/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/gorilla/mux/.travis.yml deleted file mode 100644 index d003ad922..000000000 --- a/vendor/github.com/gorilla/mux/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: go - - -matrix: - include: - - go: 1.7.x - - go: 1.8.x - - go: 1.9.x - - go: 1.10.x - - go: 1.11.x - - go: 1.x - env: LATEST=true - - go: tip - allow_failures: - - go: tip - -install: - - # Skip - -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - if [[ "$LATEST" = true ]]; then go vet .; fi - - go test -v -race ./... diff --git a/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md deleted file mode 100644 index 232be82e4..000000000 --- a/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,11 +0,0 @@ -**What version of Go are you running?** (Paste the output of `go version`) - - -**What version of gorilla/mux are you at?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`) - - -**Describe your problem** (and what you have tried so far) - - -**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it) - diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index c661599ab..92e422eed 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -2,6 +2,7 @@ [![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) [![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) +[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux) [![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) ![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png) @@ -29,6 +30,7 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv * [Walking Routes](#walking-routes) * [Graceful Shutdown](#graceful-shutdown) * [Middleware](#middleware) +* [Handling CORS Requests](#handling-cors-requests) * [Testing Handlers](#testing-handlers) * [Full Example](#full-example) @@ -491,6 +493,73 @@ r.Use(amw.Middleware) Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it. +### Handling CORS Requests + +[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header. + +* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin` +* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route +* If you do not specify any methods, then: +> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers. 
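Before the README's full server example below, here is a sketch of how that OPTIONS-matcher requirement can be verified in a test with `net/http/httptest` (the route, methods, and empty handler are hypothetical choices for illustration):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// The OPTIONS matcher below is what allows the middleware to set the header.
	r.HandleFunc("/foo", func(w http.ResponseWriter, r *http.Request) {}).
		Methods(http.MethodGet, http.MethodDelete, http.MethodOptions)
	r.Use(mux.CORSMethodMiddleware(r))

	req := httptest.NewRequest(http.MethodOptions, "/foo", nil)
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)
	fmt.Println(rec.Header().Get("Access-Control-Allow-Methods")) // GET,DELETE,OPTIONS
}
```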
+
+Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers:
+
+```go
+package main
+
+import (
+	"net/http"
+	"github.com/gorilla/mux"
+)
+
+func main() {
+	r := mux.NewRouter()
+
+	// IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers
+	r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions)
+	r.Use(mux.CORSMethodMiddleware(r))
+
+	http.ListenAndServe(":8080", r)
+}
+
+func fooHandler(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+	if r.Method == http.MethodOptions {
+		return
+	}
+
+	w.Write([]byte("foo"))
+}
+```
+
+And a request to `/foo` using something like:
+
+```bash
+curl localhost:8080/foo -v
+```
+
+Would look like:
+
+```bash
+*   Trying ::1...
+* TCP_NODELAY set
+* Connected to localhost (::1) port 8080 (#0)
+> GET /foo HTTP/1.1
+> Host: localhost:8080
+> User-Agent: curl/7.59.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS
+< Access-Control-Allow-Origin: *
+< Date: Fri, 28 Jun 2019 20:13:30 GMT
+< Content-Length: 3
+< Content-Type: text/plain; charset=utf-8
+<
+* Connection #0 to host localhost left intact
+foo
+```
+
 ### Testing Handlers
 
 Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go
index 38957deea..bd5a38b55 100644
--- a/vendor/github.com/gorilla/mux/doc.go
+++ b/vendor/github.com/gorilla/mux/doc.go
@@ -295,7 +295,7 @@ A more complex authentication middleware, which maps session token to users, cou
 	r := mux.NewRouter()
 	r.HandleFunc("/", handler)
 
-	amw := authenticationMiddleware{}
+	amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
 	amw.Populate()
 	r.Use(amw.Middleware)
diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go
index ceb812cee..cf2b26dc0 100644
--- a/vendor/github.com/gorilla/mux/middleware.go
+++ b/vendor/github.com/gorilla/mux/middleware.go
@@ -32,37 +32,19 @@ func (r *Router) useInterface(mw middleware) {
 	r.middlewares = append(r.middlewares, mw)
 }
 
-// CORSMethodMiddleware sets the Access-Control-Allow-Methods response header
-// on a request, by matching routes based only on paths. It also handles
-// OPTIONS requests, by settings Access-Control-Allow-Methods, and then
-// returning without calling the next http handler.
+// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header
+// on requests for routes that have an OPTIONS method matcher to all the method matchers on
+// the route. Routes that do not explicitly handle OPTIONS requests will not be processed
+// by the middleware. See examples for usage.
 func CORSMethodMiddleware(r *Router) MiddlewareFunc {
 	return func(next http.Handler) http.Handler {
 		return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-			var allMethods []string
-
-			err := r.Walk(func(route *Route, _ *Router, _ []*Route) error {
-				for _, m := range route.matchers {
-					if _, ok := m.(*routeRegexp); ok {
-						if m.Match(req, &RouteMatch{}) {
-							methods, err := route.GetMethods()
-							if err != nil {
-								return err
-							}
-
-							allMethods = append(allMethods, methods...)
- } - break - } - } - return nil - }) - + allMethods, err := getAllMethodsForRoute(r, req) if err == nil { - w.Header().Set("Access-Control-Allow-Methods", strings.Join(append(allMethods, "OPTIONS"), ",")) - - if req.Method == "OPTIONS" { - return + for _, v := range allMethods { + if v == http.MethodOptions { + w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ",")) + } } } @@ -70,3 +52,28 @@ func CORSMethodMiddleware(r *Router) MiddlewareFunc { }) } } + +// getAllMethodsForRoute returns all the methods from method matchers matching a given +// request. +func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) { + var allMethods []string + + err := r.Walk(func(route *Route, _ *Router, _ []*Route) error { + for _, m := range route.matchers { + if _, ok := m.(*routeRegexp); ok { + if m.Match(req, &RouteMatch{}) { + methods, err := route.GetMethods() + if err != nil { + return err + } + + allMethods = append(allMethods, methods...) + } + break + } + } + return nil + }) + + return allMethods, err +} diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE index 744875676..1eb75ef68 100644 --- a/vendor/github.com/klauspost/compress/LICENSE +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -1,4 +1,5 @@ Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index 202f36a99..413ec3b3c 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -243,7 +243,7 @@ func (s *Scratch) buildDtable() error { nBits := s.actualTableLog - byte(highBits(uint32(nextState))) s.decTable[u].nbBits = nBits newState := (nextState << nBits) - tableSize - if newState > tableSize { + if newState >= tableSize { return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) } if newState == uint16(u) && nBits == 0 { @@ -281,8 +281,12 @@ func (s *Scratch) decompress() error { tmp[off+2] = s1.nextFast() tmp[off+3] = s2.nextFast() off += 4 + // When off is 0, we have overflowed and should write. if off == 0 { s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } } } } else { @@ -296,7 +300,7 @@ func (s *Scratch) decompress() error { off += 4 if off == 0 { s.Out = append(s.Out, tmp...) - off = 0 + // When off is 0, we have overflowed and should write. if len(s.Out) >= s.DecompressLimit { return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 261c54274..43b4815b3 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -193,14 +193,26 @@ func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { tmp[off+3] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask]) off += 4 if off == 0 { + if len(s.Out)+256 > s.MaxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } s.Out = append(s.Out, tmp...) 
 		}
 	}
 
+	if len(s.Out)+int(off) > s.MaxDecodedSize {
+		br.close()
+		return nil, ErrMaxDecodedSizeExceeded
+	}
 	s.Out = append(s.Out, tmp[:off]...)
 
 	for !br.finished() {
 		br.fill()
+		if len(s.Out) >= s.MaxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
 		s.Out = append(s.Out, decode())
 	}
 	return s.Out, br.close()
@@ -218,6 +230,9 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
 	if len(in) < 6+(4*1) {
 		return nil, errors.New("input too small")
 	}
+	if dstSize > s.MaxDecodedSize {
+		return nil, ErrMaxDecodedSizeExceeded
+	}
 
 	// TODO: We do not detect when we overrun a buffer, except if the last one does.
 	var br [4]bitReader
@@ -247,9 +262,13 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
 	dstOut := s.Out
 	dstEvery := (dstSize + 3) / 4
 
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	single := s.dt.single[:tlSize]
+
 	decode := func(br *bitReader) byte {
 		val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
-		v := s.dt.single[val]
+		v := single[val&tlMask]
 		br.bitsRead += v.nBits
 		return v.byte
 	}
@@ -279,7 +298,7 @@ bigloop:
 			off += 2
 			if off == bufoff {
 				if bufoff > dstEvery {
-					return nil, errors.New("corruption detected: stream overrun")
+					return nil, errors.New("corruption detected: stream overrun 1")
 				}
 				copy(dstOut, tmp[:bufoff])
 				copy(dstOut[dstEvery:], tmp[bufoff:bufoff*2])
@@ -288,15 +307,15 @@ bigloop:
 				off = 0
 				dstOut = dstOut[bufoff:]
 				// There must at least be 3 buffers left.
-				if len(dstOut) < dstEvery*3+3 {
-					return nil, errors.New("corruption detected: stream overrun")
+				if len(dstOut) < dstEvery*3 {
+					return nil, errors.New("corruption detected: stream overrun 2")
 				}
 			}
 		}
 	}
 	if off > 0 {
 		ioff := int(off)
 		if len(dstOut) < dstEvery*3+ioff {
-			return nil, errors.New("corruption detected: stream overrun")
+			return nil, errors.New("corruption detected: stream overrun 3")
 		}
 		copy(dstOut, tmp[:off])
 		copy(dstOut[dstEvery:dstEvery+ioff], tmp[bufoff:bufoff*2])
@@ -311,7 +330,7 @@ bigloop:
 		for !br.finished() {
 			br.fill()
 			if offset >= len(dstOut) {
-				return nil, errors.New("corruption detected: stream overrun")
+				return nil, errors.New("corruption detected: stream overrun 4")
 			}
 			dstOut[offset] = decode(br)
 			offset++
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 50d02e440..6f823f94d 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -35,6 +35,9 @@ var (
 
 	// ErrTooBig is return if input is too large for a single block.
 	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds MaxDecodedSize.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
 )
 
 type ReusePolicy uint8
@@ -86,6 +89,11 @@ type Scratch struct {
 	// Reuse will specify the reuse policy
 	Reuse ReusePolicy
 
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
 	br             byteReader
 	symbolLen      uint16 // Length of active part of the symbol table.
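To illustrate the `MaxDecodedSize` guard documented just above, a sketch (assumed usage; the buffer contents and sizes are arbitrary) that compresses a block and then decodes it with a deliberately small limit, which should yield `ErrMaxDecodedSizeExceeded`:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	// Arbitrary compressible input: 4 KiB cycling through 16 symbols.
	data := make([]byte, 4096)
	for i := range data {
		data[i] = byte(i % 16)
	}
	var enc huff0.Scratch
	comp, _, err := huff0.Compress4X(data, &enc)
	if err != nil {
		panic(err)
	}

	// Decode side: cap output far below the 4096 bytes the stream expands to.
	var dec huff0.Scratch
	dec.MaxDecodedSize = 1024
	s, remain, err := huff0.ReadTable(comp, &dec)
	if err != nil {
		panic(err)
	}
	_, err = s.Decompress4X(remain, len(data))
	fmt.Println(err) // expected: maximum output size exceeded
}
```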
maxCount int // count of the most probable symbol @@ -116,6 +124,9 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) { if s.TableLog > tableLogMax { return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, tableLogMax) } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } if s.clearCount && s.maxCount == 0 { for i := range s.count { s.count[i] = 0 diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s index e6179f65e..1c66e3723 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s @@ -184,9 +184,7 @@ tagLit60Plus: // checks. In the asm version, we code it once instead of once per switch case. ADDQ CX, SI SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 + CMPQ SI, R13 JA errCorrupt // case x == 60: @@ -232,9 +230,7 @@ tagCopy4: ADDQ $5, SI // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 + CMPQ SI, R13 JA errCorrupt // length = 1 + int(src[s-5])>>2 @@ -251,9 +247,7 @@ tagCopy2: ADDQ $3, SI // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 + CMPQ SI, R13 JA errCorrupt // length = 1 + int(src[s-3])>>2 @@ -277,9 +271,7 @@ tagCopy: ADDQ $2, SI // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 + CMPQ SI, R13 JA errCorrupt // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/snappy/decode_other.go index 8c9f2049b..94a96c5d7 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode_other.go +++ b/vendor/github.com/klauspost/compress/snappy/decode_other.go @@ -85,14 +85,28 @@ func decode(dst, src []byte) int { if offset <= 0 || d < offset || length > len(dst)-d { return decodeErrCodeCorrupt } - // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike - // the built-in copy function, this byte-by-byte copy always runs + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs // forwards, even if the slices overlap. Conceptually, this is: // // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - for end := d + length; d != end; d++ { - dst[d] = dst[d-offset] + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] } + d += length } if d != len(dst) { return decodeErrCodeCorrupt diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 670f98af4..d9d38b23f 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -34,7 +34,8 @@ For now, a high speed (fastest) and medium-fast (default) compressor has been im The "Fastest" compression ratio is roughly equivalent to zstd level 1. The "Default" compression ration is roughly equivalent to zstd level 3 (default). -In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. 
The compression ratio compared to stdlib is around level 3, but usually 3x as fast. +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. Compared to cgo zstd, the speed is around level 3 (default), but compression slightly worse, between level 1&2. @@ -217,7 +218,8 @@ silesia.tar zstd 3 211947520 66793301 1377 146.79 As part of the development process a *Snappy* -> *Zstandard* converter was also built. -This can convert a *framed* [Snappy Stream](https://godoc.org/github.com/golang/snappy#Writer) to a zstd stream. Note that a single block is not framed. +This can convert a *framed* [Snappy Stream](https://godoc.org/github.com/golang/snappy#Writer) to a zstd stream. +Note that a single block is not framed. Conversion is done by converting the stream directly from Snappy without intermediate full decoding. Therefore the compression ratio is much less than what can be done by a full decompression diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index aca1cb85d..3e161ea15 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -63,7 +63,8 @@ var ( type blockDec struct { // Raw source data of the block. - data []byte + data []byte + dataStorage []byte // Destination of the decoded data. dst []byte @@ -145,18 +146,18 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } // Read block data. - if cap(b.data) < cSize { + if cap(b.dataStorage) < cSize { if b.lowMem { - b.data = make([]byte, 0, cSize) + b.dataStorage = make([]byte, 0, cSize) } else { - b.data = make([]byte, 0, maxBlockSize) + b.dataStorage = make([]byte, 0, maxBlockSize) } } if cap(b.dst) <= maxBlockSize { b.dst = make([]byte, 0, maxBlockSize+1) } var err error - b.data, err = br.readBig(cSize, b.data[:0]) + b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { if debug { println("Reading block:", err) @@ -447,6 +448,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { } // Use our out buffer. huff.Out = b.literalBuf[:0] + huff.MaxDecodedSize = litRegenSize if fourStreams { literals, err = huff.Decompress4X(literals, litRegenSize) } else { @@ -609,6 +611,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Use our out buffer. huff = hist.huffTree huff.Out = b.literalBuf[:0] + huff.MaxDecodedSize = litRegenSize if fourStreams { literals, err = huff.Decompress4X(literals, litRegenSize) } else { diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index cba24c76d..9d9151a0e 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -155,14 +155,17 @@ func (h *literalsHeader) setSize(regenLen int) { } // setSizes will set the size of a compressed literals section and the input length. 
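Stepping back to the `snappy/decode_other.go` change above: the copy it optimizes is the LZ back-reference copy, where an offset smaller than the length means the destination overlaps its own source and the copy must run forwards so earlier output feeds later output. A standalone sketch of the idea (a hypothetical helper, not the vendored function):

```go
package main

import "fmt"

// forwardCopy copies length bytes within dst from position d-offset to d.
// When offset > length there is no overlap and the built-in copy is safe;
// otherwise the copy runs forwards byte by byte.
func forwardCopy(dst []byte, d, offset, length int) int {
	if offset > length {
		copy(dst[d:d+length], dst[d-offset:])
		return d + length
	}
	// Slice a and b to equal lengths so the compiler can elide bounds
	// checks inside the loop, mirroring the vendored change.
	a := dst[d : d+length]
	b := dst[d-offset:]
	b = b[:len(a)]
	for i := range a {
		a[i] = b[i]
	}
	return d + length
}

func main() {
	// "ab" followed by a copy of length 6 at offset 2 repeats the pair.
	dst := make([]byte, 8)
	copy(dst, "ab")
	forwardCopy(dst, 2, 2, 6)
	fmt.Println(string(dst)) // abababab
}
```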
-func (h *literalsHeader) setSizes(compLen, inLen int) { +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) // Only retain 2 bits const mask = 3 lh := uint64(*h & mask) switch { case compBits <= 10 && inBits <= 10: - lh |= (1 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) if debug { const mmask = (1 << 24) - 1 n := (lh >> 4) & mmask @@ -175,8 +178,14 @@ func (h *literalsHeader) setSizes(compLen, inLen int) { } case compBits <= 14 && inBits <= 14: lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } case compBits <= 18 && inBits <= 18: lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } default: panic("internal error: block too big") } @@ -307,12 +316,30 @@ func (b *blockEnc) encodeLits() error { return nil } - // TODO: Switch to 1X when less than x bytes. - out, reUsed, err := huff0.Compress4X(b.literals, b.litEnc) - // Bail out of compression is too little. - if len(out) > (len(b.literals) - len(b.literals)>>4) { + var ( + out []byte + reUsed, single bool + err error + ) + if len(b.literals) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + if len(out) > len(b.literals)-len(b.literals)>>4 { + // Bail out of compression is too little. + err = huff0.ErrIncompressible + } + } else if len(b.literals) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + if len(out) > len(b.literals)-len(b.literals)>>4 { + // Bail out of compression is too little. + err = huff0.ErrIncompressible + } + } else { err = huff0.ErrIncompressible } + switch err { case huff0.ErrIncompressible: if debug { @@ -351,7 +378,7 @@ func (b *blockEnc) encodeLits() error { lh.setType(literalsBlockCompressed) } // Set sizes - lh.setSizes(len(out), len(b.literals)) + lh.setSizes(len(out), len(b.literals), single) bh.setSize(uint32(len(out) + lh.size() + 1)) // Write block headers. @@ -381,16 +408,23 @@ func (b *blockEnc) encode() error { b.output = bh.appendTo(b.output) var ( - out []byte - reUsed bool - err error + out []byte + reUsed, single bool + err error ) - if len(b.literals) > 32 { - // TODO: Switch to 1X on small blocks. + if len(b.literals) >= 1024 { + // Use 4 Streams. 
out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) if len(out) > len(b.literals)-len(b.literals)>>4 { err = huff0.ErrIncompressible } + } else if len(b.literals) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + if len(out) > len(b.literals)-len(b.literals)>>4 { + err = huff0.ErrIncompressible + } } else { err = huff0.ErrIncompressible } @@ -435,7 +469,7 @@ func (b *blockEnc) encode() error { } } } - lh.setSizes(len(out), len(b.literals)) + lh.setSizes(len(out), len(b.literals), single) if debug { printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) println("Adding literal header:", lh) diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index 4a8460476..3538063f1 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -116,6 +116,9 @@ func (r *readerWrapper) readByte() (byte, error) { } func (r *readerWrapper) skipN(n int) error { - _, err := io.CopyN(ioutil.Discard, r.r, int64(n)) + n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) + if n2 != int64(n) { + err = io.ErrUnexpectedEOF + } return err } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f06bff6f6..f4db3096a 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -127,6 +127,9 @@ func (d *Decoder) Read(p []byte) (int, error) { } } if len(d.current.b) > 0 { + if debug { + println("returning", n, "still bytes left:", len(d.current.b)) + } // Only return error at end of block return n, nil } @@ -159,6 +162,9 @@ func (d *Decoder) Reset(r io.Reader) error { // If bytes buffer and < 1MB, do sync decoding anyway. if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 { + if debug { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } b := bb.Bytes() dst, err := d.DecodeAll(b, nil) if err == nil { @@ -167,6 +173,9 @@ func (d *Decoder) Reset(r io.Reader) error { d.current.b = dst d.current.err = err d.current.flushed = true + if debug { + println("sync decode to ", len(dst), "bytes, err:", err) + } return nil } @@ -193,7 +202,9 @@ func (d *Decoder) drainOutput() { d.current.cancel = nil } if d.current.d != nil { - println("re-adding current decoder", d.current.d, len(d.decoders)) + if debug { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } d.decoders <- d.current.d d.current.d = nil d.current.b = nil @@ -206,7 +217,9 @@ func (d *Decoder) drainOutput() { select { case v := <-d.current.output: if v.d != nil { - println("got decoder", v.d) + if debug { + printf("re-adding decoder %p", v.d) + } d.decoders <- v.d } if v.err == errEndOfStream { @@ -259,20 +272,22 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { if d.current.err == ErrDecoderClosed { return dst, ErrDecoderClosed } - //println(len(d.frames), len(d.decoders), d.current) + + // Grab a block decoder and frame decoder. block, frame := <-d.decoders, <-d.frames defer func() { + if debug { + printf("re-adding decoder: %p", block) + } d.decoders <- block frame.rawInput = nil + frame.bBuf = nil d.frames <- frame }() - if cap(dst) == 0 { - // Allocate 1MB by default. 
-	dst = make([]byte, 0, 1<<20)
-	}
-	br := byteBuf(input)
+	frame.bBuf = input
+
 	for {
-		err := frame.reset(&br)
+		err := frame.reset(&frame.bBuf)
 		if err == io.EOF {
 			return dst, nil
 		}
@@ -290,11 +305,21 @@
 				dst = dst2
 			}
 		}
+		if cap(dst) == 0 {
+			// Allocate window size * 2 by default if nothing is provided and we didn't get frame content size.
+			size := frame.WindowSize * 2
+			// Cap to 1 MB.
+			if size > 1<<20 {
+				size = 1 << 20
+			}
+			dst = make([]byte, 0, size)
+		}
+
 		dst, err = frame.runDecoder(dst, block)
 		if err != nil {
 			return dst, err
 		}
-		if len(br) == 0 {
+		if len(frame.bBuf) == 0 {
 			break
 		}
 	}
@@ -305,6 +330,9 @@
 // If an error occurs d.err will be set.
 func (d *Decoder) nextBlock() {
 	if d.current.d != nil {
+		if debug {
+			printf("re-adding current decoder %p", d.current.d)
+		}
 		d.decoders <- d.current.d
 		d.current.d = nil
 	}
@@ -377,6 +405,9 @@
 	defer d.streamWg.Done()
 	frame := newFrameDec(d.o)
 	for stream := range inStream {
+		if debug {
+			println("got new stream")
+		}
 		br := readerWrapper{r: stream.r}
 	decodeStream:
 		for {
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index 52c1eb066..2ac9cd2dd 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -50,15 +50,17 @@ func WithDecoderConcurrency(n int) DOption {
 }
 
 // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
-// (non-streaming) operations.
-// Maxmimum and default is 1 << 63 bytes.
+// non-streaming operations or maximum window size for streaming operations.
+// This can be used to control memory usage of potentially hostile content.
+// For streaming operations, the maximum window size is capped at 1<<30 bytes.
+// Maximum and default is 1 << 63 bytes.
 func WithDecoderMaxMemory(n uint64) DOption {
 	return func(o *decoderOptions) error {
 		if n == 0 {
-			return errors.New("WithDecoderMaxmemory must be at least 1")
+			return errors.New("WithDecoderMaxMemory must be at least 1")
 		}
 		if n > 1<<63 {
-			return fmt.Errorf("WithDecoderMaxmemorymust be less than 1 << 63")
+			return fmt.Errorf("WithDecoderMaxMemory must be less than 1 << 63")
 		}
 		o.maxDecodedSize = n
 		return nil
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index 02c79814f..e120625d8 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -82,16 +82,11 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
 		stepSize++
 	}
 
-	// TEMPLATE
-
 	const kSearchStrength = 8
 
 	// nextEmit is where in src the next emitLiteral should start from.
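Tying together the `DecodeAll` and `WithDecoderMaxMemory` changes above, a sketch of how the limit protects in-memory decoding (assumed usage; the exact error returned depends on whether the window-size or decoded-size check fires first):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Encode 1 MiB of zeros entirely in memory (a nil writer is fine for EncodeAll).
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	comp := enc.EncodeAll(make([]byte, 1<<20), nil)
	enc.Close()

	// Cap decoder memory at 64 KiB; decoding the 1 MiB payload should fail
	// instead of allocating the full output.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderMaxMemory(64<<10))
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	if _, err := dec.DecodeAll(comp, nil); err != nil {
		fmt.Println("refused:", err)
	}
}
```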
nextEmit := s cv := load6432(src, s) - // nextHash is the hash at s - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) // Relative offsets offset1 := int32(blk.recentOffsets[0]) @@ -119,8 +114,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS = nextHashS & dFastShortTableMask - nextHashL = nextHashL & dFastLongTableMask + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -172,8 +167,6 @@ encodeLoop: break encodeLoop } cv = load6432(src, s) - nextHashS = hash5(cv, dFastShortTableBits) - nextHashL = hash8(cv, dFastLongTableBits) continue } const repOff2 = 1 @@ -221,8 +214,6 @@ encodeLoop: break encodeLoop } cv = load6432(src, s) - nextHashS = hash5(cv, dFastShortTableBits) - nextHashL = hash8(cv, dFastLongTableBits) // Swap offsets offset1, offset2 = offset2, offset1 continue @@ -296,8 +287,6 @@ encodeLoop: break encodeLoop } cv = load6432(src, s) - nextHashS = hash5(cv, dFastShortTableBits) - nextHashL = hash8(cv, dFastLongTableBits) } // A 4-byte match has been found. Update recent offsets. @@ -345,38 +334,54 @@ encodeLoop: break encodeLoop } - // Index match start + 2 and end - 2 - index0 := s - l + 2 + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) index1 := s - 2 - if l == 4 { - // if l is 4, we would check the same place twice, so index s-1 instead. - index1++ - } cv0 := load6432(src, index0) cv1 := load6432(src, index1) - entry0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - entry1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.table[hash5(cv0, dFastShortTableBits)&dFastShortTableMask] = entry0 - e.longTable[hash8(cv0, dFastLongTableBits)&dFastLongTableMask] = entry0 - e.table[hash5(cv1, dFastShortTableBits)&dFastShortTableMask] = entry1 - e.longTable[hash8(cv1, dFastLongTableBits)&dFastLongTableMask] = entry1 + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hash8(cv0, dFastLongTableBits)] = te0 + e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hash5(cv0, dFastShortTableBits)] = te0 + e.table[hash5(cv1, dFastShortTableBits)] = te1 cv = load6432(src, s) - nextHashS = hash5(cv, dFastShortTableBits) - nextHashL = hash8(cv, dFastLongTableBits) + + if !canRepeat { + continue + } // Check offset 2 - if o2 := s - offset2; canRepeat && o2 > 0 && load3232(src, o2) == uint32(cv) { + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv1>>8, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + // We have at least 4 byte match. // No need to check backwards. We come straight from a match l := 4 + e.matchlen(s+4, o2+4, src) - // Store this, since we have it. + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL&dFastLongTableMask] = entry - e.table[nextHashS&dFastShortTableMask] = entry + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 + // Since litlen is always 0, this is offset 1. seq.offset = 1 s += l @@ -389,12 +394,10 @@ encodeLoop: // Swap offset 1 and 2. 
offset1, offset2 = offset2, offset1 if s >= sLimit { + // Finished break encodeLoop } - // Prepare next loop. cv = load6432(src, s) - nextHashS = hash5(cv, dFastShortTableBits) - nextHashL = hash8(cv, dFastLongTableBits) } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index a8edaa888..6f388de04 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -124,8 +124,6 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { // nextEmit is where in src the next emitLiteral should start from. nextEmit := s cv := load6432(src, s) - // nextHash is the hash at s - nextHash := hash6(cv, hashLog) // Relative offsets offset1 := int32(blk.recentOffsets[0]) @@ -157,8 +155,8 @@ encodeLoop: panic("offset0 was 0") } - nextHash2 := hash6(cv>>8, hashLog) & tableMask - nextHash = nextHash & tableMask + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) candidate := e.table[nextHash] candidate2 := e.table[nextHash2] repIndex := s - offset1 + 2 @@ -207,8 +205,6 @@ encodeLoop: break encodeLoop } cv = load6432(src, s) - //nextHash = hashLen(cv, hashLog, mls) - nextHash = hash6(cv, hashLog) continue } coffset0 := s - (candidate.offset - e.cur) @@ -245,7 +241,6 @@ encodeLoop: break encodeLoop } cv = load6432(src, s) - nextHash = hash6(cv, hashLog) } // A 4-byte match has been found. We'll later see if more than 4 bytes. offset2 = offset1 @@ -292,15 +287,16 @@ encodeLoop: break encodeLoop } cv = load6432(src, s) - nextHash = hash6(cv, hashLog) // Check offset 2 - if o2 := s - offset2; canRepeat && o2 > 0 && load3232(src, o2) == uint32(cv) { + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { // We have at least 4 byte match. // No need to check backwards. We come straight from a match l := 4 + e.matchlen(s+4, o2+4, src) + // Store this, since we have it. - e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: uint32(cv)} + nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 // Since litlen is always 0, this is offset 1. @@ -319,7 +315,6 @@ encodeLoop: } // Prepare next loop. cv = load6432(src, s) - nextHash = hash6(cv, hashLog) } } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index ed028f5a7..b7011be29 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -211,6 +211,7 @@ func (e *Encoder) nextBlock(final bool) error { s.wWg.Wait() _, s.err = s.w.Write(blk.output) s.nWritten += int64(len(blk.output)) + s.eofWritten = true } return s.err } @@ -256,7 +257,12 @@ func (e *Encoder) nextBlock(final bool) error { } s.wWg.Done() }() - err := blk.encode() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode() + } switch err { case errIncompressible: if debug { @@ -443,7 +449,13 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if len(src) == 0 { blk.last = true } - err := blk.encode() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
+ if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { + err = blk.encode() + } + switch err { case errIncompressible: if debug { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 6e210c4a0..a8559e900 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -6,7 +6,7 @@ import ( "strings" ) -// DOption is an option for creating a encoder. +// EOption is an option for creating a encoder. type EOption func(*encoderOptions) error // options retains accumulated state of multiple options. diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 8fa264fc2..839a95fbf 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -39,6 +39,9 @@ type frameDec struct { rawInput byteBuffer + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + // asyncRunning indicates whether the async routine processes input on 'decoding'. asyncRunning bool asyncRunningMu sync.Mutex @@ -59,6 +62,9 @@ func newFrameDec(o decoderOptions) *frameDec { o: o, maxWindowSize: 1 << 30, } + if d.maxWindowSize > o.maxDecodedSize { + d.maxWindowSize = o.maxDecodedSize + } return &d } @@ -232,7 +238,9 @@ func (d *frameDec) reset(br byteBuffer) error { // next will start decoding the next block from stream. func (d *frameDec) next(block *blockDec) error { - println("decoding new block") + if debug { + printf("decoding new block %p:%p", block, block.data) + } err := block.reset(d.rawInput, d.WindowSize) if err != nil { println("block error:", err) @@ -280,13 +288,13 @@ func (d *frameDec) checkCRC() error { if !d.HasCheckSum { return nil } - var tmp [8]byte - gotB := d.crc.Sum(tmp[:0]) + var tmp [4]byte + got := d.crc.Sum64() // Flip to match file order. - gotB[0] = gotB[7] - gotB[1] = gotB[6] - gotB[2] = gotB[5] - gotB[3] = gotB[4] + tmp[0] = byte(got >> 0) + tmp[1] = byte(got >> 8) + tmp[2] = byte(got >> 16) + tmp[3] = byte(got >> 24) // We can overwrite upper tmp now want := d.rawInput.readSmall(4) @@ -295,8 +303,10 @@ func (d *frameDec) checkCRC() error { return io.ErrUnexpectedEOF } - if !bytes.Equal(gotB[:4], want) { - println("CRC Check Failed:", gotB[:4], "!=", want) + if !bytes.Equal(tmp[:], want) { + if debug { + println("CRC Check Failed:", tmp[:], "!=", want) + } return ErrCRCMismatch } println("CRC ok") @@ -423,7 +433,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) { } } -// runDecoder will create a sync decoder that will decodeAsync a block of data. +// runDecoder will create a sync decoder that will decode a block of data. func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { // TODO: Init to dictionary d.history.reset() diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go index a86d00bc3..9efe34feb 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -184,29 +184,75 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { // decSymbol contains information about a state entry, // Including the state offset base, the output symbol and // the number of bits to read for the low part of the destination state. 
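// The decoder_options.go and framedec.go hunks above make
// WithDecoderMaxMemory also clamp the streaming window: newFrameDec lowers
// maxWindowSize to o.maxDecodedSize. A minimal usage sketch against the
// package's public API, useful when decoding untrusted input:
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	compressed := enc.EncodeAll([]byte("hello zstd"), nil)
	enc.Close()

	// Cap in-memory decodes (and, per the hunks above, the streaming
	// window) at 64 MiB.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderMaxMemory(64<<20))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello zstd
}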
-type decSymbol struct { - newState uint16 - addBits uint8 // Used for symbols until transformed. - nbBits uint8 - baseline uint32 +// Using a composite uint64 is faster than a struct with separate members. +type decSymbol uint64 + +func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { + return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d decSymbol) nbBits() uint8 { + return uint8(d) +} + +func (d decSymbol) addBits() uint8 { + return uint8(d >> 8) +} + +func (d decSymbol) newState() uint16 { + return uint16(d >> 16) +} + +func (d decSymbol) baseline() uint32 { + return uint32(d >> 32) +} + +func (d decSymbol) baselineInt() int { + return int(d >> 32) +} + +func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { + *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d *decSymbol) setNBits(nBits uint8) { + const mask = 0xffffffffffffff00 + *d = (*d & mask) | decSymbol(nBits) +} + +func (d *decSymbol) setAddBits(addBits uint8) { + const mask = 0xffffffffffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) +} + +func (d *decSymbol) setNewState(state uint16) { + const mask = 0xffffffff0000ffff + *d = (*d & mask) | decSymbol(state)<<16 +} + +func (d *decSymbol) setBaseline(baseline uint32) { + const mask = 0xffffffff + *d = (*d & mask) | decSymbol(baseline)<<32 +} + +func (d *decSymbol) setExt(addBits uint8, baseline uint32) { + const mask = 0xffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) } // decSymbolValue returns the transformed decSymbol for the given symbol. func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { if int(symb) >= len(t) { - return decSymbol{}, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) + return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) } lu := t[symb] - return decSymbol{ - addBits: lu.addBits, - baseline: lu.baseLine, - }, nil + return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil } // setRLE will set the decoder til RLE mode. func (s *fseDecoder) setRLE(symbol decSymbol) { s.actualTableLog = 0 - s.maxBits = symbol.addBits + s.maxBits = symbol.addBits() s.dt[0] = symbol } @@ -220,7 +266,7 @@ func (s *fseDecoder) buildDtable() error { { for i, v := range s.norm[:s.symbolLen] { if v == -1 { - s.dt[highThreshold].addBits = uint8(i) + s.dt[highThreshold].setAddBits(uint8(i)) highThreshold-- symbolNext[i] = 1 } else { @@ -235,7 +281,7 @@ func (s *fseDecoder) buildDtable() error { position := uint32(0) for ss, v := range s.norm[:s.symbolLen] { for i := 0; i < int(v); i++ { - s.dt[position].addBits = uint8(ss) + s.dt[position].setAddBits(uint8(ss)) position = (position + step) & tableMask for position > highThreshold { // lowprob area @@ -253,11 +299,11 @@ func (s *fseDecoder) buildDtable() error { { tableSize := uint16(1 << s.actualTableLog) for u, v := range s.dt[:tableSize] { - symbol := v.addBits + symbol := v.addBits() nextState := symbolNext[symbol] symbolNext[symbol] = nextState + 1 nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].nbBits = nBits + s.dt[u&maxTableMask].setNBits(nBits) newState := (nextState << nBits) - tableSize if newState > tableSize { return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) @@ -266,7 +312,7 @@ func (s *fseDecoder) buildDtable() error { // Seems weird that this is possible with nbits > 0. 
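// The fse_decoder.go rewrite above packs the old decSymbol struct into a
// single uint64: bits 0-7 nbBits, 8-15 addBits, 16-31 newState, 32-63
// baseline. A tiny round-trip check of that layout, mirroring newDecSymbol
// and the accessors from the hunks:
package main

import "fmt"

type decSymbol uint64

func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
	return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
}

func main() {
	d := newDecSymbol(5, 3, 0x1234, 0xdeadbeef)
	// Unpack exactly as the nbBits/addBits/newState/baseline accessors do.
	fmt.Printf("nbBits=%d addBits=%d newState=%#x baseline=%#x\n",
		uint8(d), uint8(d>>8), uint16(d>>16), uint32(d>>32))
}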
return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) } - s.dt[u&maxTableMask].newState = newState + s.dt[u&maxTableMask].setNewState(newState) } } return nil @@ -279,25 +325,21 @@ func (s *fseDecoder) transform(t []baseOffset) error { tableSize := uint16(1 << s.actualTableLog) s.maxBits = 0 for i, v := range s.dt[:tableSize] { - if int(v.addBits) >= len(t) { - return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits, len(t)) + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) } - lu := t[v.addBits] + lu := t[add] if lu.addBits > s.maxBits { s.maxBits = lu.addBits } - s.dt[i&maxTableMask] = decSymbol{ - newState: v.newState, - nbBits: v.nbBits, - addBits: lu.addBits, - baseline: lu.baseLine, - } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v } return nil } type fseState struct { - // TODO: Check if *[1 << maxTablelog]decSymbol is faster. dt []decSymbol state decSymbol } @@ -312,26 +354,31 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { // next returns the current symbol and sets the next state. // At least tablelog bits must be available in the bit reader. func (s *fseState) next(br *bitReader) { - lowBits := uint16(br.getBits(s.state.nbBits)) - s.state = s.dt[s.state.newState+lowBits] + lowBits := uint16(br.getBits(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] } // finished returns true if all bits have been read from the bitstream // and the next state would require reading bits from the input. func (s *fseState) finished(br *bitReader) bool { - return br.finished() && s.state.nbBits > 0 + return br.finished() && s.state.nbBits() > 0 } // final returns the current state symbol without decoding the next. func (s *fseState) final() (int, uint8) { - return int(s.state.baseline), s.state.addBits + return s.state.baselineInt(), s.state.addBits() +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() } // nextFast returns the next symbol and sets the next state. // This can only be used if no symbols are 0 bits. // At least tablelog bits must be available in the bit reader. func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { - lowBits := uint16(br.getBitsFast(s.state.nbBits)) - s.state = s.dt[s.state.newState+lowBits] - return s.state.baseline, s.state.addBits + lowBits := uint16(br.getBitsFast(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] + return s.state.baseline(), s.state.addBits() } diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go index 819d87f88..4a752067f 100644 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -64,7 +64,7 @@ func hash6(u uint64, h uint8) uint32 { return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) } -// hash6 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <64. 
func hash7(u uint64, h uint8) uint32 { return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index cef69e35b..15a45f7b5 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -89,6 +89,10 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] // decode sequences from the stream with the provided history. func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { startSize := len(s.out) + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + for i := seqs - 1; i >= 0; i-- { if br.overread() { printf("reading sequence %d, exceeded available data\n", seqs-i) @@ -96,10 +100,10 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } var litLen, matchOff, matchLen int if br.off > 4+((maxOffsetBits+16+16)>>3) { - litLen, matchOff, matchLen = s.nextFast(br) + litLen, matchOff, matchLen = s.nextFast(br, llState, mlState, ofState) br.fillFast() } else { - litLen, matchOff, matchLen = s.next(br) + litLen, matchOff, matchLen = s.next(br, llState, mlState, ofState) br.fill() } @@ -175,30 +179,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { // This is the last sequence, so we shouldn't update state. break } - if true { - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - - nBits := a.nbBits + b.nbBits + c.nbBits - if nBits == 0 { - s.litLengths.state.state = s.litLengths.state.dt[a.newState] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState] - s.offsets.state.state = s.offsets.state.dt[c.newState] - } else { - bits := br.getBitsFast(nBits) - lowBits := uint16(bits >> ((c.nbBits + b.nbBits) & 31)) - s.litLengths.state.state = s.litLengths.state.dt[a.newState+lowBits] - - lowBits = uint16(bits >> (c.nbBits & 31)) - lowBits &= bitMask[b.nbBits&15] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState+lowBits] - lowBits = uint16(bits) & bitMask[c.nbBits&15] - s.offsets.state.state = s.offsets.state.dt[c.newState+lowBits] - } + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] } else { - s.updateAlt(br) + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] } } @@ -230,55 +229,49 @@ func (s *sequenceDecs) updateAlt(br *bitReader) { // Update all 3 states at once. Approx 20% faster. 
a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - nBits := a.nbBits + b.nbBits + c.nbBits + nBits := a.nbBits() + b.nbBits() + c.nbBits() if nBits == 0 { - s.litLengths.state.state = s.litLengths.state.dt[a.newState] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState] - s.offsets.state.state = s.offsets.state.dt[c.newState] + s.litLengths.state.state = s.litLengths.state.dt[a.newState()] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] + s.offsets.state.state = s.offsets.state.dt[c.newState()] return } bits := br.getBitsFast(nBits) - lowBits := uint16(bits >> ((c.nbBits + b.nbBits) & 31)) - s.litLengths.state.state = s.litLengths.state.dt[a.newState+lowBits] + lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) + s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] - lowBits = uint16(bits >> (c.nbBits & 31)) - lowBits &= bitMask[b.nbBits&15] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState+lowBits] + lowBits = uint16(bits >> (c.nbBits() & 31)) + lowBits &= bitMask[b.nbBits()&15] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] - lowBits = uint16(bits) & bitMask[c.nbBits&15] - s.offsets.state.state = s.offsets.state.dt[c.newState+lowBits] + lowBits = uint16(bits) & bitMask[c.nbBits()&15] + s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] } // nextFast will return new states when there are at least 4 unused bytes left on the stream when done. -func (s *sequenceDecs) nextFast(br *bitReader) (ll, mo, ml int) { +func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { // Final will not read from stream. - ll, llB := s.litLengths.state.final() - ml, mlB := s.matchLengths.state.final() - mo, moB := s.offsets.state.final() + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() // extra bits are stored in reverse order. br.fillFast() - if s.maxBits <= 32 { - mo += br.getBits(moB) - ml += br.getBits(mlB) - ll += br.getBits(llB) - } else { - mo += br.getBits(moB) + mo += br.getBits(moB) + if s.maxBits > 32 { br.fillFast() - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) } + ml += br.getBits(mlB) + ll += br.getBits(llB) - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup if moB > 1 { s.prevOffset[2] = s.prevOffset[1] s.prevOffset[1] = s.prevOffset[0] s.prevOffset[0] = mo return } - + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup if ll == 0 { // There is an exception though, when current sequence's literals_length = 0. // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, @@ -312,11 +305,11 @@ func (s *sequenceDecs) nextFast(br *bitReader) (ll, mo, ml int) { return } -func (s *sequenceDecs) next(br *bitReader) (ll, mo, ml int) { +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { // Final will not read from stream. - ll, llB := s.litLengths.state.final() - ml, mlB := s.matchLengths.state.final() - mo, moB := s.offsets.state.final() + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() // extra bits are stored in reverse order. 
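// The seqdec.go hunks above keep the three FSE states in locals and
// reslice each table to [:maxTablesize], so `table[idx&maxTableMask]`
// indexing needs no bounds check in the hot loop. A generic sketch of the
// same pattern (sizes here are illustrative, not zstd's):
package main

import "fmt"

const (
	tableBits = 9
	tableSize = 1 << tableBits
	tableMask = tableSize - 1
)

func sumMasked(table []uint32, idxs []uint16) (s uint32) {
	t := table[:tableSize] // one bounds check up front...
	for _, i := range idxs {
		s += t[i&tableMask] // ...and the mask keeps every index in range
	}
	return s
}

func main() {
	table := make([]uint32, tableSize)
	for i := range table {
		table[i] = uint32(i)
	}
	fmt.Println(sumMasked(table, []uint16{1, 513, 511})) // 1 + 1 + 511 = 513
}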
 	br.fill()
diff --git a/vendor/github.com/mattn/go-shellwords/.travis.yml b/vendor/github.com/mattn/go-shellwords/.travis.yml
index 16d1430aa..6294d337f 100644
--- a/vendor/github.com/mattn/go-shellwords/.travis.yml
+++ b/vendor/github.com/mattn/go-shellwords/.travis.yml
@@ -1,8 +1,14 @@
 language: go
+sudo: false
 go:
   - tip
+
 before_install:
-  - go get github.com/mattn/goveralls
-  - go get golang.org/x/tools/cmd/cover
+  - go get -t -v ./...
+
 script:
-  - $HOME/gopath/bin/goveralls -repotoken 2FMhp57u8LcstKL9B190fLTcEnBtAAiEL
+  - ./go.test.sh
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
+
diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md
index b1d235c78..9e1e65045 100644
--- a/vendor/github.com/mattn/go-shellwords/README.md
+++ b/vendor/github.com/mattn/go-shellwords/README.md
@@ -1,6 +1,6 @@
 # go-shellwords
 
-[![Coverage Status](https://coveralls.io/repos/mattn/go-shellwords/badge.png?branch=master)](https://coveralls.io/r/mattn/go-shellwords?branch=master)
+[![codecov](https://codecov.io/gh/mattn/go-shellwords/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-shellwords)
 [![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords)
 
 Parse line as shell words.
diff --git a/vendor/github.com/mattn/go-shellwords/go.test.sh b/vendor/github.com/mattn/go-shellwords/go.test.sh
new file mode 100644
index 000000000..a7deaca96
--- /dev/null
+++ b/vendor/github.com/mattn/go-shellwords/go.test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+    go test -coverprofile=profile.out -covermode=atomic "$d"
+    if [ -f profile.out ]; then
+        cat profile.out >> coverage.txt
+        rm profile.out
+    fi
+done
diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go
index 41429d8f2..2dca7f136 100644
--- a/vendor/github.com/mattn/go-shellwords/shellwords.go
+++ b/vendor/github.com/mattn/go-shellwords/shellwords.go
@@ -40,6 +40,7 @@ type Parser struct {
 	ParseEnv      bool
 	ParseBacktick bool
 	Position      int
+	Dir           string
 
 	// If ParseEnv is true, use this for getenv.
 	// If nil, use os.Getenv.
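// The go-shellwords hunks above and below add Parser.Dir: commands run for
// backquote/$(...) substitution now execute with cmd.Dir = p.Dir. Minimal
// usage sketch of the vendored API shown in these hunks:
package main

import (
	"fmt"

	shellwords "github.com/mattn/go-shellwords"
)

func main() {
	p := shellwords.NewParser()
	p.ParseBacktick = true // expand `cmd` and $(cmd) via $SHELL
	p.Dir = "/tmp"         // substituted commands run from this directory

	args, err := p.Parse(`tar -C $(pwd) -xf "archive 1.tar"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(args) // e.g. [tar -C /tmp -xf archive 1.tar]
}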
@@ -51,6 +52,7 @@ func NewParser() *Parser { ParseEnv: ParseEnv, ParseBacktick: ParseBacktick, Position: 0, + Dir: "", } } @@ -100,11 +102,11 @@ loop: if !singleQuoted && !doubleQuoted && !dollarQuote { if p.ParseBacktick { if backQuote { - out, err := shellRun(backtick) + out, err := shellRun(backtick, p.Dir) if err != nil { return nil, err } - buf = out + buf = buf[:len(buf)-len(backtick)] + out } backtick = "" backQuote = !backQuote @@ -117,15 +119,11 @@ loop: if !singleQuoted && !doubleQuoted && !backQuote { if p.ParseBacktick { if dollarQuote { - out, err := shellRun(backtick) + out, err := shellRun(backtick, p.Dir) if err != nil { return nil, err } - if r == ')' { - buf = buf[:len(buf)-len(backtick)-2] + out - } else { - buf = buf[:len(buf)-len(backtick)-1] + out - } + buf = buf[:len(buf)-len(backtick)-2] + out } backtick = "" dollarQuote = !dollarQuote @@ -155,7 +153,7 @@ loop: continue } case ';', '&', '|', '<', '>': - if !(escaped || singleQuoted || doubleQuoted || backQuote) { + if !(escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote) { if r == '>' && len(buf) > 0 { if c := buf[0]; '0' <= c && c <= '9' { i -= 1 diff --git a/vendor/github.com/mattn/go-shellwords/util_go15.go b/vendor/github.com/mattn/go-shellwords/util_go15.go index 180f00f0b..ddcbf229e 100644 --- a/vendor/github.com/mattn/go-shellwords/util_go15.go +++ b/vendor/github.com/mattn/go-shellwords/util_go15.go @@ -9,14 +9,19 @@ import ( "strings" ) -func shellRun(line string) (string, error) { +func shellRun(line, dir string) (string, error) { var b []byte var err error + var cmd *exec.Cmd if runtime.GOOS == "windows" { - b, err = exec.Command(os.Getenv("COMSPEC"), "/c", line).Output() + cmd = exec.Command(os.Getenv("COMSPEC"), "/c", line) } else { - b, err = exec.Command(os.Getenv("SHELL"), "-c", line).Output() + cmd = exec.Command(os.Getenv("SHELL"), "-c", line) } + if dir != "" { + cmd.Dir = dir + } + b, err = cmd.Output() if err != nil { return "", err } diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go index eaf1011d6..3aef2c4d7 100644 --- a/vendor/github.com/mattn/go-shellwords/util_posix.go +++ b/vendor/github.com/mattn/go-shellwords/util_posix.go @@ -9,9 +9,13 @@ import ( "strings" ) -func shellRun(line string) (string, error) { +func shellRun(line, dir string) (string, error) { shell := os.Getenv("SHELL") - b, err := exec.Command(shell, "-c", line).Output() + cmd := exec.Command(shell, "-c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() if err != nil { if eerr, ok := err.(*exec.ExitError); ok { b = eerr.Stderr diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go index e46f89a1f..cda685091 100644 --- a/vendor/github.com/mattn/go-shellwords/util_windows.go +++ b/vendor/github.com/mattn/go-shellwords/util_windows.go @@ -9,9 +9,13 @@ import ( "strings" ) -func shellRun(line string) (string, error) { +func shellRun(line, dir string) (string, error) { shell := os.Getenv("COMSPEC") - b, err := exec.Command(shell, "/c", line).Output() + cmd := exec.Command(shell, "/c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() if err != nil { if eerr, ok := err.(*exec.ExitError); ok { b = eerr.Stderr diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index bad7bb97f..4f35ac134 100644 --- 
a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -34,6 +34,10 @@ const ( // referenced by the manifest. MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + // MediaTypeImageLayerZstd is the media type used for zstd compressed + // layers referenced by the manifest. + MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" + // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" @@ -43,6 +47,11 @@ const ( // restrictions. MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + // MediaTypeImageLayerNonDistributableZstd is the media type for zstd + // compressed layers referenced by the manifest but with distribution + // restrictions. + MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" + // MediaTypeImageConfig specifies the media type for the image configuration. MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 5d493df23..58f1095ab 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" + VersionDev = "-dev" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go b/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go index 0f22c469d..96746b9d4 100644 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go +++ b/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go @@ -55,7 +55,7 @@ func (r *ffReader) Reset(d []byte) { r.l = len(d) } -// Calcuates the Position with line and line offset, +// Calculates the Position with line and line offset, // because this isn't counted for performance reasons, // it will iterate the buffer from the beginning, and should // only be used in error-paths. diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index c7f9ea64f..d7aea1b86 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -86,6 +86,7 @@ endif PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +DOCKERFILE_PATH ?= ./ DOCKER_REPO ?= prom DOCKER_ARCHS ?= amd64 @@ -212,7 +213,7 @@ $(BUILD_DOCKER_ARCHS): common-docker-%: docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ - . 
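// The image-spec hunks above add zstd variants of the OCI layer media
// types. A sketch of choosing a layer media type by compression; the Zstd
// and Gzip constants come from the diff, the helper itself is illustrative:
package main

import (
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func layerMediaType(compression string, distributable bool) string {
	switch {
	case compression == "zstd" && distributable:
		return v1.MediaTypeImageLayerZstd
	case compression == "zstd":
		return v1.MediaTypeImageLayerNonDistributableZstd
	case compression == "gzip" && distributable:
		return v1.MediaTypeImageLayerGzip
	case compression == "gzip":
		return v1.MediaTypeImageLayerNonDistributableGzip
	case distributable:
		return v1.MediaTypeImageLayer
	default:
		return v1.MediaTypeImageLayerNonDistributable
	}
}

func main() {
	fmt.Println(layerMediaType("zstd", true)) // application/vnd.oci.image.layer.v1.tar+zstd
}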
+ $(DOCKERFILE_PATH) .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) @@ -247,7 +248,9 @@ proto: ifdef GOLANGCI_LINT $(GOLANGCI_LINT): mkdir -p $(FIRST_GOPATH)/bin - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ + | sed -e '/install -d/d' \ + | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif ifdef GOVENDOR diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar index 951d909af..6b42e7ba1 100644 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -3,7 +3,7 @@ Directory: fixtures Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc -Mode: 755 +Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/26231 Mode: 755 @@ -21,6 +21,11 @@ Mode: 644 Path: fixtures/proc/26231/cwd SymlinkTo: /usr/bin # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/environ +Lines: 1 +PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/26231/exe SymlinkTo: /usr/bin/vim # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -356,32 +361,62 @@ debug 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/mdstat -Lines: 26 +Lines: 56 Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] + +md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] md127 : active raid1 sdi2[0] sdj2[1] 312319552 blocks [2/2] [UU] -md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] +md0 : active raid1 sdi1[0] sdj1[1] 248896 blocks [2/2] [UU] -md4 : inactive raid1 sda3[0] sdb3[1] +md4 : inactive raid1 sda3[0](F) sdb3[1](S) 4883648 blocks [2/2] [UU] -md6 : active raid1 sdb2[2] sda2[0] +md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0] 195310144 blocks [2/1] [U_] [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec -md8 : active raid1 sdb1[1] sda1[0] +md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) 195310144 blocks [2/2] [UU] [=>...................] 
resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] +md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] bitmap: 0/30 pages [0KB], 65536KB chunk +md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S) + 523968 blocks super 1.2 [4/4] [UUUU] + resync=DELAYED + +md10 : active raid0 sda1[0] sdb1[1] + 314159265 blocks 64k chunks + +md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S) + 4190208 blocks super 1.2 [2/2] [UU] + resync=PENDING + +md12 : active raid0 sdc2[0] sdd2[1] + 3886394368 blocks super 1.2 512k chunks + +md126 : active raid0 sdb[1] sdc[0] + 1855870976 blocks super external:/md127/0 128k chunks + +md219 : inactive sdb[2](S) sdc[1](S) sda[0](S) + 7932 blocks super external:imsm + +md00 : active raid0 xvdb[0] + 4186624 blocks super 1.2 256k chunks + +md120 : active linear sda1[1] sdb1[0] + 2095104 blocks super 1.2 0k rounding + +md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] + 322560 blocks super 1.2 512k chunks + unused devices: Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -609,6 +644,232 @@ Mode: 664 Directory: fixtures/sys/class Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/board_id +Lines: 1 +SM_1141000001000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver +Lines: 1 +2.31.5050 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/hca_type +Lines: 1 +MT4099 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data +Lines: 1 +2221223609 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets +Lines: 1 +87169372 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data +Lines: 1 +26509113295 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets +Lines: 1 +85734114 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait +Lines: 1 +3599 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate +Lines: 1 +40 Gb/sec (4X QDR) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/local_link_integrity_errors 
+Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data +Lines: 1 +2460436784 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_packets +Lines: 1 +89332064 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_remote_physical_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_switch_relay_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data +Lines: 1 +26540356890 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_discards +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_packets +Lines: 1 +88622850 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_wait +Lines: 1 +3846 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/symbol_error +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/rate +Lines: 1 +40 Gb/sec (4X QDR) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/class/net Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1182,6 +1443,35 @@ Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd/0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/0/name +Lines: 1 +demo +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/rbd/0/pool +Lines: 1 +iscsi-images +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd/1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/1/name +Lines: 1 +wrong +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/1/pool +Lines: 1 +wrong-images +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/devices/system Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1806,3 +2096,248 @@ Lines: 1 extent_alloc 2 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/fileio_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/fileio_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/udev_path +Lines: 1 +/home/iscsi/file_back_1G +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/iblock_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path +Lines: 1 +/dev/rbd1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rbd_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path +Lines: 1 +/dev/rbd/iscsi-images/demo +Mode: 644 +# ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d +SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +204950 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +10325 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +40325 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026 +SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +104950 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +20095 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +71235 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686 +SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +301950 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +10195 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +30195 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893 +SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +1234 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +1504 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +4733 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod index 8a1b839fd..b2f8cca93 100644 --- a/vendor/github.com/prometheus/procfs/go.mod +++ b/vendor/github.com/prometheus/procfs/go.mod @@ -1,3 +1,6 @@ module github.com/prometheus/procfs -require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 +require ( + github.com/google/go-cmp v0.3.0 + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 +) diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum index 7827dd3d5..db54133d7 100644 --- a/vendor/github.com/prometheus/procfs/go.sum +++ b/vendor/github.com/prometheus/procfs/go.sum @@ -1,2 +1,4 @@ +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go index c66a1cf80..7ddfd6b6e 100644 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -25,6 +25,9 @@ const ( // DefaultSysMountPoint is the common mount point of the sys filesystem. DefaultSysMountPoint = "/sys" + + // DefaultConfigfsMountPoint is the common mount point of the configfs filesystem. + DefaultConfigfsMountPoint = "/sys/kernel/config" ) // FS represents a pseudo-filesystem, normally /proc or /sys, which provides an diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 71c106782..2af3ada18 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -22,8 +22,8 @@ import ( ) var ( - statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) - buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`) ) // MDStat holds info parsed from /proc/mdstat. @@ -34,8 +34,12 @@ type MDStat struct { ActivityState string // Number of active disks. DisksActive int64 - // Total number of disks the device consists of. + // Total number of disks the device requires. DisksTotal int64 + // Number of failed disks. + DisksFailed int64 + // Spare disks in the device. + DisksSpare int64 // Number of blocks the device holds. BlocksTotal int64 // Number of blocks on the device that are in sync. @@ -59,29 +63,38 @@ func (fs FS) MDStat() ([]MDStat, error) { // parseMDStat parses data from the mdstat file (/proc/mdstat) and returns a slice of // structs containing the relevant info.
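For orientation, here is a minimal sketch of how the reworked mdstat parser is consumed. FS, NewFS, MDStat, and the struct fields come from this diff; the surrounding program and its output format are illustrative assumptions, not part of the patch:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewFS points the library at a proc mount; "/proc" is the usual location.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// MDStat now also reports failed ("(F)" suffix) and spare ("(S)" suffix)
	// disks, and distinguishes the recovering/resyncing activity states.
	mdstats, err := fs.MDStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range mdstats {
		fmt.Printf("%s: state=%s active=%d failed=%d spare=%d synced=%d/%d blocks\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksFailed,
			md.DisksSpare, md.BlocksSynced, md.BlocksTotal)
	}
}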
-func parseMDStat(mdstatData []byte) ([]MDStat, error) { +func parseMDStat(mdStatData []byte) ([]MDStat, error) { mdStats := []MDStat{} - lines := strings.Split(string(mdstatData), "\n") - for i, l := range lines { - if strings.TrimSpace(l) == "" || l[0] == ' ' || - strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + lines := strings.Split(string(mdStatData), "\n") + + for i, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' || + strings.HasPrefix(line, "Personalities") || + strings.HasPrefix(line, "unused") { continue } - deviceFields := strings.Fields(l) + deviceFields := strings.Fields(line) if len(deviceFields) < 3 { - return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", l) + return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) } - mdName := deviceFields[0] - activityState := deviceFields[2] + mdName := deviceFields[0] // mdx + state := deviceFields[2] // active or inactive if len(lines) <= i+3 { - return mdStats, fmt.Errorf("missing lines for md device %s", mdName) + return nil, fmt.Errorf( + "error parsing %s: too few lines for md device", + mdName, + ) } - active, total, size, err := evalStatusLine(lines[i+1]) + // Failed disks have the suffix (F) & Spare disks have the suffix (S). + fail := int64(strings.Count(line, "(F)")) + spare := int64(strings.Count(line, "(S)")) + active, total, size, err := evalStatusLine(lines[i], lines[i+1]) + if err != nil { - return nil, err + return nil, fmt.Errorf("error parsing md device lines: %s", err) } syncLineIdx := i + 2 @@ -89,20 +102,38 @@ func parseMDStat(mdstatData []byte) ([]MDStat, error) { syncLineIdx++ } - // If device is recovering/syncing at the moment, get the number of currently + // If device is syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. syncedBlocks := size - if strings.Contains(lines[syncLineIdx], "recovery") || strings.Contains(lines[syncLineIdx], "resync") { - syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) - if err != nil { - return nil, err + recovering := strings.Contains(lines[syncLineIdx], "recovery") + resyncing := strings.Contains(lines[syncLineIdx], "resync") + + // Append recovery and resyncing state info. + if recovering || resyncing { + if recovering { + state = "recovering" + } else { + state = "resyncing" + } + + // Handle case when resync=PENDING or resync=DELAYED. 
+ if strings.Contains(lines[syncLineIdx], "PENDING") || + strings.Contains(lines[syncLineIdx], "DELAYED") { + syncedBlocks = 0 + } else { + syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) + if err != nil { + return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err) + } } } mdStats = append(mdStats, MDStat{ Name: mdName, - ActivityState: activityState, + ActivityState: state, DisksActive: active, + DisksFailed: fail, + DisksSpare: spare, DisksTotal: total, BlocksTotal: size, BlocksSynced: syncedBlocks, @@ -112,39 +143,51 @@ func parseMDStat(mdstatData []byte) ([]MDStat, error) { return mdStats, nil } -func evalStatusLine(statusline string) (active, total, size int64, err error) { - matches := statuslineRE.FindStringSubmatch(statusline) - if len(matches) != 4 { - return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) - } +func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) { - size, err = strconv.ParseInt(matches[1], 10, 64) + sizeStr := strings.Fields(statusLine)[0] + size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) + } + + if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { + // In the device line, only disks have a number associated with them in []. + total = int64(strings.Count(deviceLine, "[")) + return total, total, size, nil + } + + if strings.Contains(deviceLine, "inactive") { + return 0, 0, size, nil + } + + matches := statusLineRE.FindStringSubmatch(statusLine) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) } return active, total, size, nil } -func evalRecoveryLine(buildline string) (syncedBlocks int64, err error) { - matches := buildlineRE.FindStringSubmatch(buildline) +func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) { + matches := recoveryLineRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, fmt.Errorf("unexpected buildline: %s", buildline) + return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) } syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) if err != nil { - return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine) } return syncedBlocks, nil diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go new file mode 100644 index 000000000..61fa61887 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -0,0 +1,178 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +var validOptionalFields = map[string]bool{ + "shared": true, + "master": true, + "propagate_from": true, + "unbindable": true, +} + +// A MountInfo is a type that describes the details and options +// for each mount, parsed from /proc/self/mountinfo. +// The fields in each entry of /proc/self/mountinfo +// are described in the following man page. +// http://man7.org/linux/man-pages/man5/proc.5.html +type MountInfo struct { + // Unique Id for the mount + MountId int + // The Id of the parent mount + ParentId int + // The value of `st_dev` for the files on this FS + MajorMinorVer string + // The pathname of the directory in the FS that forms + // the root for this mount + Root string + // The pathname of the mount point relative to the root + MountPoint string + // Mount options + Options map[string]string + // Zero or more optional fields + OptionalFields map[string]string + // The Filesystem type + FSType string + // FS specific information or "none" + Source string + // Superblock options + SuperOptions map[string]string +} + +// Returns part of the mountinfo line, if it exists, else an empty string. +func getStringSliceElement(parts []string, idx int, defaultValue string) string { + if idx >= len(parts) { + return defaultValue + } + return parts[idx] +} + +// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs. +func parseMountInfo(r io.Reader) ([]*MountInfo, error) { + mounts := []*MountInfo{} + scanner := bufio.NewScanner(r) + for scanner.Scan() { + mountString := scanner.Text() + parsedMounts, err := parseMountInfoString(mountString) + if err != nil { + return nil, err + } + mounts = append(mounts, parsedMounts) + } + + err := scanner.Err() + return mounts, err +} + +// Parses a mountinfo file line and converts it to a MountInfo struct. +// An important check here is for the hyphen separator; if it does not exist, +// the line is malformed.
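Before the parsing helper that follows, a short usage sketch: GetMounts (defined further down in this file) drives parseMountInfo over /proc/self/mountinfo. The printing loop and its format are illustrative assumptions:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// GetMounts parses /proc/self/mountinfo into MountInfo structs.
	mounts, err := procfs.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		// Options and SuperOptions are parsed into maps, e.g. "rw,relatime"
		// becomes {"rw": "", "relatime": ""}.
		fmt.Printf("%d %s on %s type %s (%v)\n",
			m.MountId, m.Source, m.MountPoint, m.FSType, m.Options)
	}
}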
+func parseMountInfoString(mountString string) (*MountInfo, error) { + var err error + + // OptionalFields can be zero, hence these checks ensure we do not populate the wrong values in the wrong spots + separatorIndex := strings.Index(mountString, "-") + if separatorIndex == -1 { + return nil, fmt.Errorf("no separator found in mountinfo string: %s", mountString) + } + beforeFields := strings.Fields(mountString[:separatorIndex]) + afterFields := strings.Fields(mountString[separatorIndex+1:]) + if (len(beforeFields) + len(afterFields)) < 7 { + return nil, fmt.Errorf("too few fields") + } + + mount := &MountInfo{ + MajorMinorVer: getStringSliceElement(beforeFields, 2, ""), + Root: getStringSliceElement(beforeFields, 3, ""), + MountPoint: getStringSliceElement(beforeFields, 4, ""), + Options: mountOptionsParser(getStringSliceElement(beforeFields, 5, "")), + OptionalFields: nil, + FSType: getStringSliceElement(afterFields, 0, ""), + Source: getStringSliceElement(afterFields, 1, ""), + SuperOptions: mountOptionsParser(getStringSliceElement(afterFields, 2, "")), + } + + mount.MountId, err = strconv.Atoi(getStringSliceElement(beforeFields, 0, "")) + if err != nil { + return nil, fmt.Errorf("failed to parse mount ID") + } + mount.ParentId, err = strconv.Atoi(getStringSliceElement(beforeFields, 1, "")) + if err != nil { + return nil, fmt.Errorf("failed to parse parent ID") + } + // Has optional fields, which are a space-separated list of values. + // Example: shared:2 master:7 + if len(beforeFields) > 6 { + mount.OptionalFields = make(map[string]string) + optionalFields := beforeFields[6:] + for _, field := range optionalFields { + optionSplit := strings.Split(field, ":") + target, value := optionSplit[0], "" + if len(optionSplit) == 2 { + value = optionSplit[1] + } + // Checks if the 'keys' in the optional fields in the mountinfo line are acceptable. + // Allowed 'keys' are shared, master, propagate_from, unbindable. + if _, ok := validOptionalFields[target]; ok { + mount.OptionalFields[target] = value + } + } + } + return mount, nil +} + +// Parses the mount options and superblock options. +func mountOptionsParser(mountOptions string) map[string]string { + opts := make(map[string]string) + options := strings.Split(mountOptions, ",") + for _, opt := range options { + splitOption := strings.Split(opt, "=") + if len(splitOption) < 2 { + key := splitOption[0] + opts[key] = "" + } else { + key, value := splitOption[0], splitOption[1] + opts[key] = value + } + } + return opts +} + +// Retrieves mountinfo information from `/proc/self/mountinfo`. +func GetMounts() ([]*MountInfo, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + return parseMountInfo(f) +} + +// Retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`. +func GetProcMounts(pid int) ([]*MountInfo, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + return parseMountInfo(f) +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 8a8430147..41c148d06 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -247,6 +247,20 @@ func (p Proc) MountStats() ([]*Mount, error) { return parseMountStats(f) } +// MountInfo retrieves mount information for mount points in a +// process's namespace.
+// It supplies information missing in `/proc/self/mounts` and +// fixes various other problems with that file too. +func (p Proc) MountInfo() ([]*MountInfo, error) { + f, err := os.Open(p.path("mountinfo")) + if err != nil { + return nil, err + } + defer f.Close() + + return parseMountInfo(f) +} + func (p Proc) fileDescriptors() ([]string, error) { d, err := os.Open(p.path("fd")) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go new file mode 100644 index 000000000..7172bb586 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -0,0 +1,43 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "io/ioutil" + "os" + "strings" +) + +// Environ reads process environments from /proc/<pid>/environ. +func (p Proc) Environ() ([]string, error) { + environments := make([]string, 0) + + f, err := os.Open(p.path("environ")) + if err != nil { + return environments, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return environments, err + } + + environments = strings.Split(string(data), "\000") + if len(environments) > 0 { + environments = environments[:len(environments)-1] + } + + return environments, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 6ed98a8ae..dbde1fa0d 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -106,7 +106,7 @@ type ProcStat struct { // NewStat returns the current status information of the process.
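A small sketch of the new Proc.Environ in use. procfs.Self is the pre-existing package-level helper for the calling process; everything around the two library calls is an illustrative assumption:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Self returns a Proc for the calling process (/proc/self).
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	// Environ splits /proc/self/environ on NUL bytes into KEY=VALUE strings.
	env, err := p.Environ()
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range env {
		fmt.Println(kv)
	}
}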
// -// Deprecated: use NewStat() instead +// Deprecated: use p.Stat() instead func (p Proc) NewStat() (ProcStat, error) { return p.Stat() } diff --git a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go index 7faf5d7f9..798c1f1c5 100644 --- a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go +++ b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go @@ -130,10 +130,10 @@ func (p *JsonPointer) implementation(i *implStruct) { node = v[decodedToken] if isLastToken && i.mode == "SET" { v[decodedToken] = i.setInValue - } else if isLastToken && i.mode =="DEL" { - delete(v,decodedToken) + } else if isLastToken && i.mode == "DEL" { + delete(v, decodedToken) } - } else if (isLastToken && i.mode == "SET") { + } else if isLastToken && i.mode == "SET" { v[decodedToken] = i.setInValue } else { i.outError = fmt.Errorf("Object has no key '%s'", decodedToken) @@ -160,7 +160,7 @@ func (p *JsonPointer) implementation(i *implStruct) { node = v[tokenIndex] if isLastToken && i.mode == "SET" { v[tokenIndex] = i.setInValue - } else if isLastToken && i.mode =="DEL" { + } else if isLastToken && i.mode == "DEL" { v[tokenIndex] = v[len(v)-1] v[len(v)-1] = nil v = v[:len(v)-1] diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go index 72afe3338..6e5c81acd 100644 --- a/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -7,6 +7,7 @@ package unix import ( + "math/bits" "unsafe" ) @@ -79,46 +80,7 @@ func (s *CPUSet) IsSet(cpu int) bool { func (s *CPUSet) Count() int { c := 0 for _, b := range s { - c += onesCount64(uint64(b)) + c += bits.OnesCount64(uint64(b)) } return c } - -// onesCount64 is a copy of Go 1.9's math/bits.OnesCount64. -// Once this package can require Go 1.9, we can delete this -// and update the caller to use bits.OnesCount64. -func onesCount64(x uint64) int { - const m0 = 0x5555555555555555 // 01010101 ... - const m1 = 0x3333333333333333 // 00110011 ... - const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... - const m3 = 0x00ff00ff00ff00ff // etc. - const m4 = 0x0000ffff0000ffff - - // Implementation: Parallel summing of adjacent bits. - // See "Hacker's Delight", Chap. 5: Counting Bits. - // The following pattern shows the general approach: - // - // x = x>>1&(m0&m) + x&(m0&m) - // x = x>>2&(m1&m) + x&(m1&m) - // x = x>>4&(m2&m) + x&(m2&m) - // x = x>>8&(m3&m) + x&(m3&m) - // x = x>>16&(m4&m) + x&(m4&m) - // x = x>>32&(m5&m) + x&(m5&m) - // return int(x) - // - // Masking (& operations) can be left away when there's no - // danger that a field's sum will carry over into the next - // field: Since the result cannot be > 64, 8 bits is enough - // and we can ignore the masks for the shifts by 8 and up. - // Per "Hacker's Delight", the first line can be simplified - // more, but it saves at best one instruction, so we leave - // it alone for clarity. - const m = 1<<64 - 1 - x = x>>1&(m0&m) + x&(m0&m) - x = x>>2&(m1&m) + x&(m1&m) - x = (x>>4 + x) & (m2 & m) - x += x >> 8 - x += x >> 16 - x += x >> 32 - return int(x) & (1<<7 - 1) -} diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 6f3460e69..304016b68 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index 085df2d8d..bcdb5d30e 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // -// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le +// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index f121a8d64..3559e5dcb 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -6,7 +6,19 @@ package unix -import "runtime" +import ( + "runtime" + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // @@ -14,7 +26,7 @@ import "runtime" func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctlSetWinsize(fd, req, value) + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } @@ -24,7 +36,30 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // The req value will usually be TCSETA or TIOCSETA. func IoctlSetTermios(fd int, req uint, value *Termios) error { // TODO: if we get the chance, remove the req parameter. - err := ioctlSetTermios(fd, req, value) + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. 
+func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 3d85f2795..85cfbd049 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -199,12 +199,14 @@ struct ltchars { #include #include #include +#include #include #include #include #include #include #include +#include #include #include #include @@ -435,6 +437,8 @@ ccflags="$@" $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || + $2 ~ /^LO_(KEY|NAME)_SIZE$/ || + $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|MCAST|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || @@ -448,6 +452,7 @@ ccflags="$@" $2 ~ /^SYSCTL_VERS/ || $2 !~ "MNT_BITS" && $2 ~ /^(MS|MNT|UMOUNT)_/ || + $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT)_/ || $2 ~ /^KEXEC_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 1aa065f9c..9ad8a0d4a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -350,49 +350,12 @@ func (w WaitStatus) Signal() Signal { func (w WaitStatus) Continued() bool { return w&0x01000000 != 0 } -func (w WaitStatus) CoreDump() bool { return w&0x200 != 0 } +func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, // Therefore, the programmer must call dup2 instead of fcntl in this case. 
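The hunks above and below fold the per-OS ioctl get/set wrappers into the shared ioctl.go, and (further down, for Linux) add IoctlRetInt for requests that return their result as the syscall's return value, such as the NS_GET_* namespace ioctls this patch also defines. A Linux-only sketch; the target namespace file and error handling are illustrative assumptions:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// IoctlGetWinsize now comes from the shared ioctl.go rather than a per-OS file.
	ws, err := unix.IoctlGetWinsize(int(os.Stdout.Fd()), unix.TIOCGWINSZ)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("terminal: %d rows x %d cols\n", ws.Row, ws.Col)

	// NS_GET_PARENT hands back a new file descriptor as the ioctl's return
	// value, so it goes through IoctlRetInt instead of a pointer-based getter.
	f, err := os.Open("/proc/self/ns/pid")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	parentFd, err := unix.IoctlRetInt(int(f.Fd()), unix.NS_GET_PARENT)
	if err != nil {
		log.Fatal(err) // e.g. EPERM when already in the initial namespace
	}
	defer unix.Close(parentFd)
	fmt.Println("parent pid-namespace fd:", parentFd)
}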
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 216b4ac9e..f26a19ebd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -89,7 +89,6 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } -//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } @@ -340,43 +339,6 @@ func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(sig //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index 489726fa9..cd8be182a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -10,6 +10,8 @@ import ( "syscall" ) +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 914b89bde..d0d07243c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -10,6 +10,8 @@ import ( "syscall" ) +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index 4a284cf50..01e8a38a9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -8,6 +8,10 @@ import ( "syscall" ) +func ptrace(request int, pid int, addr uintptr, data uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } diff --git 
a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 52dcd88f6..e674f81da 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -10,6 +10,10 @@ import ( "syscall" ) +func ptrace(request int, pid int, addr uintptr, data uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 260a400f9..474181c02 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -150,43 +150,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func sysctlUname(mib []_C_int, old *byte, oldlen *uintptr) error { err := sysctl(mib, old, oldlen, nil, 0) if err != nil { diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 329d240b9..d95c4436f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -201,43 +201,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 637b5017b..fe30b9544 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -71,6 +71,17 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { // ioctl itself should not be exposed directly, but additional get/set // functions for specific types are permissible. +// IoctlRetInt performs an ioctl operation specified by req on a device +// associated with opened file descriptor fd, and returns a non-negative +// integer that is returned by the ioctl syscall. +func IoctlRetInt(fd int, req uint) (int, error) { + ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0) + if err != 0 { + return 0, err + } + return int(ret), nil +} + // IoctlSetPointerInt performs an ioctl operation which sets an // integer value on fd, using the specified request number. The ioctl // argument is called with a pointer to the integer value, rather than @@ -80,52 +91,18 @@ func IoctlSetPointerInt(fd int, req uint, value int) error { return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) } -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - func IoctlSetRTCTime(fd int, value *RTCTime) error { err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - func IoctlGetUint32(fd int, req uint) (uint32, error) { var value uint32 err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func IoctlGetRTCTime(fd int) (*RTCTime, error) { var value RTCTime err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value))) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 5ef309040..7f9812b4f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -187,43 +187,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func IoctlGetPtmget(fd int, req uint) (*Ptmget, error) { var value Ptmget err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 1a074b2fe..9a26768f6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -178,43 +178,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. 
-func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 0153a316d..1610f551d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -553,40 +553,10 @@ func Minor(dev uint64) uint32 { //sys ioctl(fd int, req uint, arg uintptr) (err error) -func IoctlSetInt(fd int, req uint, value int) (err error) { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) (err error) { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) (err error) { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - func IoctlSetTermio(fd int, req uint, value *Termio) (err error) { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func IoctlGetTermio(fd int, req uint) (*Termio, error) { var value Termio err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 1db2f00de..2839b3df2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 
0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -708,6 +722,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -973,6 +988,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1135,6 +1151,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1378,6 +1408,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1997,6 +2031,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2204,6 +2242,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2423,6 +2462,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x400854d5 TUNDETACHFILTER = 0x400854d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x800854db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 8a9d2eadf..99e3a3de2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 
BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -708,6 +722,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -973,6 +988,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1135,6 +1151,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1378,6 +1408,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1998,6 +2032,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2205,6 +2243,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2424,6 +2463,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 2e7455814..f5f5ee562 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 
0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -707,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -972,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1134,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1376,6 +1406,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2004,6 +2038,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2211,6 +2249,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2430,6 +2469,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x400854d5 TUNDETACHFILTER = 0x400854d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x800854db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index b1dc633a2..64573bdb7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 
0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -528,6 +541,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -710,6 +724,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -975,6 +990,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1137,6 +1153,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1379,6 +1409,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1988,6 +2022,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2196,6 +2234,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2415,6 +2454,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index ad4d9afb6..3e948be30 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -707,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -972,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1134,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1376,6 +1406,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1997,6 +2031,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -2205,6 +2243,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2425,6 +2464,7 @@ const ( TS_COMM_LEN = 
0x20 TUNATTACHFILTER = 0x800854d5 TUNDETACHFILTER = 0x800854d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x400854db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index fe2965028..8ac128bbd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -707,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -972,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1134,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1376,6 +1406,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1997,6 +2031,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -2205,6 +2243,7 @@ 
const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2425,6 +2464,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 608878303..e8845a7d4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -707,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -972,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1134,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1376,6 +1406,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1997,6 +2031,10 @@ const ( 
SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -2205,6 +2243,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2425,6 +2464,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 4cf9ddfad..338c044eb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -707,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -972,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1134,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1376,6 +1406,10 @@ const ( NLM_F_ROOT = 0x100 
NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1997,6 +2031,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -2205,6 +2243,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2425,6 +2464,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x800854d5 TUNDETACHFILTER = 0x800854d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x400854db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 374e3007f..a696532fc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -707,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -972,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1134,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 
0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1377,6 +1407,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80000000 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2055,6 +2089,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x4004667f SIOCOUTQ = 0x40047473 SIOCOUTQNSD = 0x894b @@ -2262,6 +2300,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2485,6 +2524,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index badf14102..9197b3353 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -707,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -972,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1134,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 
0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1377,6 +1407,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80000000 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2055,6 +2089,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x4004667f SIOCOUTQ = 0x40047473 SIOCOUTQNSD = 0x894b @@ -2262,6 +2300,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2485,6 +2524,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 0ce8c7eff..d1e023ed7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -707,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 
0x2 @@ -972,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1134,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1376,6 +1406,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1985,6 +2019,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2192,6 +2230,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2411,6 +2450,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 47675125a..1dfacf183 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb 
ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -707,6 +721,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -972,6 +987,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1134,6 +1150,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1376,6 +1406,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2058,6 +2092,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2265,6 +2303,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2484,6 +2523,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index a46fc9b43..b78e49fcf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -199,6 +199,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -220,6 +222,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -241,16 +248,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -293,8 +303,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -414,6 +426,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 
CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -531,6 +544,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -711,6 +725,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x1 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -976,6 +991,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1138,6 +1154,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1380,6 +1410,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2050,6 +2084,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x4004667f SIOCOUTQ = 0x40047473 SIOCOUTQNSD = 0x894b @@ -2257,6 +2295,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2473,6 +2512,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go index c4ec7ff87..dd5ea36ee 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go @@ -377,16 +377,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -1691,6 +1681,16 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func 
gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) sec = int32(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 23346dc68..78ca92339 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -527,21 +527,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -2341,6 +2326,21 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) sec = int32(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s index 37b85b4f6..f40465ca8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s @@ -64,8 +64,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -264,6 +262,8 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index c142e33e9..64df03c45 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -527,21 +527,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err 
error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -2356,6 +2341,21 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) sec = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 1a3915197..debcb8ed3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -64,8 +64,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -266,6 +264,8 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 01cffbf46..ed3306239 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -527,21 +527,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), 
uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s index 994056f35..66af9f480 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s @@ -64,8 +64,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 8f2691dee..5258a7328 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -527,21 +527,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 61dc0d4c1..f57f48f82 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -64,8 +64,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 33b6e4d1a..e869c0603 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -423,4 +423,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 9ba207847..4917b8ab6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -345,4 +345,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 94f68f101..f85fcb4f8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -387,4 +387,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 15c413516..678a119bc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -290,4 +290,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 638465b14..222c9f9a2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -408,4 +408,10 @@ const ( SYS_IO_URING_SETUP = 4425 SYS_IO_URING_ENTER = 4426 SYS_IO_URING_REGISTER = 4427 + SYS_OPEN_TREE = 4428 + SYS_MOVE_MOUNT = 4429 + SYS_FSOPEN = 4430 + SYS_FSCONFIG = 4431 + SYS_FSMOUNT = 4432 + SYS_FSPICK = 4433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 57ec82aac..28e6d0e9d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -338,4 +338,10 @@ const ( SYS_IO_URING_SETUP = 5425 SYS_IO_URING_ENTER = 5426 SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 825a3e3b0..e643c6f63 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -338,4 +338,10 @@ const ( SYS_IO_URING_SETUP = 5425 SYS_IO_URING_ENTER = 5426 SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index f152dfdd0..01d93c420 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -408,4 +408,10 @@ const ( SYS_IO_URING_SETUP = 4425 SYS_IO_URING_ENTER = 4426 SYS_IO_URING_REGISTER = 4427 + SYS_OPEN_TREE = 4428 + SYS_MOVE_MOUNT = 4429 + SYS_FSOPEN = 4430 + SYS_FSCONFIG = 4431 + SYS_FSMOUNT = 4432 + SYS_FSPICK = 4433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 7cbe78b19..5744149eb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -387,4 +387,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 51a2f1236..21c832042 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -387,4 +387,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 323432ae3..c1bb6d8f2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -289,4 +289,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 9dca97484..bc3cc6b5b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -352,4 +352,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index d3da46f0d..0a2841ba8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -367,4 +367,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 5492b9666..50bc4128f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -2484,3 +2484,40 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint16 + Inode uint32 + Rdevice uint16 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]int8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index caf33b2c5..055eaa76a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -2497,3 +2497,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo 
struct { + Number int32 + Device uint64 + Inode uint64 + Rdevice uint64 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 93aec7e22..66019c9cf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -2475,3 +2475,40 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint16 + Inode uint32 + Rdevice uint16 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]uint8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 0a038436d..3104798c4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -2476,3 +2476,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 2de0e5800..46c86021b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -2481,3 +2481,40 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint32 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]int8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key 
[32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 3735eb42e..c2fe1a62a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -2478,3 +2478,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 073c29939..f1eb0d397 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -2478,3 +2478,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 58d09f75e..8759bc36b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -2481,3 +2481,40 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint32 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]int8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 3f1e62e03..a81200541 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -2486,3 +2486,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + 
LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint64 + Inode uint64 + Rdevice uint64 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]uint8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index e67be11eb..74b7a9199 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -2486,3 +2486,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint64 + Inode uint64 + Rdevice uint64 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]uint8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index f44f29403..ccea3e638 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -808,6 +808,7 @@ type Ustat_t struct { type EpollEvent struct { Events uint32 + _ int32 Fd int32 Pad int32 } @@ -2503,3 +2504,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]uint8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 90bf5dcc7..d8fc0bc1c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -2500,3 +2500,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint16 + Inode uint64 + Rdevice uint16 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + 
_ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 4f054dcbb..5e0ab9329 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -2481,3 +2481,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 61b49647b..7b2cfb9e0 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -644,6 +644,8 @@ func (tml *Tokenmandatorylabel) Size() uint32 { //sys DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx //sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW //sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW +//sys getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetWindowsDirectoryW +//sys getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemWindowsDirectoryW // An access token contains the security information for a logon session. // The system creates an access token when a user logs on, and every @@ -664,7 +666,7 @@ func OpenCurrentProcessToken() (Token, error) { return 0, e } var t Token - e = OpenProcessToken(p, TOKEN_QUERY, &t) + e = OpenProcessToken(p, TOKEN_QUERY|TOKEN_DUPLICATE, &t) if e != nil { return 0, e } @@ -785,8 +787,8 @@ func (token Token) GetLinkedToken() (Token, error) { return linkedToken, nil } -// GetSystemDirectory retrieves path to current location of the system -// directory, which is typically, though not always, C:\Windows\System32. +// GetSystemDirectory retrieves the path to the current location of the system +// directory, which is typically, though not always, `C:\Windows\System32`. func GetSystemDirectory() (string, error) { n := uint32(MAX_PATH) for { @@ -802,6 +804,42 @@ func GetSystemDirectory() (string, error) { } } +// GetWindowsDirectory retrieves the path to the current location of the Windows +// directory, which is typically, though not always, `C:\Windows`. This may +// be a private user directory in the case that the application is running +// under a terminal server.
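+// +// A minimal usage sketch (an illustration only, not part of the upstream file): +// +// dir, err := windows.GetWindowsDirectory() +// if err != nil { +// // handle the lookup failure +// } +// fmt.Println(dir) // e.g. C:\Windows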
+func GetWindowsDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getWindowsDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + +// GetSystemWindowsDirectory retrieves the path to the current location of the +// Windows directory, which is typically, though not always, `C:\Windows`. +func GetSystemWindowsDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getSystemWindowsDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + // IsMember reports whether the access token t is a member of the provided SID. func (t Token) IsMember(sid *SID) (bool, error) { var b int32 diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index 03383f1df..847e00bc9 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -159,6 +159,10 @@ type SERVICE_DESCRIPTION struct { Description *uint16 } +type SERVICE_DELAYED_AUTO_START_INFO struct { + IsDelayedAutoStartUp uint32 +} + type SERVICE_STATUS_PROCESS struct { ServiceType uint32 CurrentState uint32 diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index b23050924..abdefc33c 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -257,6 +257,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetEvent(event Handle) (err error) = kernel32.SetEvent //sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent //sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent +//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) = kernel32.CreateMutexW +//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateMutexExW +//sys OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW +//sys ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex //sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx //sys CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW //sys AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject @@ -269,6 +273,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) //sys GetProcessId(process Handle) (id uint32, err error) //sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) +//sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -296,6 +301,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid //sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree //sys rtlGetVersion(info *OsVersionInfoEx) (ret error) = ntdll.RtlGetVersion +//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers // syscall interface implementation for other packages @@ -1306,8 +1312,8 @@ func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, e return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:]), nil } -// RtlGetVersion returns the true version of the underlying operating system, ignoring -// any manifesting or compatibility layers on top of the win32 layer. +// RtlGetVersion returns the version of the underlying operating system, ignoring +// manifest semantics; it is, however, affected by the application compatibility layer. func RtlGetVersion() *OsVersionInfoEx { info := &OsVersionInfoEx{} info.osVersionInfoSize = uint32(unsafe.Sizeof(*info)) @@ -1318,3 +1324,11 @@ func RtlGetVersion() *OsVersionInfoEx { _ = rtlGetVersion(info) return info } + +// RtlGetNtVersionNumbers returns the version of the underlying operating system, +// ignoring manifest semantics and the application compatibility layer. +func RtlGetNtVersionNumbers() (majorVersion, minorVersion, buildNumber uint32) { + rtlGetNtVersionNumbers(&majorVersion, &minorVersion, &buildNumber) + buildNumber &= 0xffff + return +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 8a563f92b..1ef80cd71 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -197,8 +197,11 @@ const ( FILE_MAP_READ = 0x04 FILE_MAP_EXECUTE = 0x20 - CTRL_C_EVENT = 0 - CTRL_BREAK_EVENT = 1 + CTRL_C_EVENT = 0 + CTRL_BREAK_EVENT = 1 + CTRL_CLOSE_EVENT = 2 + CTRL_LOGOFF_EVENT = 5 + CTRL_SHUTDOWN_EVENT = 6 // Windows reserves errors >= 1<<29 for application use. APPLICATION_ERROR = 1 << 29 @@ -1187,6 +1190,28 @@ const ( REG_QWORD = REG_QWORD_LITTLE_ENDIAN ) + +const ( + EVENT_MODIFY_STATE = 0x0002 + EVENT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 + + MUTANT_QUERY_STATE = 0x0001 + MUTANT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | MUTANT_QUERY_STATE + + SEMAPHORE_MODIFY_STATE = 0x0002 + SEMAPHORE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 + + TIMER_QUERY_STATE = 0x0001 + TIMER_MODIFY_STATE = 0x0002 + TIMER_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | TIMER_QUERY_STATE | TIMER_MODIFY_STATE + + MUTEX_MODIFY_STATE = MUTANT_QUERY_STATE + MUTEX_ALL_ACCESS = MUTANT_ALL_ACCESS + + CREATE_EVENT_MANUAL_RESET = 0x1 + CREATE_EVENT_INITIAL_SET = 0x2 + CREATE_MUTEX_INITIAL_OWNER = 0x1 +) + type AddrinfoW struct { Flags int32 Family int32 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index d461bed98..9c448be31 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -197,6 +197,10 @@ var ( procSetEvent = modkernel32.NewProc("SetEvent") procResetEvent = modkernel32.NewProc("ResetEvent") procPulseEvent = modkernel32.NewProc("PulseEvent") + procCreateMutexW = modkernel32.NewProc("CreateMutexW") + procCreateMutexExW = modkernel32.NewProc("CreateMutexExW") + procOpenMutexW = modkernel32.NewProc("OpenMutexW") + procReleaseMutex = modkernel32.NewProc("ReleaseMutex") procSleepEx = modkernel32.NewProc("SleepEx") procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") @@ -209,6 +213,7 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetProcessId = modkernel32.NewProc("GetProcessId") procOpenThread = 
modkernel32.NewProc("OpenThread") + procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") @@ -234,6 +239,7 @@ var ( procCoCreateGuid = modole32.NewProc("CoCreateGuid") procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") procRtlGetVersion = modntdll.NewProc("RtlGetVersion") + procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") procWSAStartup = modws2_32.NewProc("WSAStartup") procWSACleanup = modws2_32.NewProc("WSACleanup") procWSAIoctl = modws2_32.NewProc("WSAIoctl") @@ -303,6 +309,8 @@ var ( procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") + procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") + procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") @@ -2105,6 +2113,69 @@ func PulseEvent(event Handle) (err error) { return } +func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if initialOwner { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReleaseMutex(mutex Handle) (err error) { + r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { var _p0 uint32 if alertable { @@ -2255,6 +2326,24 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand return } +func SetProcessPriorityBoost(process Handle, disable bool) (err error) { + var _p0 uint32 + if disable { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func DefineDosDevice(flags uint32, 
deviceName *uint16, targetPath *uint16) (err error) { r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { @@ -2530,6 +2619,11 @@ func rtlGetVersion(info *OsVersionInfoEx) (ret error) { return } +func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { + syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) + return +} + func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) if r0 != 0 { @@ -3307,6 +3401,32 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { return } +func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func WTSQueryUserToken(session uint32, token *Token) (err error) { r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) if r1 == 0 { diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index ae93e2471..85c18b5a3 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -281,6 +281,23 @@ func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { lim.limit = newLimit } +// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst). +func (lim *Limiter) SetBurst(newBurst int) { + lim.SetBurstAt(time.Now(), newBurst) +} + +// SetBurstAt sets a new burst size for the limiter. +func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.burst = newBurst +} + // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 000000000..0b9907f89 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/rpc/status.proto + +package status + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). The error model is designed to be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error +// message, and error details. The error code should be an enum value of +// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes +// if needed. The error message should be a developer-facing English message +// that helps developers *understand* and *resolve* the error. If a localized +// user-facing error message is needed, put the localized message in the error +// details or localize it in the client. The optional error details may contain +// arbitrary information about the error. There is a predefined set of error +// detail types in the package `google.rpc` that can be used for common error +// conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error model, but it +// is not necessarily the actual wire format. When the `Status` message is +// exposed in different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety of +// environments, either with or without APIs, to provide a +// consistent developer experience across different environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the client, +// it may embed the `Status` in the normal response to indicate the partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch response, the +// `Status` message should be used directly inside batch response, one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the `Status` message. +// +// - Logging. 
If some API errors are stored in logs, the message `Status` could +// be used directly after any stripping needed for security/privacy reasons. +type Status struct { + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_24d244abaf643bfe, []int{0} +} + +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*Status)(nil), "google.rpc.Status") +} + +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) } + +var fileDescriptor_24d244abaf643bfe = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, + 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, + 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, + 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, + 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, + 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, + 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, + 0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, + 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, + 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1, + 0x81, 0x55, 0x1a, 
0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 000000000..0b206a578 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package codes + +import "strconv" + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 000000000..02738839d --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,198 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. 
E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + Unauthenticated Code = 16 + + _maxCode = 17 +) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. 
+ if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 000000000..34ec36fbf --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +// All APIs in this package are experimental. +package connectivity + +import ( + "context" + + "google.golang.org/grpc/grpclog" +) + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + grpclog.Errorf("unknown connectivity state: %d", s) + return "Invalid-State" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// Reporter reports the connectivity states. +type Reporter interface { + // CurrentState returns the current state of the reporter. + CurrentState() State + // WaitForStateChange blocks until the reporter's state is different from the given state, + // and returns true. + // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). + WaitForStateChange(context.Context, State) bool +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 000000000..51bb9457c --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpclog defines logging for grpc.
+//
+// All logs in transport and grpclb packages only go to verbose level 2.
+// All logs in other packages in grpc are logged regardless of the verbosity level.
+//
+// In the default logger,
+// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
+// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog // import "google.golang.org/grpc/grpclog"
+
+import "os"
+
+var logger = newLoggerV2()
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func V(l int) bool {
+	return logger.V(l)
+}
+
+// Info logs to the INFO log.
+func Info(args ...interface{}) {
+	logger.Info(args...)
+}
+
+// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
+func Infof(format string, args ...interface{}) {
+	logger.Infof(format, args...)
+}
+
+// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
+func Infoln(args ...interface{}) {
+	logger.Infoln(args...)
+}
+
+// Warning logs to the WARNING log.
+func Warning(args ...interface{}) {
+	logger.Warning(args...)
+}
+
+// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
+func Warningf(format string, args ...interface{}) {
+	logger.Warningf(format, args...)
+}
+
+// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
+func Warningln(args ...interface{}) {
+	logger.Warningln(args...)
+}
+
+// Error logs to the ERROR log.
+func Error(args ...interface{}) {
+	logger.Error(args...)
+}
+
+// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
+func Errorf(format string, args ...interface{}) {
+	logger.Errorf(format, args...)
+}
+
+// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
+func Errorln(args ...interface{}) {
+	logger.Errorln(args...)
+}
+
+// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
+// It calls os.Exit() with exit code 1.
+func Fatal(args ...interface{}) {
+	logger.Fatal(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
+// It calls os.Exit() with exit code 1.
+func Fatalf(format string, args ...interface{}) {
+	logger.Fatalf(format, args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...interface{}) {
+	logger.Fatalln(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+//
+// Deprecated: use Info.
+func Print(args ...interface{}) {
+	logger.Info(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+//
+// Deprecated: use Infof.
+func Printf(format string, args ...interface{}) {
+	logger.Infof(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+//
+// Deprecated: use Infoln.
+func Println(args ...interface{}) {
+	logger.Infoln(args...)
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
new file mode 100644
index 000000000..097494f71
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Fatalln(args ...interface{})
+	Print(args ...interface{})
+	Printf(format string, args ...interface{})
+	Println(args ...interface{})
+}
+
+// SetLogger sets the logger that is used in grpc. Call only from
+// init() functions.
+//
+// Deprecated: use SetLoggerV2.
+func SetLogger(l Logger) {
+	logger = &loggerWrapper{Logger: l}
+}
+
+// loggerWrapper wraps Logger into a LoggerV2.
+type loggerWrapper struct {
+	Logger
+}
+
+func (g *loggerWrapper) Info(args ...interface{}) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Infoln(args ...interface{}) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Infof(format string, args ...interface{}) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) Warning(args ...interface{}) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Warningln(args ...interface{}) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) Error(args ...interface{}) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Errorln(args ...interface{}) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) V(l int) bool {
+	// Returns true for all verbose levels.
+	return true
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
new file mode 100644
index 000000000..d49325776
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -0,0 +1,195 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package grpclog + +import ( + "io" + "io/ioutil" + "log" + "os" + "strconv" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + logger = l +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. 
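+// For example, an illustrative sketch that mirrors what newLoggerV2 below
+// builds when GRPC_GO_LOG_SEVERITY_LEVEL=INFO, with verbosity raised to 2:
+//
+//	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, ioutil.Discard, ioutil.Discard, 2))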
+func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) +} + +func (g *loggerT) Info(args ...interface{}) { + g.m[infoLog].Print(args...) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.m[infoLog].Println(args...) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.m[infoLog].Printf(format, args...) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.m[warningLog].Print(args...) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.m[warningLog].Println(args...) +} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.m[warningLog].Printf(format, args...) +} + +func (g *loggerT) Error(args ...interface{}) { + g.m[errorLog].Print(args...) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.m[errorLog].Println(args...) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.m[errorLog].Printf(format, args...) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.m[fatalLog].Fatal(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.m[fatalLog].Fatalln(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.m[fatalLog].Fatalf(format, args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 000000000..bc1f99ac8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,71 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+// Package internal contains gRPC-internal code, to avoid polluting
+// the godoc of the top-level grpc package. It must not import any grpc
+// symbols to avoid circular dependencies.
+package internal
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+)
+
+var (
+	// WithResolverBuilder is exported by dialoptions.go
+	WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
+	// WithHealthCheckFunc is not exported by dialoptions.go
+	WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
+	// HealthCheckFunc is used to provide client-side LB channel health checking
+	HealthCheckFunc HealthChecker
+	// BalancerUnregister is exported by package balancer to unregister a balancer.
+	BalancerUnregister func(name string)
+	// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
+	// default, but tests may wish to set it lower for convenience.
+	KeepaliveMinPingTime = 10 * time.Second
+	// ParseServiceConfig is a function to parse JSON service configs into
+	// opaque data structures.
+	ParseServiceConfig func(sc string) (interface{}, error)
+	// StatusRawProto is exported by status/status.go. This func returns a
+	// pointer to the wrapped Status proto for a given status.Status without a
+	// call to proto.Clone(). The returned Status proto should not be mutated by
+	// the caller.
+	StatusRawProto interface{} // func (*status.Status) *spb.Status
+)
+
+// HealthChecker defines the signature of the client-side LB channel health checking function.
+//
+// The implementation is expected to create a health checking RPC stream by
+// calling newStream(), watch for the health status of serviceName, and report
+// its health back by calling setConnectivityState().
+//
+// The health checking protocol is defined at:
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
+type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
+
+const (
+	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
+	CredsBundleModeFallback = "fallback"
+	// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
+	// mode.
+	CredsBundleModeBalancer = "balancer"
+	// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
+	// that supports backend returned by grpclb balancer.
+	CredsBundleModeBackendFromBalancer = "backend-from-balancer"
+)
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
new file mode 100644
index 000000000..a1348e9b1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -0,0 +1,228 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package status implements errors returned by gRPC.
These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "context" + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" +) + +func init() { + internal.StatusRawProto = statusRawProto +} + +func statusRawProto(s *Status) *spb.Status { return s.s } + +// statusError is an alias of a status proto. It implements error and Status, +// and a nil statusError should never be returned by this package. +type statusError spb.Status + +func (se *statusError) Error() string { + p := (*spb.Status)(se) + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +} + +func (se *statusError) GRPCStatus() *Status { + return &Status{s: (*spb.Status)(se)} +} + +// Is implements future error.Is functionality. +// A statusError is equivalent if the code and message are identical. +func (se *statusError) Is(target error) bool { + tse, ok := target.(*statusError) + if !ok { + return false + } + + return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) +} + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is +// OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return (*statusError)(s.s) +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. 
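+// For example (illustrative), FromProto(st.Proto()) yields a Status
+// equivalent to st, because both Proto and FromProto deep-copy the
+// underlying proto message.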
+func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// FromError returns a Status representing err if it was produced from this +// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a +// Status is returned with codes.Unknown and the original error message. +func FromError(err error) (s *Status, ok bool) { + if err == nil { + return nil, true + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus(), true + } + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. +func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. + if err == nil { + return codes.OK + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus().Code() + } + return codes.Unknown +} + +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. 
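+// For example (illustrative), once a deadline-bound ctx has expired,
+// FromContextError(ctx.Err()).Code() reports codes.DeadlineExceeded.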
+func FromContextError(err error) *Status { + switch err { + case nil: + return nil + case context.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 68106f3d8..c01409444 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -3,11 +3,13 @@ github.com/Azure/go-ansiterm/winterm github.com/Azure/go-ansiterm # github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml -# github.com/Microsoft/go-winio v0.4.12 +# github.com/Microsoft/go-winio v0.4.14 github.com/Microsoft/go-winio +github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/archive/tar github.com/Microsoft/go-winio/backuptar # github.com/Microsoft/hcsshim v0.8.6 +github.com/Microsoft/hcsshim/osversion github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/internal/guid github.com/Microsoft/hcsshim/internal/hcs @@ -25,7 +27,7 @@ github.com/Microsoft/hcsshim/internal/longpath github.com/Microsoft/hcsshim/internal/safefile # github.com/VividCortex/ewma v1.1.1 github.com/VividCortex/ewma -# github.com/beorn7/perks v1.0.0 +# github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile # github.com/blang/semver v3.5.1+incompatible github.com/blang/semver @@ -49,7 +51,7 @@ github.com/containernetworking/plugins/pkg/ip github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator github.com/containernetworking/plugins/pkg/utils/hwaddr github.com/containernetworking/plugins/plugins/ipam/host-local/backend -# github.com/containers/buildah v1.11.2 +# github.com/containers/buildah v1.11.3 github.com/containers/buildah github.com/containers/buildah/imagebuildah github.com/containers/buildah/pkg/chrootuser @@ -62,46 +64,50 @@ github.com/containers/buildah/bind github.com/containers/buildah/chroot github.com/containers/buildah/docker github.com/containers/buildah/pkg/blobcache +github.com/containers/buildah/pkg/cgroups github.com/containers/buildah/pkg/overlay github.com/containers/buildah/pkg/unshare -github.com/containers/buildah/pkg/cgroups github.com/containers/buildah/pkg/umask -# github.com/containers/image v3.0.2+incompatible -github.com/containers/image/directory -github.com/containers/image/docker -github.com/containers/image/docker/archive -github.com/containers/image/manifest -github.com/containers/image/pkg/docker/config -github.com/containers/image/signature -github.com/containers/image/transports -github.com/containers/image/transports/alltransports -github.com/containers/image/types -github.com/containers/image/oci/archive -github.com/containers/image/storage -github.com/containers/image/copy -github.com/containers/image/docker/reference -github.com/containers/image/docker/tarfile -github.com/containers/image/oci/layout -github.com/containers/image/tarball -github.com/containers/image/pkg/sysregistriesv2 -github.com/containers/image/image -github.com/containers/image/directory/explicitfilepath -github.com/containers/image/docker/policyconfiguration -github.com/containers/image/pkg/blobinfocache/none -github.com/containers/image/pkg/tlsclientconfig -github.com/containers/image/pkg/strslice -github.com/containers/image/pkg/keyctl -github.com/containers/image/version -github.com/containers/image/docker/daemon -github.com/containers/image/openshift -github.com/containers/image/ostree -github.com/containers/image/internal/tmpdir -github.com/containers/image/oci/internal 
-github.com/containers/image/pkg/blobinfocache -github.com/containers/image/pkg/compression -github.com/containers/image/pkg/blobinfocache/boltdb -github.com/containers/image/pkg/blobinfocache/memory -github.com/containers/image/pkg/blobinfocache/internal/prioritize +# github.com/containers/image/v4 v4.0.1 +github.com/containers/image/v4/directory +github.com/containers/image/v4/docker +github.com/containers/image/v4/docker/archive +github.com/containers/image/v4/manifest +github.com/containers/image/v4/pkg/docker/config +github.com/containers/image/v4/signature +github.com/containers/image/v4/transports +github.com/containers/image/v4/transports/alltransports +github.com/containers/image/v4/types +github.com/containers/image/v4/oci/archive +github.com/containers/image/v4/storage +github.com/containers/image/v4/copy +github.com/containers/image/v4/docker/reference +github.com/containers/image/v4/docker/tarfile +github.com/containers/image/v4/oci/layout +github.com/containers/image/v4/tarball +github.com/containers/image/v4/pkg/sysregistriesv2 +github.com/containers/image/v4/image +github.com/containers/image/v4/directory/explicitfilepath +github.com/containers/image/v4/docker/policyconfiguration +github.com/containers/image/v4/pkg/blobinfocache/none +github.com/containers/image/v4/pkg/tlsclientconfig +github.com/containers/image/v4/pkg/compression +github.com/containers/image/v4/pkg/strslice +github.com/containers/image/v4/internal/pkg/keyctl +github.com/containers/image/v4/version +github.com/containers/image/v4/docker/daemon +github.com/containers/image/v4/openshift +github.com/containers/image/v4/ostree +github.com/containers/image/v4/pkg/compression/types +github.com/containers/image/v4/internal/tmpdir +github.com/containers/image/v4/oci/internal +github.com/containers/image/v4/pkg/blobinfocache +github.com/containers/image/v4/pkg/compression/internal +github.com/containers/image/v4/pkg/blobinfocache/boltdb +github.com/containers/image/v4/pkg/blobinfocache/memory +github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize +# github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b +github.com/containers/libtrust # github.com/containers/psgo v1.3.1 github.com/containers/psgo github.com/containers/psgo/internal/capabilities @@ -177,7 +183,7 @@ github.com/docker/distribution/registry/client/transport github.com/docker/distribution/registry/storage/cache github.com/docker/distribution/registry/storage/cache/memory github.com/docker/distribution/metrics -# github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16 +# github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b github.com/docker/docker/pkg/signal github.com/docker/docker/pkg/homedir github.com/docker/docker/oci/caps @@ -220,7 +226,7 @@ github.com/docker/docker-credential-helpers/client github.com/docker/go-connections/nat github.com/docker/go-connections/tlsconfig github.com/docker/go-connections/sockets -# github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 +# github.com/docker/go-metrics v0.0.1 github.com/docker/go-metrics # github.com/docker/go-units v0.4.0 github.com/docker/go-units @@ -228,8 +234,6 @@ github.com/docker/go-units github.com/docker/libnetwork/resolvconf github.com/docker/libnetwork/types github.com/docker/libnetwork/resolvconf/dns -# github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 -github.com/docker/libtrust # github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c github.com/docker/spdystream github.com/docker/spdystream/spdy @@ -239,7 +243,7 @@ 
github.com/etcd-io/bbolt github.com/fatih/camelcase # github.com/fsnotify/fsnotify v1.4.7 github.com/fsnotify/fsnotify -# github.com/fsouza/go-dockerclient v1.4.1 +# github.com/fsouza/go-dockerclient v1.4.4 github.com/fsouza/go-dockerclient github.com/fsouza/go-dockerclient/internal/archive github.com/fsouza/go-dockerclient/internal/jsonmessage @@ -251,13 +255,17 @@ github.com/godbus/dbus # github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang/protobuf v1.3.1 +# github.com/golang/protobuf v1.3.2 github.com/golang/protobuf/proto +github.com/golang/protobuf/ptypes +github.com/golang/protobuf/ptypes/any +github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/timestamp # github.com/google/gofuzz v1.0.0 github.com/google/gofuzz # github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf github.com/google/shlex -# github.com/gorilla/mux v1.7.2 +# github.com/gorilla/mux v1.7.3 github.com/gorilla/mux # github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/errwrap @@ -279,7 +287,7 @@ github.com/inconshreveable/mousetrap github.com/ishidawataru/sctp # github.com/json-iterator/go v1.1.7 github.com/json-iterator/go -# github.com/klauspost/compress v1.7.2 +# github.com/klauspost/compress v1.8.1 github.com/klauspost/compress/zstd github.com/klauspost/compress/flate github.com/klauspost/compress/huff0 @@ -294,7 +302,7 @@ github.com/klauspost/pgzip github.com/konsorten/go-windows-terminal-sequences # github.com/mattn/go-isatty v0.0.8 github.com/mattn/go-isatty -# github.com/mattn/go-shellwords v1.0.5 +# github.com/mattn/go-shellwords v1.0.6 github.com/mattn/go-shellwords # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil @@ -352,7 +360,7 @@ github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util # github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.0.1 +# github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 github.com/opencontainers/image-spec/specs-go/v1 github.com/opencontainers/image-spec/specs-go # github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158 @@ -395,12 +403,12 @@ github.com/pkg/errors github.com/pkg/profile # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 +# github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9 github.com/pquerna/ffjson/fflib/v1 github.com/pquerna/ffjson/inception github.com/pquerna/ffjson/shared github.com/pquerna/ffjson/fflib/v1/internal -# github.com/prometheus/client_golang v1.0.0 +# github.com/prometheus/client_golang v1.1.0 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/internal @@ -410,7 +418,7 @@ github.com/prometheus/client_model/go github.com/prometheus/common/expfmt github.com/prometheus/common/model github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg -# github.com/prometheus/procfs v0.0.2 +# github.com/prometheus/procfs v0.0.3 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs # github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 @@ -476,13 +484,13 @@ github.com/vishvananda/netlink github.com/vishvananda/netlink/nl # github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f 
github.com/vishvananda/netns -# github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f +# github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b github.com/xeipuuv/gojsonpointer # github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 github.com/xeipuuv/gojsonreference # github.com/xeipuuv/gojsonschema v1.1.0 github.com/xeipuuv/gojsonschema -# golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4 +# golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 golang.org/x/crypto/ssh/terminal golang.org/x/crypto/openpgp golang.org/x/crypto/openpgp/armor @@ -508,7 +516,7 @@ golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sync v0.0.0-20190423024810-112230192c58 golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb +# golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry @@ -533,7 +541,7 @@ golang.org/x/text/runes golang.org/x/text/internal/language golang.org/x/text/internal/language/compact golang.org/x/text/internal/tag -# golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 +# golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 golang.org/x/time/rate # google.golang.org/appengine v1.6.1 google.golang.org/appengine/urlfetch @@ -543,6 +551,14 @@ google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api +# google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601 +google.golang.org/genproto/googleapis/rpc/status +# google.golang.org/grpc v1.24.0 +google.golang.org/grpc/codes +google.golang.org/grpc/status +google.golang.org/grpc/internal +google.golang.org/grpc/connectivity +google.golang.org/grpc/grpclog # gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/fsnotify.v1 # gopkg.in/inf.v0 v0.9.1 -- cgit v1.2.3-54-g00ecf