Diffstat (limited to 'vendor')
-rw-r--r--vendor/github.com/containerd/containerd/LICENSE191
-rw-r--r--vendor/github.com/containerd/containerd/NOTICE16
-rw-r--r--vendor/github.com/containerd/containerd/errdefs/errors.go93
-rw-r--r--vendor/github.com/containerd/containerd/errdefs/grpc.go147
-rw-r--r--vendor/github.com/containerd/continuity/fs/copy.go172
-rw-r--r--vendor/github.com/containerd/continuity/fs/copy_linux.go144
-rw-r--r--vendor/github.com/containerd/continuity/fs/copy_unix.go112
-rw-r--r--vendor/github.com/containerd/continuity/fs/copy_windows.go49
-rw-r--r--vendor/github.com/containerd/continuity/fs/diff.go326
-rw-r--r--vendor/github.com/containerd/continuity/fs/diff_unix.go74
-rw-r--r--vendor/github.com/containerd/continuity/fs/diff_windows.go48
-rw-r--r--vendor/github.com/containerd/continuity/fs/dtype_linux.go103
-rw-r--r--vendor/github.com/containerd/continuity/fs/du.go38
-rw-r--r--vendor/github.com/containerd/continuity/fs/du_unix.go110
-rw-r--r--vendor/github.com/containerd/continuity/fs/du_windows.go82
-rw-r--r--vendor/github.com/containerd/continuity/fs/hardlink.go43
-rw-r--r--vendor/github.com/containerd/continuity/fs/hardlink_unix.go34
-rw-r--r--vendor/github.com/containerd/continuity/fs/hardlink_windows.go23
-rw-r--r--vendor/github.com/containerd/continuity/fs/path.go313
-rw-r--r--vendor/github.com/containerd/continuity/fs/stat_bsd.go44
-rw-r--r--vendor/github.com/containerd/continuity/fs/stat_linux.go43
-rw-r--r--vendor/github.com/containerd/continuity/fs/time.go29
-rw-r--r--vendor/github.com/containerd/continuity/pathdriver/path_driver.go101
-rw-r--r--vendor/github.com/containerd/continuity/syscallx/syscall_unix.go26
-rw-r--r--vendor/github.com/containerd/continuity/syscallx/syscall_windows.go112
-rw-r--r--vendor/github.com/containerd/continuity/sysx/README.md3
-rw-r--r--vendor/github.com/containerd/continuity/sysx/file_posix.go128
-rw-r--r--vendor/github.com/containerd/continuity/sysx/generate.sh52
-rw-r--r--vendor/github.com/containerd/continuity/sysx/nodata_linux.go23
-rw-r--r--vendor/github.com/containerd/continuity/sysx/nodata_solaris.go24
-rw-r--r--vendor/github.com/containerd/continuity/sysx/nodata_unix.go25
-rw-r--r--vendor/github.com/containerd/continuity/sysx/xattr.go125
-rw-r--r--vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go67
-rw-r--r--vendor/github.com/containers/buildah/CHANGELOG.md26
-rw-r--r--vendor/github.com/containers/buildah/OWNERS22
-rw-r--r--vendor/github.com/containers/buildah/buildah.go4
-rw-r--r--vendor/github.com/containers/buildah/changelog.txt22
-rw-r--r--vendor/github.com/containers/buildah/commit.go20
-rw-r--r--vendor/github.com/containers/buildah/common.go4
-rw-r--r--vendor/github.com/containers/buildah/config.go6
-rw-r--r--vendor/github.com/containers/buildah/docker/types.go2
-rw-r--r--vendor/github.com/containers/buildah/go.mod10
-rw-r--r--vendor/github.com/containers/buildah/go.sum29
-rw-r--r--vendor/github.com/containers/buildah/image.go12
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/build.go4
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/executor.go10
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/stage_executor.go23
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/util.go11
-rw-r--r--vendor/github.com/containers/buildah/import.go6
-rw-r--r--vendor/github.com/containers/buildah/new.go12
-rw-r--r--vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go28
-rw-r--r--vendor/github.com/containers/buildah/pkg/cli/common.go13
-rw-r--r--vendor/github.com/containers/buildah/pkg/parse/parse.go8
-rw-r--r--vendor/github.com/containers/buildah/pkg/parse/parse_unix.go38
-rw-r--r--vendor/github.com/containers/buildah/pull.go24
-rw-r--r--vendor/github.com/containers/buildah/util.go6
-rw-r--r--vendor/github.com/containers/buildah/util/util.go12
-rw-r--r--vendor/github.com/containers/image/v4/image/docker_list.go94
-rw-r--r--vendor/github.com/containers/image/v5/LICENSE (renamed from vendor/github.com/containers/image/v4/LICENSE)0
-rw-r--r--vendor/github.com/containers/image/v5/copy/copy.go (renamed from vendor/github.com/containers/image/v4/copy/copy.go)370
-rw-r--r--vendor/github.com/containers/image/v5/copy/manifest.go (renamed from vendor/github.com/containers/image/v4/copy/manifest.go)37
-rw-r--r--vendor/github.com/containers/image/v5/copy/progress_reader.go (renamed from vendor/github.com/containers/image/v4/copy/progress_reader.go)2
-rw-r--r--vendor/github.com/containers/image/v5/copy/sign.go (renamed from vendor/github.com/containers/image/v4/copy/sign.go)4
-rw-r--r--vendor/github.com/containers/image/v5/directory/directory_dest.go (renamed from vendor/github.com/containers/image/v4/directory/directory_dest.go)19
-rw-r--r--vendor/github.com/containers/image/v5/directory/directory_src.go (renamed from vendor/github.com/containers/image/v4/directory/directory_src.go)26
-rw-r--r--vendor/github.com/containers/image/v5/directory/directory_transport.go (renamed from vendor/github.com/containers/image/v4/directory/directory_transport.go)22
-rw-r--r--vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go (renamed from vendor/github.com/containers/image/v4/directory/explicitfilepath/path.go)0
-rw-r--r--vendor/github.com/containers/image/v5/docker/archive/dest.go (renamed from vendor/github.com/containers/image/v4/docker/archive/dest.go)6
-rw-r--r--vendor/github.com/containers/image/v5/docker/archive/src.go (renamed from vendor/github.com/containers/image/v4/docker/archive/src.go)9
-rw-r--r--vendor/github.com/containers/image/v5/docker/archive/transport.go (renamed from vendor/github.com/containers/image/v4/docker/archive/transport.go)8
-rw-r--r--vendor/github.com/containers/image/v5/docker/cache.go (renamed from vendor/github.com/containers/image/v4/docker/cache.go)4
-rw-r--r--vendor/github.com/containers/image/v5/docker/daemon/client.go (renamed from vendor/github.com/containers/image/v4/docker/daemon/client.go)2
-rw-r--r--vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go (renamed from vendor/github.com/containers/image/v4/docker/daemon/daemon_dest.go)8
-rw-r--r--vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go (renamed from vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go)9
-rw-r--r--vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go (renamed from vendor/github.com/containers/image/v4/docker/daemon/daemon_transport.go)10
-rw-r--r--vendor/github.com/containers/image/v5/docker/docker_client.go (renamed from vendor/github.com/containers/image/v4/docker/docker_client.go)116
-rw-r--r--vendor/github.com/containers/image/v5/docker/docker_image.go (renamed from vendor/github.com/containers/image/v4/docker/docker_image.go)12
-rw-r--r--vendor/github.com/containers/image/v5/docker/docker_image_dest.go (renamed from vendor/github.com/containers/image/v4/docker/docker_image_dest.go)98
-rw-r--r--vendor/github.com/containers/image/v5/docker/docker_image_src.go (renamed from vendor/github.com/containers/image/v4/docker/docker_image_src.go)24
-rw-r--r--vendor/github.com/containers/image/v5/docker/docker_transport.go (renamed from vendor/github.com/containers/image/v4/docker/docker_transport.go)8
-rw-r--r--vendor/github.com/containers/image/v5/docker/errors.go43
-rw-r--r--vendor/github.com/containers/image/v5/docker/lookaside.go (renamed from vendor/github.com/containers/image/v4/docker/lookaside.go)4
-rw-r--r--vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go (renamed from vendor/github.com/containers/image/v4/docker/policyconfiguration/naming.go)2
-rw-r--r--vendor/github.com/containers/image/v5/docker/reference/README.md (renamed from vendor/github.com/containers/image/v4/docker/reference/README.md)0
-rw-r--r--vendor/github.com/containers/image/v5/docker/reference/helpers.go (renamed from vendor/github.com/containers/image/v4/docker/reference/helpers.go)0
-rw-r--r--vendor/github.com/containers/image/v5/docker/reference/normalize.go (renamed from vendor/github.com/containers/image/v4/docker/reference/normalize.go)0
-rw-r--r--vendor/github.com/containers/image/v5/docker/reference/reference.go (renamed from vendor/github.com/containers/image/v4/docker/reference/reference.go)0
-rw-r--r--vendor/github.com/containers/image/v5/docker/reference/regexp.go (renamed from vendor/github.com/containers/image/v4/docker/reference/regexp.go)0
-rw-r--r--vendor/github.com/containers/image/v5/docker/tarfile/dest.go (renamed from vendor/github.com/containers/image/v4/docker/tarfile/dest.go)26
-rw-r--r--vendor/github.com/containers/image/v5/docker/tarfile/doc.go (renamed from vendor/github.com/containers/image/v4/docker/tarfile/doc.go)0
-rw-r--r--vendor/github.com/containers/image/v5/docker/tarfile/src.go (renamed from vendor/github.com/containers/image/v4/docker/tarfile/src.go)28
-rw-r--r--vendor/github.com/containers/image/v5/docker/tarfile/types.go (renamed from vendor/github.com/containers/image/v4/docker/tarfile/types.go)2
-rw-r--r--vendor/github.com/containers/image/v5/docker/wwwauthenticate.go (renamed from vendor/github.com/containers/image/v4/docker/wwwauthenticate.go)0
-rw-r--r--vendor/github.com/containers/image/v5/image/docker_list.go34
-rw-r--r--vendor/github.com/containers/image/v5/image/docker_schema1.go (renamed from vendor/github.com/containers/image/v4/image/docker_schema1.go)6
-rw-r--r--vendor/github.com/containers/image/v5/image/docker_schema2.go (renamed from vendor/github.com/containers/image/v4/image/docker_schema2.go)8
-rw-r--r--vendor/github.com/containers/image/v5/image/manifest.go (renamed from vendor/github.com/containers/image/v4/image/manifest.go)8
-rw-r--r--vendor/github.com/containers/image/v5/image/memory.go (renamed from vendor/github.com/containers/image/v4/image/memory.go)3
-rw-r--r--vendor/github.com/containers/image/v5/image/oci.go (renamed from vendor/github.com/containers/image/v4/image/oci.go)8
-rw-r--r--vendor/github.com/containers/image/v5/image/oci_index.go34
-rw-r--r--vendor/github.com/containers/image/v5/image/sourced.go (renamed from vendor/github.com/containers/image/v4/image/sourced.go)4
-rw-r--r--vendor/github.com/containers/image/v5/image/unparsed.go (renamed from vendor/github.com/containers/image/v4/image/unparsed.go)6
-rw-r--r--vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go (renamed from vendor/github.com/containers/image/v4/internal/pkg/keyctl/key.go)0
-rw-r--r--vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go (renamed from vendor/github.com/containers/image/v4/internal/pkg/keyctl/keyring.go)0
-rw-r--r--vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go (renamed from vendor/github.com/containers/image/v4/internal/pkg/keyctl/perm.go)0
-rw-r--r--vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go (renamed from vendor/github.com/containers/image/v4/internal/pkg/keyctl/sys_linux.go)0
-rw-r--r--vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go (renamed from vendor/github.com/containers/image/v4/internal/tmpdir/tmpdir.go)0
-rw-r--r--vendor/github.com/containers/image/v5/manifest/docker_schema1.go (renamed from vendor/github.com/containers/image/v4/manifest/docker_schema1.go)4
-rw-r--r--vendor/github.com/containers/image/v5/manifest/docker_schema2.go (renamed from vendor/github.com/containers/image/v4/manifest/docker_schema2.go)6
-rw-r--r--vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go216
-rw-r--r--vendor/github.com/containers/image/v5/manifest/list.go106
-rw-r--r--vendor/github.com/containers/image/v5/manifest/manifest.go (renamed from vendor/github.com/containers/image/v4/manifest/manifest.go)22
-rw-r--r--vendor/github.com/containers/image/v5/manifest/oci.go (renamed from vendor/github.com/containers/image/v4/manifest/oci.go)4
-rw-r--r--vendor/github.com/containers/image/v5/manifest/oci_index.go221
-rw-r--r--vendor/github.com/containers/image/v5/oci/archive/oci_dest.go (renamed from vendor/github.com/containers/image/v4/oci/archive/oci_dest.go)24
-rw-r--r--vendor/github.com/containers/image/v5/oci/archive/oci_src.go (renamed from vendor/github.com/containers/image/v4/oci/archive/oci_src.go)17
-rw-r--r--vendor/github.com/containers/image/v5/oci/archive/oci_transport.go (renamed from vendor/github.com/containers/image/v4/oci/archive/oci_transport.go)16
-rw-r--r--vendor/github.com/containers/image/v5/oci/internal/oci_util.go (renamed from vendor/github.com/containers/image/v4/oci/internal/oci_util.go)0
-rw-r--r--vendor/github.com/containers/image/v5/oci/layout/oci_dest.go (renamed from vendor/github.com/containers/image/v4/oci/layout/oci_dest.go)82
-rw-r--r--vendor/github.com/containers/image/v5/oci/layout/oci_src.go (renamed from vendor/github.com/containers/image/v4/oci/layout/oci_src.go)42
-rw-r--r--vendor/github.com/containers/image/v5/oci/layout/oci_transport.go (renamed from vendor/github.com/containers/image/v4/oci/layout/oci_transport.go)16
-rw-r--r--vendor/github.com/containers/image/v5/openshift/openshift-copies.go (renamed from vendor/github.com/containers/image/v4/openshift/openshift-copies.go)0
-rw-r--r--vendor/github.com/containers/image/v5/openshift/openshift.go (renamed from vendor/github.com/containers/image/v4/openshift/openshift.go)65
-rw-r--r--vendor/github.com/containers/image/v5/openshift/openshift_transport.go (renamed from vendor/github.com/containers/image/v4/openshift/openshift_transport.go)10
-rw-r--r--vendor/github.com/containers/image/v5/ostree/ostree_dest.go (renamed from vendor/github.com/containers/image/v4/ostree/ostree_dest.go)23
-rw-r--r--vendor/github.com/containers/image/v5/ostree/ostree_src.go (renamed from vendor/github.com/containers/image/v4/ostree/ostree_src.go)30
-rw-r--r--vendor/github.com/containers/image/v5/ostree/ostree_transport.go (renamed from vendor/github.com/containers/image/v4/ostree/ostree_transport.go)10
-rw-r--r--vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go (renamed from vendor/github.com/containers/image/v4/pkg/blobinfocache/boltdb/boltdb.go)4
-rw-r--r--vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go (renamed from vendor/github.com/containers/image/v4/pkg/blobinfocache/default.go)6
-rw-r--r--vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go (renamed from vendor/github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize/prioritize.go)2
-rw-r--r--vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go (renamed from vendor/github.com/containers/image/v4/pkg/blobinfocache/memory/memory.go)4
-rw-r--r--vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go (renamed from vendor/github.com/containers/image/v4/pkg/blobinfocache/none/none.go)2
-rw-r--r--vendor/github.com/containers/image/v5/pkg/compression/compression.go (renamed from vendor/github.com/containers/image/v4/pkg/compression/compression.go)4
-rw-r--r--vendor/github.com/containers/image/v5/pkg/compression/internal/types.go (renamed from vendor/github.com/containers/image/v4/pkg/compression/internal/types.go)0
-rw-r--r--vendor/github.com/containers/image/v5/pkg/compression/types/types.go (renamed from vendor/github.com/containers/image/v4/pkg/compression/types/types.go)2
-rw-r--r--vendor/github.com/containers/image/v5/pkg/compression/zstd.go (renamed from vendor/github.com/containers/image/v4/pkg/compression/zstd.go)0
-rw-r--r--vendor/github.com/containers/image/v5/pkg/docker/config/config.go (renamed from vendor/github.com/containers/image/v4/pkg/docker/config/config.go)43
-rw-r--r--vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go (renamed from vendor/github.com/containers/image/v4/pkg/docker/config/config_linux.go)2
-rw-r--r--vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go (renamed from vendor/github.com/containers/image/v4/pkg/docker/config/config_unsupported.go)0
-rw-r--r--vendor/github.com/containers/image/v5/pkg/strslice/README.md (renamed from vendor/github.com/containers/image/v4/pkg/strslice/README.md)0
-rw-r--r--vendor/github.com/containers/image/v5/pkg/strslice/strslice.go (renamed from vendor/github.com/containers/image/v4/pkg/strslice/strslice.go)0
-rw-r--r--vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go (renamed from vendor/github.com/containers/image/v4/pkg/sysregistriesv2/system_registries_v2.go)5
-rw-r--r--vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go (renamed from vendor/github.com/containers/image/v4/pkg/tlsclientconfig/tlsclientconfig.go)0
-rw-r--r--vendor/github.com/containers/image/v5/signature/docker.go (renamed from vendor/github.com/containers/image/v4/signature/docker.go)4
-rw-r--r--vendor/github.com/containers/image/v5/signature/json.go (renamed from vendor/github.com/containers/image/v4/signature/json.go)0
-rw-r--r--vendor/github.com/containers/image/v5/signature/mechanism.go (renamed from vendor/github.com/containers/image/v4/signature/mechanism.go)0
-rw-r--r--vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go (renamed from vendor/github.com/containers/image/v4/signature/mechanism_gpgme.go)0
-rw-r--r--vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go (renamed from vendor/github.com/containers/image/v4/signature/mechanism_openpgp.go)0
-rw-r--r--vendor/github.com/containers/image/v5/signature/policy_config.go (renamed from vendor/github.com/containers/image/v4/signature/policy_config.go)6
-rw-r--r--vendor/github.com/containers/image/v5/signature/policy_eval.go (renamed from vendor/github.com/containers/image/v4/signature/policy_eval.go)2
-rw-r--r--vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go (renamed from vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go)2
-rw-r--r--vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go (renamed from vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go)7
-rw-r--r--vendor/github.com/containers/image/v5/signature/policy_eval_simple.go (renamed from vendor/github.com/containers/image/v4/signature/policy_eval_simple.go)4
-rw-r--r--vendor/github.com/containers/image/v5/signature/policy_reference_match.go (renamed from vendor/github.com/containers/image/v4/signature/policy_reference_match.go)6
-rw-r--r--vendor/github.com/containers/image/v5/signature/policy_types.go (renamed from vendor/github.com/containers/image/v4/signature/policy_types.go)0
-rw-r--r--vendor/github.com/containers/image/v5/signature/signature.go (renamed from vendor/github.com/containers/image/v4/signature/signature.go)5
-rw-r--r--vendor/github.com/containers/image/v5/storage/storage_image.go (renamed from vendor/github.com/containers/image/v4/storage/storage_image.go)247
-rw-r--r--vendor/github.com/containers/image/v5/storage/storage_reference.go (renamed from vendor/github.com/containers/image/v4/storage/storage_reference.go)90
-rw-r--r--vendor/github.com/containers/image/v5/storage/storage_transport.go (renamed from vendor/github.com/containers/image/v4/storage/storage_transport.go)11
-rw-r--r--vendor/github.com/containers/image/v5/tarball/doc.go (renamed from vendor/github.com/containers/image/v4/tarball/doc.go)9
-rw-r--r--vendor/github.com/containers/image/v5/tarball/tarball_reference.go (renamed from vendor/github.com/containers/image/v4/tarball/tarball_reference.go)6
-rw-r--r--vendor/github.com/containers/image/v5/tarball/tarball_src.go (renamed from vendor/github.com/containers/image/v4/tarball/tarball_src.go)18
-rw-r--r--vendor/github.com/containers/image/v5/tarball/tarball_transport.go (renamed from vendor/github.com/containers/image/v4/tarball/tarball_transport.go)4
-rw-r--r--vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go (renamed from vendor/github.com/containers/image/v4/transports/alltransports/alltransports.go)18
-rw-r--r--vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go (renamed from vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon.go)2
-rw-r--r--vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go (renamed from vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon_stub.go)2
-rw-r--r--vendor/github.com/containers/image/v5/transports/alltransports/ostree.go (renamed from vendor/github.com/containers/image/v4/transports/alltransports/ostree.go)2
-rw-r--r--vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go (renamed from vendor/github.com/containers/image/v4/transports/alltransports/ostree_stub.go)2
-rw-r--r--vendor/github.com/containers/image/v5/transports/alltransports/storage.go (renamed from vendor/github.com/containers/image/v4/transports/alltransports/storage.go)2
-rw-r--r--vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go (renamed from vendor/github.com/containers/image/v4/transports/alltransports/storage_stub.go)2
-rw-r--r--vendor/github.com/containers/image/v5/transports/stub.go (renamed from vendor/github.com/containers/image/v4/transports/stub.go)2
-rw-r--r--vendor/github.com/containers/image/v5/transports/transports.go (renamed from vendor/github.com/containers/image/v4/transports/transports.go)2
-rw-r--r--vendor/github.com/containers/image/v5/types/types.go (renamed from vendor/github.com/containers/image/v4/types/types.go)36
-rw-r--r--vendor/github.com/containers/image/v5/version/version.go (renamed from vendor/github.com/containers/image/v4/version/version.go)4
-rw-r--r--vendor/github.com/docker/docker/NOTICE2
-rw-r--r--vendor/github.com/docker/docker/api/swagger.yaml39
-rw-r--r--vendor/github.com/docker/docker/api/types/container/host_config.go2
-rw-r--r--vendor/github.com/docker/docker/api/types/filters/parse.go2
-rw-r--r--vendor/github.com/docker/docker/api/types/registry/registry.go2
-rw-r--r--vendor/github.com/docker/docker/api/types/types.go1
-rw-r--r--vendor/github.com/docker/docker/client/client.go3
-rw-r--r--vendor/github.com/docker/docker/client/container_list.go1
-rw-r--r--vendor/github.com/docker/docker/client/events.go1
-rw-r--r--vendor/github.com/docker/docker/client/hijack.go2
-rw-r--r--vendor/github.com/docker/docker/client/image_list.go1
-rw-r--r--vendor/github.com/docker/docker/client/network_list.go1
-rw-r--r--vendor/github.com/docker/docker/client/plugin_list.go1
-rw-r--r--vendor/github.com/docker/docker/client/request.go17
-rw-r--r--vendor/github.com/docker/docker/client/service_create.go2
-rw-r--r--vendor/github.com/docker/docker/client/volume_list.go1
-rw-r--r--vendor/github.com/docker/docker/errdefs/http_helpers.go33
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/README.md1
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/archive.go1294
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/archive_linux.go261
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/archive_other.go7
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/archive_unix.go (renamed from vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go)92
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/archive_windows.go (renamed from vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go)58
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/changes.go445
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/changes_linux.go286
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/changes_other.go97
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/changes_unix.go43
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/changes_windows.go34
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/copy.go480
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/copy_unix.go11
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/copy_windows.go9
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/diff.go260
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/example_changes.go97
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/time_linux.go16
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/time_unsupported.go16
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/whiteouts.go (renamed from vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go)6
-rw-r--r--vendor/github.com/docker/docker/pkg/archive/wrap.go59
-rw-r--r--vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go16
-rw-r--r--vendor/github.com/docker/docker/pkg/homedir/homedir_others.go6
-rw-r--r--vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go9
-rw-r--r--vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go2
-rw-r--r--vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go2
-rw-r--r--vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go (renamed from vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go)203
-rw-r--r--vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go6
-rw-r--r--vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go2
-rw-r--r--vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go4
-rw-r--r--vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go14
-rw-r--r--vendor/github.com/docker/docker/pkg/pools/pools.go1
-rw-r--r--vendor/github.com/docker/docker/pkg/system/filesys_unix.go (renamed from vendor/github.com/docker/docker/pkg/system/filesys.go)6
-rw-r--r--vendor/github.com/docker/docker/pkg/system/filesys_windows.go7
-rw-r--r--vendor/github.com/docker/docker/pkg/system/meminfo_linux.go8
-rw-r--r--vendor/github.com/docker/docker/pkg/system/path.go10
-rw-r--r--vendor/github.com/docker/docker/pkg/system/stat_linux.go3
-rw-r--r--vendor/github.com/docker/docker/pkg/system/stat_solaris.go13
-rw-r--r--vendor/github.com/docker/docker/pkg/system/syscall_windows.go19
-rw-r--r--vendor/github.com/docker/docker/pkg/system/utimes_linux.go25
-rw-r--r--vendor/github.com/docker/docker/pkg/system/utimes_unix.go (renamed from vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go)14
-rw-r--r--vendor/github.com/docker/docker/pkg/term/term_windows.go2
-rw-r--r--vendor/github.com/docker/docker/pkg/term/windows/windows.go3
-rw-r--r--vendor/github.com/docker/docker/profiles/seccomp/default.json4
-rw-r--r--vendor/github.com/docker/docker/profiles/seccomp/seccomp.go12
-rw-r--r--vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go4
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/.gitattributes1
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/.gitignore2
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/.golangci.yaml29
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/.travis.yml4
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/AUTHORS1
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/Makefile19
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/README.md2
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/appveyor.yml10
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/auth.go3
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/client.go50
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/client_windows.go3
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/container.go69
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/distribution.go3
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/event.go10
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/exec.go11
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/go.mod12
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/go.sum17
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/image.go107
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go509
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go106
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go11
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go16
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go11
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go29
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/term/term.go11
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go16
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go22
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/misc.go6
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/network.go16
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/plugin.go93
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/swarm.go10
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/swarm_configs.go10
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/swarm_node.go8
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go10
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/swarm_service.go12
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/swarm_task.go4
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/system.go3
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/tar.go2
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/tls.go2
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/volume.go10
-rw-r--r--vendor/github.com/ijc/Gotty/LICENSE26
-rw-r--r--vendor/github.com/ijc/Gotty/README5
-rw-r--r--vendor/github.com/ijc/Gotty/TODO3
-rw-r--r--vendor/github.com/ijc/Gotty/attributes.go514
-rw-r--r--vendor/github.com/ijc/Gotty/gotty.go244
-rw-r--r--vendor/github.com/ijc/Gotty/parser.go362
-rw-r--r--vendor/github.com/ijc/Gotty/types.go23
-rw-r--r--vendor/github.com/morikuni/aec/LICENSE21
-rw-r--r--vendor/github.com/morikuni/aec/README.md178
-rw-r--r--vendor/github.com/morikuni/aec/aec.go137
-rw-r--r--vendor/github.com/morikuni/aec/ansi.go59
-rw-r--r--vendor/github.com/morikuni/aec/builder.go388
-rw-r--r--vendor/github.com/morikuni/aec/sample.gif bin 0 -> 12548 bytes
-rw-r--r--vendor/github.com/morikuni/aec/sgr.go202
-rw-r--r--vendor/github.com/openshift/imagebuilder/OWNERS2
-rw-r--r--vendor/github.com/openshift/imagebuilder/README.md5
-rw-r--r--vendor/github.com/openshift/imagebuilder/builder.go3
-rw-r--r--vendor/github.com/openshift/imagebuilder/constants.go4
-rw-r--r--vendor/github.com/openshift/imagebuilder/evaluator.go20
-rw-r--r--vendor/github.com/openshift/imagebuilder/vendor.conf2
-rw-r--r--vendor/golang.org/x/sync/errgroup/errgroup.go66
-rw-r--r--vendor/modules.txt104
298 files changed, 10117 insertions, 3458 deletions
diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE
new file mode 100644
index 000000000..584149b6e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright The containerd Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE
new file mode 100644
index 000000000..8915f0277
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/NOTICE
@@ -0,0 +1,16 @@
+Docker
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go
new file mode 100644
index 000000000..b5200afc0
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/errdefs/errors.go
@@ -0,0 +1,93 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package errdefs defines the common errors used throughout containerd
+// packages.
+//
+// Use with errors.Wrap and errors.Wrapf to add context to an error.
+//
+// To detect an error class, use the IsXXX functions to tell whether an error
+// is of a certain type.
+//
+// The functions ToGRPC and FromGRPC can be used to map server-side and
+// client-side errors to the correct types.
+package errdefs
+
+import (
+ "context"
+
+ "github.com/pkg/errors"
+)
+
+// Definitions of common error types used throughout containerd. Errors
+// returned by most containerd packages map into one of these error classes.
+// Packages should return errors of these types when they want to instruct a
+// client to take a particular action.
+//
+// For the most part, we just try to provide local grpc errors. Most conditions
+// map very well to those defined by grpc.
+var (
+ ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
+ ErrInvalidArgument = errors.New("invalid argument")
+ ErrNotFound = errors.New("not found")
+ ErrAlreadyExists = errors.New("already exists")
+ ErrFailedPrecondition = errors.New("failed precondition")
+ ErrUnavailable = errors.New("unavailable")
+ ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
+)
+
+// IsInvalidArgument returns true if the error is due to an invalid argument
+func IsInvalidArgument(err error) bool {
+ return errors.Cause(err) == ErrInvalidArgument
+}
+
+// IsNotFound returns true if the error is due to a missing object
+func IsNotFound(err error) bool {
+ return errors.Cause(err) == ErrNotFound
+}
+
+// IsAlreadyExists returns true if the error is due to an already existing
+// metadata item
+func IsAlreadyExists(err error) bool {
+ return errors.Cause(err) == ErrAlreadyExists
+}
+
+// IsFailedPrecondition returns true if an operation could not proceed due to
+// the lack of a particular condition
+func IsFailedPrecondition(err error) bool {
+ return errors.Cause(err) == ErrFailedPrecondition
+}
+
+// IsUnavailable returns true if the error is due to a resource being unavailable
+func IsUnavailable(err error) bool {
+ return errors.Cause(err) == ErrUnavailable
+}
+
+// IsNotImplemented returns true if the error is due to not being implemented
+func IsNotImplemented(err error) bool {
+ return errors.Cause(err) == ErrNotImplemented
+}
+
+// IsCanceled returns true if the error is due to `context.Canceled`.
+func IsCanceled(err error) bool {
+ return errors.Cause(err) == context.Canceled
+}
+
+// IsDeadlineExceeded returns true if the error is due to
+// `context.DeadlineExceeded`.
+func IsDeadlineExceeded(err error) bool {
+ return errors.Cause(err) == context.DeadlineExceeded
+}
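
As the package comment above suggests, these sentinels are meant to be wrapped with github.com/pkg/errors and detected through the IsXXX helpers. A minimal sketch of that pattern; lookupImage and the image name are made up for illustration:

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

// lookupImage stands in for any call that wraps an errdefs sentinel.
func lookupImage(name string) error {
	return errors.Wrapf(errdefs.ErrNotFound, "image %q", name)
}

func main() {
	err := lookupImage("busybox")
	if errdefs.IsNotFound(err) {
		// IsNotFound unwraps with errors.Cause, so wrapping keeps the class.
		fmt.Println("not found:", err)
	}
}
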
diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go
new file mode 100644
index 000000000..209f63bd0
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go
@@ -0,0 +1,147 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package errdefs
+
+import (
+ "context"
+ "strings"
+
+ "github.com/pkg/errors"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// ToGRPC will attempt to map the backend containerd error into a grpc error,
+// using the original error message as a description.
+//
+// Further information may be extracted from certain errors depending on their
+// type.
+//
+// If the error is unmapped, the original error will be returned to be handled
+// by the regular grpc error handling stack.
+func ToGRPC(err error) error {
+ if err == nil {
+ return nil
+ }
+
+ if isGRPCError(err) {
+ // error has already been mapped to grpc
+ return err
+ }
+
+ switch {
+ case IsInvalidArgument(err):
+ return status.Errorf(codes.InvalidArgument, err.Error())
+ case IsNotFound(err):
+ return status.Errorf(codes.NotFound, err.Error())
+ case IsAlreadyExists(err):
+ return status.Errorf(codes.AlreadyExists, err.Error())
+ case IsFailedPrecondition(err):
+ return status.Errorf(codes.FailedPrecondition, err.Error())
+ case IsUnavailable(err):
+ return status.Errorf(codes.Unavailable, err.Error())
+ case IsNotImplemented(err):
+ return status.Errorf(codes.Unimplemented, err.Error())
+ case IsCanceled(err):
+ return status.Errorf(codes.Canceled, err.Error())
+ case IsDeadlineExceeded(err):
+ return status.Errorf(codes.DeadlineExceeded, err.Error())
+ }
+
+ return err
+}
+
+// ToGRPCf maps the error to grpc error codes, assembling the formatting string
+// and combining it with the target error string.
+//
+// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
+func ToGRPCf(err error, format string, args ...interface{}) error {
+ return ToGRPC(errors.Wrapf(err, format, args...))
+}
+
+// FromGRPC returns the underlying error from a grpc service based on the grpc error code
+func FromGRPC(err error) error {
+ if err == nil {
+ return nil
+ }
+
+ var cls error // the error class corresponding to the grpc code; it becomes the cause
+
+ switch code(err) {
+ case codes.InvalidArgument:
+ cls = ErrInvalidArgument
+ case codes.AlreadyExists:
+ cls = ErrAlreadyExists
+ case codes.NotFound:
+ cls = ErrNotFound
+ case codes.Unavailable:
+ cls = ErrUnavailable
+ case codes.FailedPrecondition:
+ cls = ErrFailedPrecondition
+ case codes.Unimplemented:
+ cls = ErrNotImplemented
+ case codes.Canceled:
+ cls = context.Canceled
+ case codes.DeadlineExceeded:
+ cls = context.DeadlineExceeded
+ default:
+ cls = ErrUnknown
+ }
+
+ msg := rebaseMessage(cls, err)
+ if msg != "" {
+ err = errors.Wrap(cls, msg)
+ } else {
+ err = errors.WithStack(cls)
+ }
+
+ return err
+}
+
+// rebaseMessage removes repeated error text from the end of an error string.
+// This happens when an error is received over grpc and then remapped.
+//
+// Effectively, we just remove the string of cls from the end of err if it
+// appears there.
+func rebaseMessage(cls error, err error) string {
+ desc := errDesc(err)
+ clss := cls.Error()
+ if desc == clss {
+ return ""
+ }
+
+ return strings.TrimSuffix(desc, ": "+clss)
+}
+
+func isGRPCError(err error) bool {
+ _, ok := status.FromError(err)
+ return ok
+}
+
+func code(err error) codes.Code {
+ if s, ok := status.FromError(err); ok {
+ return s.Code()
+ }
+ return codes.Unknown
+}
+
+func errDesc(err error) string {
+ if s, ok := status.FromError(err); ok {
+ return s.Message()
+ }
+ return err.Error()
+}
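
A minimal sketch of the ToGRPC/FromGRPC round trip described above, using only the helpers defined in this package; the container ID is a placeholder:

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

func main() {
	// Server side: wrap a classed error and map it onto a grpc status error.
	grpcErr := errdefs.ToGRPCf(errdefs.ErrNotFound, "container %s", "abc123")

	// Client side: map the grpc status error back onto the errdefs class.
	clientErr := errdefs.FromGRPC(grpcErr)

	fmt.Println(errdefs.IsNotFound(clientErr)) // true
	fmt.Println(clientErr)                     // container abc123: not found
}
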
diff --git a/vendor/github.com/containerd/continuity/fs/copy.go b/vendor/github.com/containerd/continuity/fs/copy.go
new file mode 100644
index 000000000..ad61022ad
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/copy.go
@@ -0,0 +1,172 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/pkg/errors"
+)
+
+var bufferPool = &sync.Pool{
+ New: func() interface{} {
+ buffer := make([]byte, 32*1024)
+ return &buffer
+ },
+}
+
+// XAttrErrorHandler transforms a non-nil xattr error.
+// Return nil to ignore an error.
+// xattrKey can be empty for a listxattr operation.
+type XAttrErrorHandler func(dst, src, xattrKey string, err error) error
+
+type copyDirOpts struct {
+ xeh XAttrErrorHandler
+}
+
+type CopyDirOpt func(*copyDirOpts) error
+
+// WithXAttrErrorHandler allows specifying an XAttrErrorHandler.
+// If a nil XAttrErrorHandler is specified (the default), CopyDir stops
+// on a non-nil xattr error.
+func WithXAttrErrorHandler(xeh XAttrErrorHandler) CopyDirOpt {
+ return func(o *copyDirOpts) error {
+ o.xeh = xeh
+ return nil
+ }
+}
+
+// WithAllowXAttrErrors allows ignoring xattr errors.
+func WithAllowXAttrErrors() CopyDirOpt {
+ xeh := func(dst, src, xattrKey string, err error) error {
+ return nil
+ }
+ return WithXAttrErrorHandler(xeh)
+}
+
+// CopyDir copies the directory from src to dst.
+// The most efficient file copy mechanism available is attempted.
+func CopyDir(dst, src string, opts ...CopyDirOpt) error {
+ var o copyDirOpts
+ for _, opt := range opts {
+ if err := opt(&o); err != nil {
+ return err
+ }
+ }
+ inodes := map[uint64]string{}
+ return copyDirectory(dst, src, inodes, &o)
+}
+
+func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) error {
+ stat, err := os.Stat(src)
+ if err != nil {
+ return errors.Wrapf(err, "failed to stat %s", src)
+ }
+ if !stat.IsDir() {
+ return errors.Errorf("source is not directory")
+ }
+
+ if st, err := os.Stat(dst); err != nil {
+ if err := os.Mkdir(dst, stat.Mode()); err != nil {
+ return errors.Wrapf(err, "failed to mkdir %s", dst)
+ }
+ } else if !st.IsDir() {
+ return errors.Errorf("cannot copy to non-directory: %s", dst)
+ } else {
+ if err := os.Chmod(dst, stat.Mode()); err != nil {
+ return errors.Wrapf(err, "failed to chmod on %s", dst)
+ }
+ }
+
+ fis, err := ioutil.ReadDir(src)
+ if err != nil {
+ return errors.Wrapf(err, "failed to read %s", src)
+ }
+
+ if err := copyFileInfo(stat, dst); err != nil {
+ return errors.Wrapf(err, "failed to copy file info for %s", dst)
+ }
+
+ for _, fi := range fis {
+ source := filepath.Join(src, fi.Name())
+ target := filepath.Join(dst, fi.Name())
+
+ switch {
+ case fi.IsDir():
+ if err := copyDirectory(target, source, inodes, o); err != nil {
+ return err
+ }
+ continue
+ case (fi.Mode() & os.ModeType) == 0:
+ link, err := getLinkSource(target, fi, inodes)
+ if err != nil {
+ return errors.Wrap(err, "failed to get hardlink")
+ }
+ if link != "" {
+ if err := os.Link(link, target); err != nil {
+ return errors.Wrap(err, "failed to create hard link")
+ }
+ } else if err := CopyFile(target, source); err != nil {
+ return errors.Wrap(err, "failed to copy files")
+ }
+ case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink:
+ link, err := os.Readlink(source)
+ if err != nil {
+ return errors.Wrapf(err, "failed to read link: %s", source)
+ }
+ if err := os.Symlink(link, target); err != nil {
+ return errors.Wrapf(err, "failed to create symlink: %s", target)
+ }
+ case (fi.Mode() & os.ModeDevice) == os.ModeDevice:
+ if err := copyDevice(target, fi); err != nil {
+ return errors.Wrapf(err, "failed to create device")
+ }
+ default:
+ // TODO: Support pipes and sockets
+ return errors.Wrapf(err, "unsupported mode %s", fi.Mode())
+ }
+ if err := copyFileInfo(fi, target); err != nil {
+ return errors.Wrap(err, "failed to copy file info")
+ }
+
+ if err := copyXAttrs(target, source, o.xeh); err != nil {
+ return errors.Wrap(err, "failed to copy xattrs")
+ }
+ }
+
+ return nil
+}
+
+// CopyFile copies the source file to the target.
+// The most efficient means of copying is used for the platform.
+func CopyFile(target, source string) error {
+ src, err := os.Open(source)
+ if err != nil {
+ return errors.Wrapf(err, "failed to open source %s", source)
+ }
+ defer src.Close()
+ tgt, err := os.Create(target)
+ if err != nil {
+ return errors.Wrapf(err, "failed to open target %s", target)
+ }
+ defer tgt.Close()
+
+ return copyFileContent(tgt, src)
+}
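
A minimal usage sketch of the exported CopyDir and CopyFile entry points together with the xattr-tolerant option defined above; the paths are placeholders:

package main

import (
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// Copy a directory tree, ignoring xattr copy failures (for example on
	// filesystems without xattr support) instead of aborting.
	if err := fs.CopyDir("/tmp/dst", "/tmp/src", fs.WithAllowXAttrErrors()); err != nil {
		log.Fatal(err)
	}

	// Individual files go through the platform-optimized CopyFile.
	if err := fs.CopyFile("/tmp/dst/extra", "/tmp/src/extra"); err != nil {
		log.Fatal(err)
	}
}
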
diff --git a/vendor/github.com/containerd/continuity/fs/copy_linux.go b/vendor/github.com/containerd/continuity/fs/copy_linux.go
new file mode 100644
index 000000000..81c71522a
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/copy_linux.go
@@ -0,0 +1,144 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "io"
+ "os"
+ "syscall"
+
+ "github.com/containerd/continuity/sysx"
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+func copyFileInfo(fi os.FileInfo, name string) error {
+ st := fi.Sys().(*syscall.Stat_t)
+ if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
+ if os.IsPermission(err) {
+ // Normally if uid/gid are the same this would be a no-op, but some
+ // filesystems may still return EPERM... for instance NFS does this.
+ // In such a case, this is not an error.
+ if dstStat, err2 := os.Lstat(name); err2 == nil {
+ st2 := dstStat.Sys().(*syscall.Stat_t)
+ if st.Uid == st2.Uid && st.Gid == st2.Gid {
+ err = nil
+ }
+ }
+ }
+ if err != nil {
+ return errors.Wrapf(err, "failed to chown %s", name)
+ }
+ }
+
+ if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
+ if err := os.Chmod(name, fi.Mode()); err != nil {
+ return errors.Wrapf(err, "failed to chmod %s", name)
+ }
+ }
+
+ timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))}
+ if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
+ return errors.Wrapf(err, "failed to utime %s", name)
+ }
+
+ return nil
+}
+
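+// maxSSizeT approximates SSIZE_MAX: the largest value of the platform's
+// signed int, used to cap each copy_file_range call.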
+const maxSSizeT = int64(^uint(0) >> 1)
+
+func copyFileContent(dst, src *os.File) error {
+ st, err := src.Stat()
+ if err != nil {
+ return errors.Wrap(err, "unable to stat source")
+ }
+
+ size := st.Size()
+ first := true
+ srcFd := int(src.Fd())
+ dstFd := int(dst.Fd())
+
+ for size > 0 {
+ // Ensure that we never try to copy more than SSIZE_MAX at a time,
+ // which also avoids overflows when the file is larger than 4GB on
+ // 32-bit systems.
+ var copySize int
+ if size > maxSSizeT {
+ copySize = int(maxSSizeT)
+ } else {
+ copySize = int(size)
+ }
+ n, err := unix.CopyFileRange(srcFd, nil, dstFd, nil, copySize, 0)
+ if err != nil {
+ if (err != unix.ENOSYS && err != unix.EXDEV) || !first {
+ return errors.Wrap(err, "copy file range failed")
+ }
+
+ buf := bufferPool.Get().(*[]byte)
+ _, err = io.CopyBuffer(dst, src, *buf)
+ bufferPool.Put(buf)
+ return errors.Wrap(err, "userspace copy failed")
+ }
+
+ first = false
+ size -= int64(n)
+ }
+
+ return nil
+}
+
+func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
+ xattrKeys, err := sysx.LListxattr(src)
+ if err != nil {
+ e := errors.Wrapf(err, "failed to list xattrs on %s", src)
+ if xeh != nil {
+ e = xeh(dst, src, "", e)
+ }
+ return e
+ }
+ for _, xattr := range xattrKeys {
+ data, err := sysx.LGetxattr(src, xattr)
+ if err != nil {
+ e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
+ if xeh != nil {
+ if e = xeh(dst, src, xattr, e); e == nil {
+ continue
+ }
+ }
+ return e
+ }
+ if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
+ e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
+ if xeh != nil {
+ if e = xeh(dst, src, xattr, e); e == nil {
+ continue
+ }
+ }
+ return e
+ }
+ }
+
+ return nil
+}
+
+func copyDevice(dst string, fi os.FileInfo) error {
+ st, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return errors.New("unsupported stat type")
+ }
+ return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
+}
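The copyXAttrs helpers above accept an XAttrErrorHandler callback so individual xattr failures can be tolerated. Below is a minimal sketch of such a handler, assuming the exported XAttrErrorHandler type declared earlier in copy.go has the func(dst, src, xattr string, err error) error shape implied by the calls above; the "security.selinux" key is only an illustrative choice.

package main

import (
	"log"

	"github.com/containerd/continuity/fs"
)

// skipSELinuxLabel matches the handler shape used by copyXAttrs above:
// return nil to skip the failing attribute and keep copying, or return
// the error to abort the whole copy.
var skipSELinuxLabel fs.XAttrErrorHandler = func(dst, src, xattr string, err error) error {
	if xattr == "security.selinux" {
		log.Printf("ignoring xattr %q on %s: %v", xattr, src, err)
		return nil
	}
	return err
}

func main() {
	// The handler is wired into a directory copy through the copy options
	// referenced as o.xeh earlier in copy.go; the option constructor is not
	// part of this hunk, so it is not named here.
	_ = skipSELinuxLabel
}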
diff --git a/vendor/github.com/containerd/continuity/fs/copy_unix.go b/vendor/github.com/containerd/continuity/fs/copy_unix.go
new file mode 100644
index 000000000..73c01a46d
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/copy_unix.go
@@ -0,0 +1,112 @@
+// +build solaris darwin freebsd
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "io"
+ "os"
+ "syscall"
+
+ "github.com/containerd/continuity/sysx"
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+func copyFileInfo(fi os.FileInfo, name string) error {
+ st := fi.Sys().(*syscall.Stat_t)
+ if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
+ if os.IsPermission(err) {
+ // Normally if uid/gid are the same this would be a no-op, but some
+ // filesystems may still return EPERM... for instance NFS does this.
+ // In such a case, this is not an error.
+ if dstStat, err2 := os.Lstat(name); err2 == nil {
+ st2 := dstStat.Sys().(*syscall.Stat_t)
+ if st.Uid == st2.Uid && st.Gid == st2.Gid {
+ err = nil
+ }
+ }
+ }
+ if err != nil {
+ return errors.Wrapf(err, "failed to chown %s", name)
+ }
+ }
+
+ if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
+ if err := os.Chmod(name, fi.Mode()); err != nil {
+ return errors.Wrapf(err, "failed to chmod %s", name)
+ }
+ }
+
+ timespec := []syscall.Timespec{StatAtime(st), StatMtime(st)}
+ if err := syscall.UtimesNano(name, timespec); err != nil {
+ return errors.Wrapf(err, "failed to utime %s", name)
+ }
+
+ return nil
+}
+
+func copyFileContent(dst, src *os.File) error {
+ buf := bufferPool.Get().(*[]byte)
+ _, err := io.CopyBuffer(dst, src, *buf)
+ bufferPool.Put(buf)
+
+ return err
+}
+
+func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
+ xattrKeys, err := sysx.LListxattr(src)
+ if err != nil {
+ e := errors.Wrapf(err, "failed to list xattrs on %s", src)
+ if xeh != nil {
+ e = xeh(dst, src, "", e)
+ }
+ return e
+ }
+ for _, xattr := range xattrKeys {
+ data, err := sysx.LGetxattr(src, xattr)
+ if err != nil {
+ e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
+ if xeh != nil {
+ if e = xeh(dst, src, xattr, e); e == nil {
+ continue
+ }
+ }
+ return e
+ }
+ if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
+ e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
+ if xeh != nil {
+ if e = xeh(dst, src, xattr, e); e == nil {
+ continue
+ }
+ }
+ return e
+ }
+ }
+
+ return nil
+}
+
+func copyDevice(dst string, fi os.FileInfo) error {
+ st, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return errors.New("unsupported stat type")
+ }
+ return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
+}
diff --git a/vendor/github.com/containerd/continuity/fs/copy_windows.go b/vendor/github.com/containerd/continuity/fs/copy_windows.go
new file mode 100644
index 000000000..27c7d7dbb
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/copy_windows.go
@@ -0,0 +1,49 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "io"
+ "os"
+
+ "github.com/pkg/errors"
+)
+
+func copyFileInfo(fi os.FileInfo, name string) error {
+ if err := os.Chmod(name, fi.Mode()); err != nil {
+ return errors.Wrapf(err, "failed to chmod %s", name)
+ }
+
+ // TODO: copy windows specific metadata
+
+ return nil
+}
+
+func copyFileContent(dst, src *os.File) error {
+ buf := bufferPool.Get().(*[]byte)
+ _, err := io.CopyBuffer(dst, src, *buf)
+ bufferPool.Put(buf)
+ return err
+}
+
+func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
+ return nil
+}
+
+func copyDevice(dst string, fi os.FileInfo) error {
+ return errors.New("device copy not supported")
+}
diff --git a/vendor/github.com/containerd/continuity/fs/diff.go b/vendor/github.com/containerd/continuity/fs/diff.go
new file mode 100644
index 000000000..e64f9e73d
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/diff.go
@@ -0,0 +1,326 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/sync/errgroup"
+
+ "github.com/sirupsen/logrus"
+)
+
+// ChangeKind is the type of modification that
+// a change is making.
+type ChangeKind int
+
+const (
+ // ChangeKindUnmodified represents an unmodified
+ // file
+ ChangeKindUnmodified = iota
+
+ // ChangeKindAdd represents an addition of
+ // a file
+ ChangeKindAdd
+
+ // ChangeKindModify represents a change to
+ // an existing file
+ ChangeKindModify
+
+ // ChangeKindDelete represents a delete of
+ // a file
+ ChangeKindDelete
+)
+
+func (k ChangeKind) String() string {
+ switch k {
+ case ChangeKindUnmodified:
+ return "unmodified"
+ case ChangeKindAdd:
+ return "add"
+ case ChangeKindModify:
+ return "modify"
+ case ChangeKindDelete:
+ return "delete"
+ default:
+ return ""
+ }
+}
+
+// Change represents single change between a diff and its parent.
+type Change struct {
+ Kind ChangeKind
+ Path string
+}
+
+// ChangeFunc is the type of function called for each change
+// computed during a directory changes calculation.
+type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error
+
+// Changes computes changes between two directories calling the
+// given change function for each computed change. The first
+// directory is intended to be the base directory and the second
+// directory the changed directory.
+//
+// The change callback is called in order of path names and the
+// changes should be applicable in that order.
+// Due to this apply ordering, the following is true
+// - Removed directory trees only create a single change for the root
+// directory removed. Remaining changes are implied.
+// - A directory which is modified to become a file will not have
+//    delete entries for sub-path items; their removal is implied
+// by the removal of the parent directory.
+//
+// Opaque directories will not be treated specially and each file
+// removed from the base directory will show up as a removal.
+//
+// File content comparisons will be done on files which have timestamps
+// which may have been truncated. If either of the files being compared
+// has a zero nanosecond value, each byte will be compared for
+// differences. If two files have the same seconds value but different
+// nanosecond values where one of those values is zero, the files will
+// be considered unchanged if the content is the same. This behavior
+// is to account for timestamp truncation during archiving.
+func Changes(ctx context.Context, a, b string, changeFn ChangeFunc) error {
+ if a == "" {
+ logrus.Debugf("Using single walk diff for %s", b)
+ return addDirChanges(ctx, changeFn, b)
+ } else if diffOptions := detectDirDiff(b, a); diffOptions != nil {
+ logrus.Debugf("Using single walk diff for %s from %s", diffOptions.diffDir, a)
+ return diffDirChanges(ctx, changeFn, a, diffOptions)
+ }
+
+ logrus.Debugf("Using double walk diff for %s from %s", b, a)
+ return doubleWalkDiff(ctx, changeFn, a, b)
+}
+
+func addDirChanges(ctx context.Context, changeFn ChangeFunc, root string) error {
+ return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(root, path)
+ if err != nil {
+ return err
+ }
+
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ return changeFn(ChangeKindAdd, path, f, nil)
+ })
+}
+
+// diffDirOptions is used when the diff can be directly calculated from
+// a diff directory to its base, without walking both trees.
+type diffDirOptions struct {
+ diffDir string
+ skipChange func(string) (bool, error)
+ deleteChange func(string, string, os.FileInfo) (string, error)
+}
+
+// diffDirChanges walks the diff directory and compares changes against the base.
+func diffDirChanges(ctx context.Context, changeFn ChangeFunc, base string, o *diffDirOptions) error {
+ changedDirs := make(map[string]struct{})
+ return filepath.Walk(o.diffDir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(o.diffDir, path)
+ if err != nil {
+ return err
+ }
+
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ // TODO: handle opaqueness, start new double walker at this
+ // location to get deletes, and skip tree in single walker
+
+ if o.skipChange != nil {
+ if skip, err := o.skipChange(path); skip {
+ return err
+ }
+ }
+
+ var kind ChangeKind
+
+ deletedFile, err := o.deleteChange(o.diffDir, path, f)
+ if err != nil {
+ return err
+ }
+
+ // Find out what kind of modification happened
+ if deletedFile != "" {
+ path = deletedFile
+ kind = ChangeKindDelete
+ f = nil
+ } else {
+ // Otherwise, the file was added
+ kind = ChangeKindAdd
+
+			// ...Unless it already existed in the base, in which case it's a modification
+ stat, err := os.Stat(filepath.Join(base, path))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ // The file existed in the base, so that's a modification
+
+ // However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+ if stat.IsDir() && f.IsDir() {
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+ // Both directories are the same, don't record the change
+ return nil
+ }
+ }
+ kind = ChangeKindModify
+ }
+ }
+
+ // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+ // This block is here to ensure the change is recorded even if the
+ // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+ // Check https://github.com/docker/docker/pull/13590 for details.
+ if f.IsDir() {
+ changedDirs[path] = struct{}{}
+ }
+ if kind == ChangeKindAdd || kind == ChangeKindDelete {
+ parent := filepath.Dir(path)
+ if _, ok := changedDirs[parent]; !ok && parent != "/" {
+ pi, err := os.Stat(filepath.Join(o.diffDir, parent))
+ if err := changeFn(ChangeKindModify, parent, pi, err); err != nil {
+ return err
+ }
+ changedDirs[parent] = struct{}{}
+ }
+ }
+
+ return changeFn(kind, path, f, nil)
+ })
+}
+
+// doubleWalkDiff walks both directories to create a diff
+func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err error) {
+ g, ctx := errgroup.WithContext(ctx)
+
+ var (
+ c1 = make(chan *currentPath)
+ c2 = make(chan *currentPath)
+
+ f1, f2 *currentPath
+ rmdir string
+ )
+ g.Go(func() error {
+ defer close(c1)
+ return pathWalk(ctx, a, c1)
+ })
+ g.Go(func() error {
+ defer close(c2)
+ return pathWalk(ctx, b, c2)
+ })
+ g.Go(func() error {
+ for c1 != nil || c2 != nil {
+ if f1 == nil && c1 != nil {
+ f1, err = nextPath(ctx, c1)
+ if err != nil {
+ return err
+ }
+ if f1 == nil {
+ c1 = nil
+ }
+ }
+
+ if f2 == nil && c2 != nil {
+ f2, err = nextPath(ctx, c2)
+ if err != nil {
+ return err
+ }
+ if f2 == nil {
+ c2 = nil
+ }
+ }
+ if f1 == nil && f2 == nil {
+ continue
+ }
+
+ var f os.FileInfo
+ k, p := pathChange(f1, f2)
+ switch k {
+ case ChangeKindAdd:
+ if rmdir != "" {
+ rmdir = ""
+ }
+ f = f2.f
+ f2 = nil
+ case ChangeKindDelete:
+				// Check if this file is already removed by being
+				// under a removed directory
+ if rmdir != "" && strings.HasPrefix(f1.path, rmdir) {
+ f1 = nil
+ continue
+ } else if f1.f.IsDir() {
+ rmdir = f1.path + string(os.PathSeparator)
+ } else if rmdir != "" {
+ rmdir = ""
+ }
+ f1 = nil
+ case ChangeKindModify:
+ same, err := sameFile(f1, f2)
+ if err != nil {
+ return err
+ }
+ if f1.f.IsDir() && !f2.f.IsDir() {
+ rmdir = f1.path + string(os.PathSeparator)
+ } else if rmdir != "" {
+ rmdir = ""
+ }
+ f = f2.f
+ f1 = nil
+ f2 = nil
+ if same {
+ if !isLinked(f) {
+ continue
+ }
+ k = ChangeKindUnmodified
+ }
+ }
+ if err := changeFn(k, p, f, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+
+ return g.Wait()
+}
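A minimal sketch of driving the Changes walker above with a ChangeFunc callback; the two directories are placeholders. Changes invokes the callback in path order, so the output can be applied in the order it is printed.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/containerd/continuity/fs"
)

func main() {
	base, changed := "/tmp/layer-base", "/tmp/layer-new" // placeholder directories

	err := fs.Changes(context.Background(), base, changed,
		func(kind fs.ChangeKind, path string, fi os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			// kind is add, modify, or delete; in the double-walk case an
			// unchanged file is only reported when it is hardlinked.
			fmt.Printf("%-10s %s\n", kind, path)
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}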
diff --git a/vendor/github.com/containerd/continuity/fs/diff_unix.go b/vendor/github.com/containerd/continuity/fs/diff_unix.go
new file mode 100644
index 000000000..7913af27d
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/diff_unix.go
@@ -0,0 +1,74 @@
+// +build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "bytes"
+ "os"
+ "syscall"
+
+ "github.com/containerd/continuity/sysx"
+ "github.com/pkg/errors"
+)
+
+// detectDirDiff returns diff dir options if a directory could
+// be found in the mount info for upper, which is the direct
+// diff with the provided lower directory.
+func detectDirDiff(upper, lower string) *diffDirOptions {
+ // TODO: get mount options for upper
+ // TODO: detect AUFS
+ // TODO: detect overlay
+ return nil
+}
+
+// compareSysStat returns whether the stats are equivalent,
+// that is, whether the two files can be considered the same,
+// and an error.
+func compareSysStat(s1, s2 interface{}) (bool, error) {
+ ls1, ok := s1.(*syscall.Stat_t)
+ if !ok {
+ return false, nil
+ }
+ ls2, ok := s2.(*syscall.Stat_t)
+ if !ok {
+ return false, nil
+ }
+
+ return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil
+}
+
+func compareCapabilities(p1, p2 string) (bool, error) {
+ c1, err := sysx.LGetxattr(p1, "security.capability")
+ if err != nil && err != sysx.ENODATA {
+ return false, errors.Wrapf(err, "failed to get xattr for %s", p1)
+ }
+ c2, err := sysx.LGetxattr(p2, "security.capability")
+ if err != nil && err != sysx.ENODATA {
+ return false, errors.Wrapf(err, "failed to get xattr for %s", p2)
+ }
+ return bytes.Equal(c1, c2), nil
+}
+
+func isLinked(f os.FileInfo) bool {
+ s, ok := f.Sys().(*syscall.Stat_t)
+ if !ok {
+ return false
+ }
+ return !f.IsDir() && s.Nlink > 1
+}
diff --git a/vendor/github.com/containerd/continuity/fs/diff_windows.go b/vendor/github.com/containerd/continuity/fs/diff_windows.go
new file mode 100644
index 000000000..4bfa72d3a
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/diff_windows.go
@@ -0,0 +1,48 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "os"
+
+ "golang.org/x/sys/windows"
+)
+
+func detectDirDiff(upper, lower string) *diffDirOptions {
+ return nil
+}
+
+func compareSysStat(s1, s2 interface{}) (bool, error) {
+ f1, ok := s1.(windows.Win32FileAttributeData)
+ if !ok {
+ return false, nil
+ }
+ f2, ok := s2.(windows.Win32FileAttributeData)
+ if !ok {
+ return false, nil
+ }
+ return f1.FileAttributes == f2.FileAttributes, nil
+}
+
+func compareCapabilities(p1, p2 string) (bool, error) {
+ // TODO: Use windows equivalent
+ return true, nil
+}
+
+func isLinked(os.FileInfo) bool {
+ return false
+}
diff --git a/vendor/github.com/containerd/continuity/fs/dtype_linux.go b/vendor/github.com/containerd/continuity/fs/dtype_linux.go
new file mode 100644
index 000000000..10510d8de
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/dtype_linux.go
@@ -0,0 +1,103 @@
+// +build linux
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func locateDummyIfEmpty(path string) (string, error) {
+ children, err := ioutil.ReadDir(path)
+ if err != nil {
+ return "", err
+ }
+ if len(children) != 0 {
+ return "", nil
+ }
+ dummyFile, err := ioutil.TempFile(path, "fsutils-dummy")
+ if err != nil {
+ return "", err
+ }
+ name := dummyFile.Name()
+ err = dummyFile.Close()
+ return name, err
+}
+
+// SupportsDType returns whether the filesystem mounted on path supports d_type
+func SupportsDType(path string) (bool, error) {
+ // locate dummy so that we have at least one dirent
+ dummy, err := locateDummyIfEmpty(path)
+ if err != nil {
+ return false, err
+ }
+ if dummy != "" {
+ defer os.Remove(dummy)
+ }
+
+ visited := 0
+ supportsDType := true
+ fn := func(ent *syscall.Dirent) bool {
+ visited++
+ if ent.Type == syscall.DT_UNKNOWN {
+ supportsDType = false
+ // stop iteration
+ return true
+ }
+ // continue iteration
+ return false
+ }
+ if err = iterateReadDir(path, fn); err != nil {
+ return false, err
+ }
+ if visited == 0 {
+ return false, fmt.Errorf("did not hit any dirent during iteration %s", path)
+ }
+ return supportsDType, nil
+}
+
+func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error {
+ d, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer d.Close()
+ fd := int(d.Fd())
+ buf := make([]byte, 4096)
+ for {
+ nbytes, err := syscall.ReadDirent(fd, buf)
+ if err != nil {
+ return err
+ }
+ if nbytes == 0 {
+ break
+ }
+ for off := 0; off < nbytes; {
+ ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off]))
+ if stop := fn(ent); stop {
+ return nil
+ }
+ off += int(ent.Reclen)
+ }
+ }
+ return nil
+}
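SupportsDType above (Linux-only) is the kind of probe a storage driver would run before trusting d_type in directory entries; a minimal sketch follows, with the path as a placeholder.

package main

import (
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// Probe the filesystem backing the storage root (placeholder path).
	ok, err := fs.SupportsDType("/var/lib/storage")
	if err != nil {
		log.Fatal(err)
	}
	if !ok {
		log.Println("backing filesystem does not report d_type; expect degraded behavior")
	}
}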
diff --git a/vendor/github.com/containerd/continuity/fs/du.go b/vendor/github.com/containerd/continuity/fs/du.go
new file mode 100644
index 000000000..fccc985dc
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/du.go
@@ -0,0 +1,38 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import "context"
+
+// Usage of disk information
+type Usage struct {
+ Inodes int64
+ Size int64
+}
+
+// DiskUsage counts the number of inodes and disk usage for the resources under
+// the given paths.
+func DiskUsage(ctx context.Context, roots ...string) (Usage, error) {
+ return diskUsage(ctx, roots...)
+}
+
+// DiffUsage counts the number of inodes and disk usage in the
+// diff between the 2 directories. The first path is intended
+// as the base directory and the second as the changed directory.
+func DiffUsage(ctx context.Context, a, b string) (Usage, error) {
+ return diffUsage(ctx, a, b)
+}
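A minimal sketch of the DiskUsage and DiffUsage entry points above; all paths are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	ctx := context.Background()

	// Total size and inode count under one or more roots (placeholder path).
	usage, err := fs.DiskUsage(ctx, "/var/lib/storage/overlay")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("inodes=%d size=%d bytes\n", usage.Inodes, usage.Size)

	// Usage attributable only to the differences between two directories.
	diff, err := fs.DiffUsage(ctx, "/tmp/base", "/tmp/changed")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("diff adds/modifies %d bytes\n", diff.Size)
}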
diff --git a/vendor/github.com/containerd/continuity/fs/du_unix.go b/vendor/github.com/containerd/continuity/fs/du_unix.go
new file mode 100644
index 000000000..e22ffbea3
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/du_unix.go
@@ -0,0 +1,110 @@
+// +build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+type inode struct {
+ // TODO(stevvooe): Can probably reduce memory usage by not tracking
+	// device, but we can leave this as is for now.
+ dev, ino uint64
+}
+
+func newInode(stat *syscall.Stat_t) inode {
+ return inode{
+ // Dev is uint32 on darwin/bsd, uint64 on linux/solaris
+ dev: uint64(stat.Dev), // nolint: unconvert
+ // Ino is uint32 on bsd, uint64 on darwin/linux/solaris
+ ino: uint64(stat.Ino), // nolint: unconvert
+ }
+}
+
+func diskUsage(ctx context.Context, roots ...string) (Usage, error) {
+
+ var (
+ size int64
+ inodes = map[inode]struct{}{} // expensive!
+ )
+
+ for _, root := range roots {
+ if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ inoKey := newInode(fi.Sys().(*syscall.Stat_t))
+ if _, ok := inodes[inoKey]; !ok {
+ inodes[inoKey] = struct{}{}
+ size += fi.Size()
+ }
+
+ return nil
+ }); err != nil {
+ return Usage{}, err
+ }
+ }
+
+ return Usage{
+ Inodes: int64(len(inodes)),
+ Size: size,
+ }, nil
+}
+
+func diffUsage(ctx context.Context, a, b string) (Usage, error) {
+ var (
+ size int64
+ inodes = map[inode]struct{}{} // expensive!
+ )
+
+ if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if kind == ChangeKindAdd || kind == ChangeKindModify {
+ inoKey := newInode(fi.Sys().(*syscall.Stat_t))
+ if _, ok := inodes[inoKey]; !ok {
+ inodes[inoKey] = struct{}{}
+ size += fi.Size()
+			}
+		}
+		return nil
+ }); err != nil {
+ return Usage{}, err
+ }
+
+ return Usage{
+ Inodes: int64(len(inodes)),
+ Size: size,
+ }, nil
+}
diff --git a/vendor/github.com/containerd/continuity/fs/du_windows.go b/vendor/github.com/containerd/continuity/fs/du_windows.go
new file mode 100644
index 000000000..8f25ec59c
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/du_windows.go
@@ -0,0 +1,82 @@
+// +build windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+)
+
+func diskUsage(ctx context.Context, roots ...string) (Usage, error) {
+ var (
+ size int64
+ )
+
+ // TODO(stevvooe): Support inodes (or equivalent) for windows.
+
+ for _, root := range roots {
+ if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ size += fi.Size()
+ return nil
+ }); err != nil {
+ return Usage{}, err
+ }
+ }
+
+ return Usage{
+ Size: size,
+ }, nil
+}
+
+func diffUsage(ctx context.Context, a, b string) (Usage, error) {
+ var (
+ size int64
+ )
+
+ if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if kind == ChangeKindAdd || kind == ChangeKindModify {
+ size += fi.Size()
+		}
+		return nil
+ }); err != nil {
+ return Usage{}, err
+ }
+
+ return Usage{
+ Size: size,
+ }, nil
+}
diff --git a/vendor/github.com/containerd/continuity/fs/hardlink.go b/vendor/github.com/containerd/continuity/fs/hardlink.go
new file mode 100644
index 000000000..762aa45e6
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/hardlink.go
@@ -0,0 +1,43 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import "os"
+
+// GetLinkInfo returns an identifier representing the node a hardlink is
+// pointing to and whether the file is hard linked. On platforms without
+// link information, 0 and false are returned.
+func GetLinkInfo(fi os.FileInfo) (uint64, bool) {
+ return getLinkInfo(fi)
+}
+
+// getLinkSource returns the path of a previously seen link source
+// for the given name and file info, looked up in the provided inode
+// map. If the file's inode is not yet in the map and the file has
+// other links, the name is recorded so it can serve as the source
+// for later link locations.
+func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) {
+ inode, isHardlink := getLinkInfo(fi)
+ if !isHardlink {
+ return "", nil
+ }
+
+ path, ok := inodes[inode]
+ if !ok {
+ inodes[inode] = name
+ }
+ return path, nil
+}
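A minimal sketch that uses the exported GetLinkInfo above to group hardlinked paths while walking a tree; the root directory is a placeholder.

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/containerd/continuity/fs"
)

func main() {
	groups := map[uint64][]string{} // link identifier -> paths sharing it

	err := filepath.Walk("/tmp/tree", func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if id, linked := fs.GetLinkInfo(fi); linked {
			groups[id] = append(groups[id], path)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	for id, paths := range groups {
		fmt.Println(id, paths)
	}
}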
diff --git a/vendor/github.com/containerd/continuity/fs/hardlink_unix.go b/vendor/github.com/containerd/continuity/fs/hardlink_unix.go
new file mode 100644
index 000000000..f95f0904c
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/hardlink_unix.go
@@ -0,0 +1,34 @@
+// +build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "os"
+ "syscall"
+)
+
+func getLinkInfo(fi os.FileInfo) (uint64, bool) {
+ s, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return 0, false
+ }
+
+ // Ino is uint32 on bsd, uint64 on darwin/linux/solaris
+ return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1 // nolint: unconvert
+}
diff --git a/vendor/github.com/containerd/continuity/fs/hardlink_windows.go b/vendor/github.com/containerd/continuity/fs/hardlink_windows.go
new file mode 100644
index 000000000..748554714
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/hardlink_windows.go
@@ -0,0 +1,23 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import "os"
+
+func getLinkInfo(fi os.FileInfo) (uint64, bool) {
+ return 0, false
+}
diff --git a/vendor/github.com/containerd/continuity/fs/path.go b/vendor/github.com/containerd/continuity/fs/path.go
new file mode 100644
index 000000000..8863caa9d
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/path.go
@@ -0,0 +1,313 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/pkg/errors"
+)
+
+var (
+ errTooManyLinks = errors.New("too many links")
+)
+
+type currentPath struct {
+ path string
+ f os.FileInfo
+ fullPath string
+}
+
+func pathChange(lower, upper *currentPath) (ChangeKind, string) {
+ if lower == nil {
+ if upper == nil {
+ panic("cannot compare nil paths")
+ }
+ return ChangeKindAdd, upper.path
+ }
+ if upper == nil {
+ return ChangeKindDelete, lower.path
+ }
+
+ switch i := directoryCompare(lower.path, upper.path); {
+ case i < 0:
+ // File in lower that is not in upper
+ return ChangeKindDelete, lower.path
+ case i > 0:
+ // File in upper that is not in lower
+ return ChangeKindAdd, upper.path
+ default:
+ return ChangeKindModify, upper.path
+ }
+}
+
+func directoryCompare(a, b string) int {
+ l := len(a)
+ if len(b) < l {
+ l = len(b)
+ }
+ for i := 0; i < l; i++ {
+ c1, c2 := a[i], b[i]
+ if c1 == filepath.Separator {
+ c1 = byte(0)
+ }
+ if c2 == filepath.Separator {
+ c2 = byte(0)
+ }
+ if c1 < c2 {
+ return -1
+ }
+ if c1 > c2 {
+ return +1
+ }
+ }
+ if len(a) < len(b) {
+ return -1
+ }
+ if len(a) > len(b) {
+ return +1
+ }
+ return 0
+}
+
+func sameFile(f1, f2 *currentPath) (bool, error) {
+ if os.SameFile(f1.f, f2.f) {
+ return true, nil
+ }
+
+ equalStat, err := compareSysStat(f1.f.Sys(), f2.f.Sys())
+ if err != nil || !equalStat {
+ return equalStat, err
+ }
+
+ if eq, err := compareCapabilities(f1.fullPath, f2.fullPath); err != nil || !eq {
+ return eq, err
+ }
+
+ // If not a directory also check size, modtime, and content
+ if !f1.f.IsDir() {
+ if f1.f.Size() != f2.f.Size() {
+ return false, nil
+ }
+ t1 := f1.f.ModTime()
+ t2 := f2.f.ModTime()
+
+ if t1.Unix() != t2.Unix() {
+ return false, nil
+ }
+
+ // If the timestamp may have been truncated in both of the
+ // files, check content of file to determine difference
+ if t1.Nanosecond() == 0 && t2.Nanosecond() == 0 {
+ var eq bool
+ if (f1.f.Mode() & os.ModeSymlink) == os.ModeSymlink {
+ eq, err = compareSymlinkTarget(f1.fullPath, f2.fullPath)
+ } else if f1.f.Size() > 0 {
+ eq, err = compareFileContent(f1.fullPath, f2.fullPath)
+ }
+ if err != nil || !eq {
+ return eq, err
+ }
+ } else if t1.Nanosecond() != t2.Nanosecond() {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func compareSymlinkTarget(p1, p2 string) (bool, error) {
+ t1, err := os.Readlink(p1)
+ if err != nil {
+ return false, err
+ }
+ t2, err := os.Readlink(p2)
+ if err != nil {
+ return false, err
+ }
+ return t1 == t2, nil
+}
+
+const compareChunkSize = 32 * 1024
+
+// compareFileContent compares the content of two same-sized files
+// by comparing each byte.
+func compareFileContent(p1, p2 string) (bool, error) {
+ f1, err := os.Open(p1)
+ if err != nil {
+ return false, err
+ }
+ defer f1.Close()
+ f2, err := os.Open(p2)
+ if err != nil {
+ return false, err
+ }
+ defer f2.Close()
+
+	b1 := make([]byte, compareChunkSize)
+	b2 := make([]byte, compareChunkSize)
+ for {
+ n1, err1 := f1.Read(b1)
+ if err1 != nil && err1 != io.EOF {
+ return false, err1
+ }
+ n2, err2 := f2.Read(b2)
+ if err2 != nil && err2 != io.EOF {
+ return false, err2
+ }
+ if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) {
+ return false, nil
+ }
+ if err1 == io.EOF && err2 == io.EOF {
+ return true, nil
+ }
+ }
+}
+
+func pathWalk(ctx context.Context, root string, pathC chan<- *currentPath) error {
+ return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(root, path)
+ if err != nil {
+ return err
+ }
+
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ p := &currentPath{
+ path: path,
+ f: f,
+ fullPath: filepath.Join(root, path),
+ }
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case pathC <- p:
+ return nil
+ }
+ })
+}
+
+func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case p := <-pathC:
+ return p, nil
+ }
+}
+
+// RootPath joins a path with a root, evaluating and bounding any
+// symlink to the root directory.
+func RootPath(root, path string) (string, error) {
+ if path == "" {
+ return root, nil
+ }
+ var linksWalked int // to protect against cycles
+ for {
+ i := linksWalked
+ newpath, err := walkLinks(root, path, &linksWalked)
+ if err != nil {
+ return "", err
+ }
+ path = newpath
+ if i == linksWalked {
+ newpath = filepath.Join("/", newpath)
+ if path == newpath {
+ return filepath.Join(root, newpath), nil
+ }
+ path = newpath
+ }
+ }
+}
+
+func walkLink(root, path string, linksWalked *int) (newpath string, islink bool, err error) {
+ if *linksWalked > 255 {
+ return "", false, errTooManyLinks
+ }
+
+ path = filepath.Join("/", path)
+ if path == "/" {
+ return path, false, nil
+ }
+ realPath := filepath.Join(root, path)
+
+ fi, err := os.Lstat(realPath)
+ if err != nil {
+ // If path does not yet exist, treat as non-symlink
+ if os.IsNotExist(err) {
+ return path, false, nil
+ }
+ return "", false, err
+ }
+ if fi.Mode()&os.ModeSymlink == 0 {
+ return path, false, nil
+ }
+ newpath, err = os.Readlink(realPath)
+ if err != nil {
+ return "", false, err
+ }
+ *linksWalked++
+ return newpath, true, nil
+}
+
+func walkLinks(root, path string, linksWalked *int) (string, error) {
+ switch dir, file := filepath.Split(path); {
+ case dir == "":
+ newpath, _, err := walkLink(root, file, linksWalked)
+ return newpath, err
+ case file == "":
+ if os.IsPathSeparator(dir[len(dir)-1]) {
+ if dir == "/" {
+ return dir, nil
+ }
+ return walkLinks(root, dir[:len(dir)-1], linksWalked)
+ }
+ newpath, _, err := walkLink(root, dir, linksWalked)
+ return newpath, err
+ default:
+ newdir, err := walkLinks(root, dir, linksWalked)
+ if err != nil {
+ return "", err
+ }
+ newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked)
+ if err != nil {
+ return "", err
+ }
+ if !islink {
+ return newpath, nil
+ }
+ if filepath.IsAbs(newpath) {
+ return newpath, nil
+ }
+ return filepath.Join(newdir, newpath), nil
+ }
+}
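A minimal sketch of the exported RootPath helper above, which joins a path onto a root while resolving symlinks inside the root and keeping the result bounded to it; the paths are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// Join a container-relative path onto a root; any symlinks encountered
	// under the root are resolved and cannot escape it.
	resolved, err := fs.RootPath("/srv/rootfs", "/etc/resolv.conf")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resolved) // typically /srv/rootfs/etc/resolv.conf
}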
diff --git a/vendor/github.com/containerd/continuity/fs/stat_bsd.go b/vendor/github.com/containerd/continuity/fs/stat_bsd.go
new file mode 100644
index 000000000..cb7400a33
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/stat_bsd.go
@@ -0,0 +1,44 @@
+// +build darwin freebsd
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "syscall"
+ "time"
+)
+
+// StatAtime returns the access time from a stat struct
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+ return st.Atimespec
+}
+
+// StatCtime returns the created time from a stat struct
+func StatCtime(st *syscall.Stat_t) syscall.Timespec {
+ return st.Ctimespec
+}
+
+// StatMtime returns the modified time from a stat struct
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+ return st.Mtimespec
+}
+
+// StatATimeAsTime returns the access time as a time.Time
+func StatATimeAsTime(st *syscall.Stat_t) time.Time {
+ return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert
+}
diff --git a/vendor/github.com/containerd/continuity/fs/stat_linux.go b/vendor/github.com/containerd/continuity/fs/stat_linux.go
new file mode 100644
index 000000000..4a678dd1f
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/stat_linux.go
@@ -0,0 +1,43 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "syscall"
+ "time"
+)
+
+// StatAtime returns the Atim
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+ return st.Atim
+}
+
+// StatCtime returns the Ctim
+func StatCtime(st *syscall.Stat_t) syscall.Timespec {
+ return st.Ctim
+}
+
+// StatMtime returns the Mtim
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+ return st.Mtim
+}
+
+// StatATimeAsTime returns st.Atim as a time.Time
+func StatATimeAsTime(st *syscall.Stat_t) time.Time {
+ // The int64 conversions ensure the line compiles for 32-bit systems as well.
+ return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert
+}
diff --git a/vendor/github.com/containerd/continuity/fs/time.go b/vendor/github.com/containerd/continuity/fs/time.go
new file mode 100644
index 000000000..cde456123
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/time.go
@@ -0,0 +1,29 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import "time"
+
+// GNU tar and the Go tar writer don't have sub-second mtime
+// precision, which is problematic when we apply changes via tar
+// files. We handle this by comparing for exact times, *or* the same
+// second count with either a or b having exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+ return a == b ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
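sameFsTime above is unexported, so the sketch below restates its truncation rule as standalone code to show the intended behavior for tar round-trips.

package main

import (
	"fmt"
	"time"
)

// sameFsTime restates the rule used above: times are equal, or they share
// the same second and at least one side has lost its nanoseconds (as GNU
// tar and the Go tar writer do).
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
	exact := time.Unix(1500000000, 123456789)
	truncated := time.Unix(1500000000, 0) // what a tar round-trip would keep

	fmt.Println(sameFsTime(exact, truncated))               // true
	fmt.Println(sameFsTime(exact, exact.Add(time.Second)))  // false
}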
diff --git a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go
deleted file mode 100644
index b0d5a6b56..000000000
--- a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package pathdriver
-
-import (
- "path/filepath"
-)
-
-// PathDriver provides all of the path manipulation functions in a common
-// interface. The context should call these and never use the `filepath`
-// package or any other package to manipulate paths.
-type PathDriver interface {
- Join(paths ...string) string
- IsAbs(path string) bool
- Rel(base, target string) (string, error)
- Base(path string) string
- Dir(path string) string
- Clean(path string) string
- Split(path string) (dir, file string)
- Separator() byte
- Abs(path string) (string, error)
- Walk(string, filepath.WalkFunc) error
- FromSlash(path string) string
- ToSlash(path string) string
- Match(pattern, name string) (matched bool, err error)
-}
-
-// pathDriver is a simple default implementation calls the filepath package.
-type pathDriver struct{}
-
-// LocalPathDriver is the exported pathDriver struct for convenience.
-var LocalPathDriver PathDriver = &pathDriver{}
-
-func (*pathDriver) Join(paths ...string) string {
- return filepath.Join(paths...)
-}
-
-func (*pathDriver) IsAbs(path string) bool {
- return filepath.IsAbs(path)
-}
-
-func (*pathDriver) Rel(base, target string) (string, error) {
- return filepath.Rel(base, target)
-}
-
-func (*pathDriver) Base(path string) string {
- return filepath.Base(path)
-}
-
-func (*pathDriver) Dir(path string) string {
- return filepath.Dir(path)
-}
-
-func (*pathDriver) Clean(path string) string {
- return filepath.Clean(path)
-}
-
-func (*pathDriver) Split(path string) (dir, file string) {
- return filepath.Split(path)
-}
-
-func (*pathDriver) Separator() byte {
- return filepath.Separator
-}
-
-func (*pathDriver) Abs(path string) (string, error) {
- return filepath.Abs(path)
-}
-
-// Note that filepath.Walk calls os.Stat, so if the context wants to
-// to call Driver.Stat() for Walk, they need to create a new struct that
-// overrides this method.
-func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
- return filepath.Walk(root, walkFn)
-}
-
-func (*pathDriver) FromSlash(path string) string {
- return filepath.FromSlash(path)
-}
-
-func (*pathDriver) ToSlash(path string) string {
- return filepath.ToSlash(path)
-}
-
-func (*pathDriver) Match(pattern, name string) (bool, error) {
- return filepath.Match(pattern, name)
-}
diff --git a/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go b/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go
new file mode 100644
index 000000000..0bfa6a040
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go
@@ -0,0 +1,26 @@
+// +build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package syscallx
+
+import "syscall"
+
+// Readlink returns the destination of the named symbolic link.
+func Readlink(path string, buf []byte) (n int, err error) {
+ return syscall.Readlink(path, buf)
+}
diff --git a/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go b/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go
new file mode 100644
index 000000000..2ba814990
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go
@@ -0,0 +1,112 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package syscallx
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type reparseDataBuffer struct {
+ ReparseTag uint32
+ ReparseDataLength uint16
+ Reserved uint16
+
+ // GenericReparseBuffer
+ reparseBuffer byte
+}
+
+type mountPointReparseBuffer struct {
+ SubstituteNameOffset uint16
+ SubstituteNameLength uint16
+ PrintNameOffset uint16
+ PrintNameLength uint16
+ PathBuffer [1]uint16
+}
+
+type symbolicLinkReparseBuffer struct {
+ SubstituteNameOffset uint16
+ SubstituteNameLength uint16
+ PrintNameOffset uint16
+ PrintNameLength uint16
+ Flags uint32
+ PathBuffer [1]uint16
+}
+
+const (
+ _IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003
+ _SYMLINK_FLAG_RELATIVE = 1
+)
+
+// Readlink returns the destination of the named symbolic link.
+func Readlink(path string, buf []byte) (n int, err error) {
+ fd, err := syscall.CreateFile(syscall.StringToUTF16Ptr(path), syscall.GENERIC_READ, 0, nil, syscall.OPEN_EXISTING,
+ syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
+ if err != nil {
+ return -1, err
+ }
+ defer syscall.CloseHandle(fd)
+
+ rdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
+ var bytesReturned uint32
+ err = syscall.DeviceIoControl(fd, syscall.FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil)
+ if err != nil {
+ return -1, err
+ }
+
+ rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0]))
+ var s string
+ switch rdb.ReparseTag {
+ case syscall.IO_REPARSE_TAG_SYMLINK:
+ data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
+ p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
+ s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2])
+ if data.Flags&_SYMLINK_FLAG_RELATIVE == 0 {
+ if len(s) >= 4 && s[:4] == `\??\` {
+ s = s[4:]
+ switch {
+ case len(s) >= 2 && s[1] == ':': // \??\C:\foo\bar
+ // do nothing
+ case len(s) >= 4 && s[:4] == `UNC\`: // \??\UNC\foo\bar
+ s = `\\` + s[4:]
+ default:
+ // unexpected; do nothing
+ }
+ } else {
+ // unexpected; do nothing
+ }
+ }
+ case _IO_REPARSE_TAG_MOUNT_POINT:
+ data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
+ p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
+ s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2])
+ if len(s) >= 4 && s[:4] == `\??\` { // \??\C:\foo\bar
+ if len(s) < 48 || s[:11] != `\??\Volume{` {
+ s = s[4:]
+ }
+ } else {
+ // unexpected; do nothing
+ }
+ default:
+ // the path is not a symlink or junction but another type of reparse
+ // point
+ return -1, syscall.ENOENT
+ }
+ n = copy(buf, []byte(s))
+
+ return n, nil
+}
diff --git a/vendor/github.com/containerd/continuity/sysx/README.md b/vendor/github.com/containerd/continuity/sysx/README.md
new file mode 100644
index 000000000..ad7aee533
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/README.md
@@ -0,0 +1,3 @@
+This package is for internal use only. It is intended to only have
+temporary changes before they are upstreamed to golang.org/x/sys/
+(a.k.a. https://github.com/golang/sys).
diff --git a/vendor/github.com/containerd/continuity/sysx/file_posix.go b/vendor/github.com/containerd/continuity/sysx/file_posix.go
new file mode 100644
index 000000000..e28f3a1b5
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/file_posix.go
@@ -0,0 +1,128 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sysx
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/containerd/continuity/syscallx"
+)
+
+// Readlink returns the destination of the named symbolic link.
+// If there is an error, it will be of type *PathError.
+func Readlink(name string) (string, error) {
+ for len := 128; ; len *= 2 {
+ b := make([]byte, len)
+ n, e := fixCount(syscallx.Readlink(fixLongPath(name), b))
+ if e != nil {
+ return "", &os.PathError{Op: "readlink", Path: name, Err: e}
+ }
+ if n < len {
+ return string(b[0:n]), nil
+ }
+ }
+}
+
+// Many functions in package syscall return a count of -1 instead of 0.
+// Using fixCount(call()) instead of call() corrects the count.
+func fixCount(n int, err error) (int, error) {
+ if n < 0 {
+ n = 0
+ }
+ return n, err
+}
+
+// fixLongPath returns the extended-length (\\?\-prefixed) form of
+// path when needed, in order to avoid the default 260 character file
+// path limit imposed by Windows. If path is not easily converted to
+// the extended-length form (for example, if path is a relative path
+// or contains .. elements), or is short enough, fixLongPath returns
+// path unmodified.
+//
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
+func fixLongPath(path string) string {
+ // Do nothing (and don't allocate) if the path is "short".
+ // Empirically (at least on the Windows Server 2013 builder),
+ // the kernel is arbitrarily okay with < 248 bytes. That
+ // matches what the docs above say:
+ // "When using an API to create a directory, the specified
+ // path cannot be so long that you cannot append an 8.3 file
+ // name (that is, the directory name cannot exceed MAX_PATH
+ // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
+ //
+ // The MSDN docs appear to say that a normal path that is 248 bytes long
+	// will work; empirically the path must be less than 248 bytes long.
+ if len(path) < 248 {
+ // Don't fix. (This is how Go 1.7 and earlier worked,
+ // not automatically generating the \\?\ form)
+ return path
+ }
+
+ // The extended form begins with \\?\, as in
+ // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt.
+ // The extended form disables evaluation of . and .. path
+ // elements and disables the interpretation of / as equivalent
+ // to \. The conversion here rewrites / to \ and elides
+ // . elements as well as trailing or duplicate separators. For
+ // simplicity it avoids the conversion entirely for relative
+ // paths or paths containing .. elements. For now,
+ // \\server\share paths are not converted to
+ // \\?\UNC\server\share paths because the rules for doing so
+ // are less well-specified.
+ if len(path) >= 2 && path[:2] == `\\` {
+ // Don't canonicalize UNC paths.
+ return path
+ }
+ if !filepath.IsAbs(path) {
+ // Relative path
+ return path
+ }
+
+ const prefix = `\\?`
+
+ pathbuf := make([]byte, len(prefix)+len(path)+len(`\`))
+ copy(pathbuf, prefix)
+ n := len(path)
+ r, w := 0, len(prefix)
+ for r < n {
+ switch {
+ case os.IsPathSeparator(path[r]):
+ // empty block
+ r++
+ case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
+ // /./
+ r++
+ case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+ // /../ is currently unhandled
+ return path
+ default:
+ pathbuf[w] = '\\'
+ w++
+ for ; r < n && !os.IsPathSeparator(path[r]); r++ {
+ pathbuf[w] = path[r]
+ w++
+ }
+ }
+ }
+ // A drive's root directory needs a trailing \
+ if w == len(`\\?\c:`) {
+ pathbuf[w] = '\\'
+ w++
+ }
+ return string(pathbuf[:w])
+}
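A minimal sketch of the exported sysx.Readlink wrapper above; unlike syscall.Readlink it grows the buffer internally. The link path is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/containerd/continuity/sysx"
)

func main() {
	// The caller does not manage a buffer; the wrapper retries with larger
	// buffers until the link target fits.
	target, err := sysx.Readlink("/tmp/some-symlink")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(target)
}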
diff --git a/vendor/github.com/containerd/continuity/sysx/generate.sh b/vendor/github.com/containerd/continuity/sysx/generate.sh
new file mode 100644
index 000000000..87d708d7a
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/generate.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+# Copyright The containerd Authors.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -e
+
+mksyscall="$(go env GOROOT)/src/syscall/mksyscall.pl"
+
+fix() {
+ sed 's,^package syscall$,package sysx,' \
+ | sed 's,^import "unsafe"$,import (\n\t"syscall"\n\t"unsafe"\n),' \
+ | gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \
+ | gofmt -r='Syscall6 -> syscall.Syscall6' \
+ | gofmt -r='Syscall -> syscall.Syscall' \
+ | gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \
+ | gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \
+ | gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \
+ | gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \
+ | gofmt -r='SYS_LGETXATTR -> syscall.SYS_LGETXATTR' \
+ | gofmt -r='SYS_LLISTXATTR -> syscall.SYS_LLISTXATTR' \
+ | gofmt -r='SYS_LSETXATTR -> syscall.SYS_LSETXATTR' \
+ | gofmt -r='SYS_LREMOVEXATTR -> syscall.SYS_LREMOVEXATTR'
+}
+
+if [ "$GOARCH" == "" ] || [ "$GOOS" == "" ]; then
+ echo "Must specify \$GOARCH and \$GOOS"
+ exit 1
+fi
+
+mkargs=""
+
+if [ "$GOARCH" == "386" ] || [ "$GOARCH" == "arm" ]; then
+ mkargs="-l32"
+fi
+
+for f in "$@"; do
+ $mksyscall $mkargs "${f}_${GOOS}.go" | fix > "${f}_${GOOS}_${GOARCH}.go"
+done
+
diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_linux.go b/vendor/github.com/containerd/continuity/sysx/nodata_linux.go
new file mode 100644
index 000000000..28ce5d8de
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/nodata_linux.go
@@ -0,0 +1,23 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sysx
+
+import (
+ "syscall"
+)
+
+const ENODATA = syscall.ENODATA
diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go b/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go
new file mode 100644
index 000000000..e0575f446
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go
@@ -0,0 +1,24 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sysx
+
+import (
+ "syscall"
+)
+
+// This should actually be a set that contains ENOENT and EPERM
+const ENODATA = syscall.ENOENT
diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_unix.go b/vendor/github.com/containerd/continuity/sysx/nodata_unix.go
new file mode 100644
index 000000000..b26f5b3d0
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/nodata_unix.go
@@ -0,0 +1,25 @@
+// +build darwin freebsd
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sysx
+
+import (
+ "syscall"
+)
+
+const ENODATA = syscall.ENOATTR
diff --git a/vendor/github.com/containerd/continuity/sysx/xattr.go b/vendor/github.com/containerd/continuity/sysx/xattr.go
new file mode 100644
index 000000000..9e4326dcf
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/xattr.go
@@ -0,0 +1,125 @@
+// +build linux darwin
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sysx
+
+import (
+ "bytes"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// Listxattr calls syscall listxattr and reads all content
+// and returns a string array
+func Listxattr(path string) ([]string, error) {
+ return listxattrAll(path, unix.Listxattr)
+}
+
+// Removexattr calls syscall removexattr
+func Removexattr(path string, attr string) (err error) {
+ return unix.Removexattr(path, attr)
+}
+
+// Setxattr calls syscall setxattr
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
+ return unix.Setxattr(path, attr, data, flags)
+}
+
+// Getxattr calls syscall getxattr
+func Getxattr(path, attr string) ([]byte, error) {
+ return getxattrAll(path, attr, unix.Getxattr)
+}
+
+// LListxattr lists xattrs, not following symlinks
+func LListxattr(path string) ([]string, error) {
+ return listxattrAll(path, unix.Llistxattr)
+}
+
+// LRemovexattr removes an xattr, not following symlinks
+func LRemovexattr(path string, attr string) (err error) {
+ return unix.Lremovexattr(path, attr)
+}
+
+// LSetxattr sets an xattr, not following symlinks
+func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
+ return unix.Lsetxattr(path, attr, data, flags)
+}
+
+// LGetxattr gets an xattr, not following symlinks
+func LGetxattr(path, attr string) ([]byte, error) {
+ return getxattrAll(path, attr, unix.Lgetxattr)
+}
+
+const defaultXattrBufferSize = 5
+
+type listxattrFunc func(path string, dest []byte) (int, error)
+
+func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) {
+ var p []byte // nil on first execution
+
+ for {
+ n, err := listFunc(path, p) // first call gets buffer size.
+ if err != nil {
+ return nil, err
+ }
+
+ if n > len(p) {
+ p = make([]byte, n)
+ continue
+ }
+
+ p = p[:n]
+
+ ps := bytes.Split(bytes.TrimSuffix(p, []byte{0}), []byte{0})
+ var entries []string
+ for _, p := range ps {
+ s := string(p)
+ if s != "" {
+ entries = append(entries, s)
+ }
+ }
+
+ return entries, nil
+ }
+}
+
+type getxattrFunc func(string, string, []byte) (int, error)
+
+func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) {
+ p := make([]byte, defaultXattrBufferSize)
+ for {
+ n, err := getFunc(path, attr, p)
+ if err != nil {
+ if errno, ok := err.(syscall.Errno); ok && errno == syscall.ERANGE {
+ p = make([]byte, len(p)*2) // this can't be ideal.
+ continue // try again!
+ }
+
+ return nil, err
+ }
+
+ // realloc to correct size and repeat
+ if n > len(p) {
+ p = make([]byte, n)
+ continue
+ }
+
+ return p[:n], nil
+ }
+}
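The helpers above wrap the raw xattr syscalls with size discovery and retry: listxattrAll probes the required buffer size with a nil buffer, and getxattrAll starts from a 5-byte buffer and doubles it on ERANGE. A rough usage sketch, assuming a Linux filesystem with user extended-attribute support (the file name and attribute key are placeholders):

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/containerd/continuity/sysx"
)

func main() {
	// Create a throwaway file, attach a user xattr, then read it back
	// through the size-discovering wrappers defined above.
	f, err := ioutil.TempFile("", "xattr-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())

	if err := sysx.Setxattr(f.Name(), "user.demo", []byte("hello"), 0); err != nil {
		log.Fatal(err) // fails on filesystems without user xattr support
	}

	names, err := sysx.Listxattr(f.Name()) // buffer sized by the nil-probe retry loop
	if err != nil {
		log.Fatal(err)
	}
	val, err := sysx.Getxattr(f.Name(), "user.demo") // grows past the 5-byte default on ERANGE
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(names, string(val))
}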
diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go
new file mode 100644
index 000000000..c9ef3a1d2
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go
@@ -0,0 +1,67 @@
+// +build !linux,!darwin
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sysx
+
+import (
+ "errors"
+ "runtime"
+)
+
+var unsupported = errors.New("extended attributes unsupported on " + runtime.GOOS)
+
+// Listxattr calls syscall listxattr and reads all content
+// and returns a string array
+func Listxattr(path string) ([]string, error) {
+ return []string{}, nil
+}
+
+// Removexattr calls syscall removexattr
+func Removexattr(path string, attr string) (err error) {
+ return unsupported
+}
+
+// Setxattr calls syscall setxattr
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
+ return unsupported
+}
+
+// Getxattr calls syscall getxattr
+func Getxattr(path, attr string) ([]byte, error) {
+ return []byte{}, unsupported
+}
+
+// LListxattr lists xattrs, not following symlinks
+func LListxattr(path string) ([]string, error) {
+ return []string{}, nil
+}
+
+// LRemovexattr removes an xattr, not following symlinks
+func LRemovexattr(path string, attr string) (err error) {
+ return unsupported
+}
+
+// LSetxattr sets an xattr, not following symlinks
+func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
+ return unsupported
+}
+
+// LGetxattr gets an xattr, not following symlinks
+func LGetxattr(path, attr string) ([]byte, error) {
+ return []byte{}, nil
+}
diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md
index 0db48932c..c75a055a6 100644
--- a/vendor/github.com/containers/buildah/CHANGELOG.md
+++ b/vendor/github.com/containers/buildah/CHANGELOG.md
@@ -2,6 +2,32 @@
# Changelog
+## v1.11.3 (2019-10-04)
+ Update c/image to v4.0.1
+ Bump github.com/spf13/pflag from 1.0.3 to 1.0.5
+ Fix --build-args handling
+ Bump github.com/spf13/cobra from 0.0.3 to 0.0.5
+ Bump github.com/cyphar/filepath-securejoin from 0.2.1 to 0.2.2
+ Bump github.com/onsi/ginkgo from 1.8.0 to 1.10.1
+ Bump github.com/fsouza/go-dockerclient from 1.3.0 to 1.4.4
+ Add support for retrieving context from stdin "-"
+ Ensure bud remote context cleans up on error
+ info: add cgroups2
+ Bump github.com/seccomp/libseccomp-golang from 0.9.0 to 0.9.1
+ Bump github.com/mattn/go-shellwords from 1.0.5 to 1.0.6
+ Bump github.com/stretchr/testify from 1.3.0 to 1.4.0
+ Bump github.com/opencontainers/selinux from 1.2.2 to 1.3.0
+ Bump github.com/etcd-io/bbolt from 1.3.2 to 1.3.3
+ Bump github.com/onsi/gomega from 1.5.0 to 1.7.0
+ update c/storage to v1.13.4
+ Print build 'STEP' line to stdout, not stderr
+ Fix travis-ci on forks
+ Vendor c/storage v1.13.3
+ Use Containerfile by default
+ Added tutorial on how to include Buildah as library
+ util/util: Fix "configuraitno" -> "configuration" log typo
+ Bump back to v1.12.0-dev
+
## v1.11.2 (2019-09-13)
Add some cleanup code
Move devices code to unit specific directory.
diff --git a/vendor/github.com/containers/buildah/OWNERS b/vendor/github.com/containers/buildah/OWNERS
new file mode 100644
index 000000000..5e6cbfdfa
--- /dev/null
+++ b/vendor/github.com/containers/buildah/OWNERS
@@ -0,0 +1,22 @@
+approvers:
+ - TomSweeneyRedHat
+ - cevich
+ - giuseppe
+ - nalind
+ - rhatdan
+ - vrothberg
+reviewers:
+ - QiWang19
+ - TomSweeneyRedHat
+ - baude
+ - cevich
+ - edsantiago
+ - giuseppe
+ - haircommander
+ - jwhonce
+ - mheon
+ - mrunalp
+ - nalind
+ - rhatdan
+ - umohnani8
+ - vrothberg
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 6f974ba86..59b62925a 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -12,7 +12,7 @@ import (
"github.com/containers/buildah/docker"
"github.com/containers/buildah/util"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/ioutils"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -27,7 +27,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.11.3"
+ Version = "1.11.4"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt
index 359ff5227..6e98e5405 100644
--- a/vendor/github.com/containers/buildah/changelog.txt
+++ b/vendor/github.com/containers/buildah/changelog.txt
@@ -1,3 +1,25 @@
+- Changelog for v1.11.4 (2019-10-28)
+ * buildah: add a "manifest" command
+ * manifests: add the module
+ * pkg/supplemented: add a package for grouping images together
+ * pkg/manifests: add a manifest list build/manipulation API
+ * Update for ErrUnauthorizedForCredentials API change in containers/image
+ * Update for manifest-lists API changes in containers/image
+ * version: also note the version of containers/image
+ * Move to containers/image v5.0.0
+ * Enable --device directory as src device
+ * Fix git build with branch specified
+ * Bump github.com/openshift/imagebuilder from 1.1.0 to 1.1.1
+ * Bump github.com/fsouza/go-dockerclient from 1.4.4 to 1.5.0
+ * Add clarification to the Tutorial for new users
+ * Silence "using cache" to ensure -q is fully quiet
+ * Add OWNERS File to Buildah
+ * Bump github.com/containers/storage from 1.13.4 to 1.13.5
+ * Move runtime flag to bud from common
+ * Commit: check for storage.ErrImageUnknown using errors.Cause()
+ * Fix crash when invalid COPY --from flag is specified.
+ * Bump back to v1.12.0-dev
+
- Changelog for v1.11.3 (2019-10-04)
* Update c/image to v4.0.1
* Bump github.com/spf13/pflag from 1.0.3 to 1.0.5
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index f46609525..4df3b9908 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -12,14 +12,14 @@ import (
"github.com/containers/buildah/pkg/blobcache"
"github.com/containers/buildah/util"
- cp "github.com/containers/image/v4/copy"
- "github.com/containers/image/v4/docker"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/signature"
- is "github.com/containers/image/v4/storage"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ cp "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/stringid"
@@ -96,7 +96,7 @@ type PushOptions struct {
// github.com/containers/image/types SystemContext to hold credentials
// and other authentication/authorization information.
SystemContext *types.SystemContext
- // ManifestType is the format to use when saving the imge using the 'dir' transport
+ // ManifestType is the format to use when saving the image using the 'dir' transport
// possible options are oci, v2s1, and v2s2
ManifestType string
// BlobDirectory is the name of a directory in which we'll look for
@@ -309,7 +309,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
img, err := is.Transport.GetStoreImage(b.store, dest)
- if err != nil && err != storage.ErrImageUnknown {
+ if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
return imgID, nil, "", errors.Wrapf(err, "error locating image %q in local storage", transports.ImageName(dest))
}
if err == nil {
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
index a8b29231d..d2e9dc732 100644
--- a/vendor/github.com/containers/buildah/common.go
+++ b/vendor/github.com/containers/buildah/common.go
@@ -6,8 +6,8 @@ import (
"path/filepath"
"github.com/containers/buildah/pkg/unshare"
- cp "github.com/containers/image/v4/copy"
- "github.com/containers/image/v4/types"
+ cp "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
)
diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go
index 49b1930c5..617619e45 100644
--- a/vendor/github.com/containers/buildah/config.go
+++ b/vendor/github.com/containers/buildah/config.go
@@ -8,9 +8,9 @@ import (
"time"
"github.com/containers/buildah/docker"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/stringid"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
diff --git a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go
index 4b62e0e31..561287ac2 100644
--- a/vendor/github.com/containers/buildah/docker/types.go
+++ b/vendor/github.com/containers/buildah/docker/types.go
@@ -7,7 +7,7 @@ package docker
import (
"time"
- "github.com/containers/image/v4/pkg/strslice"
+ "github.com/containers/image/v5/pkg/strslice"
digest "github.com/opencontainers/go-digest"
)
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 0bd592d48..6bba4a1f8 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -5,8 +5,8 @@ go 1.12
require (
github.com/blang/semver v3.5.0+incompatible // indirect
github.com/containernetworking/cni v0.7.1
- github.com/containers/image/v4 v4.0.1
- github.com/containers/storage v1.13.4
+ github.com/containers/image/v5 v5.0.0
+ github.com/containers/storage v1.13.5
github.com/cyphar/filepath-securejoin v0.2.2
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker-credential-helpers v0.6.1 // indirect
@@ -14,7 +14,7 @@ require (
github.com/docker/go-units v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
github.com/etcd-io/bbolt v1.3.3
- github.com/fsouza/go-dockerclient v1.4.4
+ github.com/fsouza/go-dockerclient v1.5.0
github.com/ghodss/yaml v1.0.0
github.com/hashicorp/go-multierror v1.0.0
github.com/imdario/mergo v0.3.6 // indirect
@@ -30,7 +30,7 @@ require (
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.3.0
github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible
- github.com/openshift/imagebuilder v1.1.0
+ github.com/openshift/imagebuilder v1.1.1
github.com/pkg/errors v0.8.1
github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4
github.com/seccomp/libseccomp-golang v0.9.1
@@ -42,7 +42,7 @@ require (
github.com/vishvananda/netlink v1.0.0 // indirect
github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f // indirect
github.com/xeipuuv/gojsonschema v1.1.0 // indirect
- golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
+ golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad
golang.org/x/sys v0.0.0-20190902133755-9109b7679e13
golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 // indirect
google.golang.org/grpc v1.24.0 // indirect
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index 6ebb9f91f..e5ce6a290 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -28,21 +28,23 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20180216233310-d8fb8589b0e8/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M=
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE=
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containers/image v3.0.2+incompatible h1:B1lqAE8MUPCrsBLE86J0gnXleeRq8zJnQryhiiGQNyE=
-github.com/containers/image v3.0.2+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
-github.com/containers/image v4.0.0+incompatible h1:CfKbemfowbIg3nhq8rvtI+sdU9QbvODkiD+JLpOJMiQ=
-github.com/containers/image v4.0.0+incompatible/go.mod h1:Td6tqqQu0miIBO8mauyzsVqBbv5WhKSE4pH2ZwslVp0=
github.com/containers/image/v4 v4.0.1 h1:idNGHChj0Pyv3vLrxul2oSVMZLeFqpoq3CjLeVgapSQ=
github.com/containers/image/v4 v4.0.1/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA=
+github.com/containers/image/v5 v5.0.0 h1:arnXgbt1ucsC/ndtSpiQY87rA0UjhF+/xQnPzqdBDn4=
+github.com/containers/image/v5 v5.0.0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/storage v1.13.4 h1:j0bBaJDKbUHtAW1MXPFnwXJtqcH+foWeuXK1YaBV5GA=
github.com/containers/storage v1.13.4/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA=
+github.com/containers/storage v1.13.5 h1:/SUzGeOP2HDijpF7Yur21Ch6WTZC1BNeZF917CWcp5c=
+github.com/containers/storage v1.13.5/go.mod h1:HELz8Sn+UVbPaUZMI8RvIG9doD4y4z6Gtg4k7xdd2ZY=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -60,6 +62,8 @@ github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5N
github.com/docker/docker v0.0.0-20180522102801-da99009bbb11/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b h1:+Ga+YpCDpcY1fln6GI0fiiirpqHGcob5/Vk3oKNuGdU=
github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce h1:H3csZuxZESJeeEiOxq4YXPNmLFbjl7u2qVBrAAGX/sA=
+github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.6.1 h1:Dq4iIfcM7cNtddhLVWe9h4QDjsi4OER3Z8voPu/I52g=
github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
@@ -84,6 +88,8 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsouza/go-dockerclient v1.4.4 h1:Sd5nD4wdAgiPxvrbYUzT2ZZNmPk3z+GGnZ+frvw8z04=
github.com/fsouza/go-dockerclient v1.4.4/go.mod h1:PrwszSL5fbmsESocROrOGq/NULMXRw+bajY0ltzD6MA=
+github.com/fsouza/go-dockerclient v1.5.0 h1:7OtayOe5HnoG+KWMHgyyPymwaodnB2IDYuVfseKyxbA=
+github.com/fsouza/go-dockerclient v1.5.0/go.mod h1:AqZZK/zFO3phxYxlTsAaeAMSdQ9mgHuhy+bjN034Qds=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v0.0.0-20161207003320-04f313413ffd/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
@@ -101,6 +107,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -190,16 +197,11 @@ github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lN
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c h1:xa+eQWKuJ9MbB9FBL/eoNvDFvveAkz2LQoz8PzX7Q/4=
github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c/go.mod h1:GhAqVMEWnTcW2dxoD/SO3n2enrgWl3y6Dnx4m59GvcA=
-github.com/mtrmac/image/v4 v4.0.0-20191001213151-121ffca6db69 h1:TVWS7od6UeGhdYqgXn/+EIDlulkGGV+r6FnjoxRJAl0=
-github.com/mtrmac/image/v4 v4.0.0-20191001213151-121ffca6db69/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA=
-github.com/mtrmac/image/v4 v4.0.0-20191002203927-a64d9d2717f4 h1:AE5cilZfrGtAgMg5Ed4c2Y2KczlOsMVZAK055sSq+gc=
-github.com/mtrmac/image/v4 v4.0.0-20191002203927-a64d9d2717f4/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA=
-github.com/mtrmac/image/v4 v4.0.0-20191003181245-f4c983e93262 h1:HMUEnWU3OPT09JRFQLn8VTp3GfdfiEhDMAEhkdX8QnA=
-github.com/mtrmac/image/v4 v4.0.0-20191003181245-f4c983e93262/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
@@ -238,6 +240,8 @@ github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible h1:s5
github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY=
github.com/openshift/imagebuilder v1.1.0 h1:oT704SkwMEzmIMU/+Uv1Wmvt+p10q3v2WuYMeFI18c4=
github.com/openshift/imagebuilder v1.1.0/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
+github.com/openshift/imagebuilder v1.1.1 h1:KAUR31p8UBJdfVO42azWgb+LeMAed2zaKQ19e0C0X2I=
+github.com/openshift/imagebuilder v1.1.1/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -299,8 +303,6 @@ github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
-github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
@@ -329,6 +331,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90Pveol
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ=
+golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -390,6 +394,7 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 829d8c21d..79c75ce0b 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -13,11 +13,11 @@ import (
"time"
"github.com/containers/buildah/docker"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/manifest"
- is "github.com/containers/image/v4/storage"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/manifest"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils"
@@ -596,7 +596,7 @@ func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *
return i.manifest, i.manifestType, nil
}
-func (i *containerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+func (i *containerImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
return nil, nil
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index fe6cc266a..6b2c9c84c 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -13,8 +13,8 @@ import (
"strings"
"github.com/containers/buildah"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/opencontainers/runc/libcontainer/configs"
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index 136261bf0..27ec1bb23 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -12,11 +12,11 @@ import (
"github.com/containers/buildah"
"github.com/containers/buildah/util"
- "github.com/containers/image/v4/docker/reference"
- is "github.com/containers/image/v4/storage"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/transports/alltransports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index 114d250a4..fad2bfe95 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -13,12 +13,12 @@ import (
"github.com/containers/buildah"
buildahdocker "github.com/containers/buildah/docker"
"github.com/containers/buildah/util"
- cp "github.com/containers/image/v4/copy"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/manifest"
- is "github.com/containers/image/v4/storage"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ cp "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
securejoin "github.com/cyphar/filepath-securejoin"
@@ -759,6 +759,12 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
s.executor.log(commitMessage)
}
}
+ logCacheHit := func(cacheID string) {
+ if !s.executor.quiet {
+ cacheHitMessage := "--> Using cache"
+ fmt.Fprintf(s.executor.out, "%s %s\n", cacheHitMessage, cacheID)
+ }
+ }
logImageID := func(imgID string) {
if s.executor.iidfile == "" {
fmt.Fprintf(s.executor.out, "%s\n", imgID)
@@ -816,6 +822,9 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
if strings.Contains(n, "--from") && (command == "COPY" || command == "ADD") {
var mountPoint string
arr := strings.Split(n, "=")
+ if len(arr) != 2 {
+ return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|index>", command)
+ }
otherStage, ok := s.executor.stages[arr[1]]
if !ok {
if mountPoint, err = s.getImageRootfs(ctx, stage, arr[1]); err != nil {
@@ -906,7 +915,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
}
if cacheID != "" {
// Note the cache hit.
- fmt.Fprintf(s.executor.out, "--> Using cache %s\n", cacheID)
+ logCacheHit(cacheID)
} else {
// We're not going to find any more cache hits.
checkForLayers = false
diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go
index 7a94d9974..520b92e3f 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/util.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/util.go
@@ -23,8 +23,15 @@ func cloneToDirectory(url, dir string) error {
if !strings.HasPrefix(url, "git://") && !strings.HasSuffix(url, ".git") {
url = "git://" + url
}
- logrus.Debugf("cloning %q to %q", url, dir)
- cmd := exec.Command("git", "clone", url, dir)
+ gitBranch := strings.Split(url, "#")
+ var cmd *exec.Cmd
+ if len(gitBranch) < 2 {
+ logrus.Debugf("cloning %q to %q", url, dir)
+ cmd = exec.Command("git", "clone", url, dir)
+ } else {
+ logrus.Debugf("cloning repo %q and branch %q to %q", gitBranch[0], gitBranch[1], dir)
+ cmd = exec.Command("git", "clone", "-b", gitBranch[1], gitBranch[0], dir)
+ }
return cmd.Run()
}
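The clone logic above accepts a build-context URL with an optional branch appended after '#'. A tiny sketch of that convention (the repository URL and branch name are placeholders):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A build-context URL may carry a branch after '#', mirroring the
	// split performed in cloneToDirectory above.
	parts := strings.Split("https://example.com/some/repo.git#mybranch", "#")
	fmt.Println("repo:", parts[0])
	fmt.Println("branch:", parts[1])
}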
diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go
index 4d3059527..751ce6ae1 100644
--- a/vendor/github.com/containers/buildah/import.go
+++ b/vendor/github.com/containers/buildah/import.go
@@ -5,9 +5,9 @@ import (
"github.com/containers/buildah/docker"
"github.com/containers/buildah/util"
- "github.com/containers/image/v4/manifest"
- is "github.com/containers/image/v4/storage"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/manifest"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 216a96611..87cfd5d0d 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -7,12 +7,12 @@ import (
"strings"
"github.com/containers/buildah/util"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/pkg/sysregistriesv2"
- is "github.com/containers/image/v4/storage"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/transports/alltransports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/openshift/imagebuilder"
"github.com/pkg/errors"
diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
index 539c894a3..b7f704615 100644
--- a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
+++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
@@ -10,11 +10,11 @@ import (
"sync"
"github.com/containers/buildah/docker"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
@@ -263,14 +263,14 @@ func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *dig
return s.source.GetSignatures(ctx, instanceDigest)
}
-func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
- signatures, err := s.source.GetSignatures(ctx, nil)
+func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ signatures, err := s.source.GetSignatures(ctx, instanceDigest)
if err != nil {
return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference))
}
canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0)
- infos, err := s.source.LayerInfosForCopy(ctx)
+ infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest)
if err != nil {
return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference))
}
@@ -515,7 +515,7 @@ func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.Bl
return false, types.BlobInfo{}, nil
}
-func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte) error {
+func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error {
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err)
@@ -525,13 +525,13 @@ func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []
logrus.Warnf("error saving manifest as %q: %v", filename, err)
}
}
- return d.destination.PutManifest(ctx, manifestBytes)
+ return d.destination.PutManifest(ctx, manifestBytes, instanceDigest)
}
-func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
- return d.destination.PutSignatures(ctx, signatures)
+func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+ return d.destination.PutSignatures(ctx, signatures, instanceDigest)
}
-func (d *blobCacheDestination) Commit(ctx context.Context) error {
- return d.destination.Commit(ctx)
+func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ return d.destination.Commit(ctx, unparsedToplevel)
}
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 06aec96ea..a9bf94a32 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -8,6 +8,7 @@ import (
"fmt"
"os"
"path/filepath"
+ "runtime"
"strings"
"github.com/containers/buildah"
@@ -95,6 +96,8 @@ type FromAndBudResults struct {
Isolation string
Memory string
MemorySwap string
+ OverrideArch string
+ OverrideOS string
SecurityOpt []string
ShmSize string
Ulimit []string
@@ -161,7 +164,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image, even if a version is present")
fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress")
fs.BoolVar(&flags.Rm, "rm", true, "Remove intermediate containers after a successful build")
- fs.StringVar(&flags.Runtime, "runtime", util.Runtime(), "`path` to an alternate runtime. Use BUILDAH_RUNTIME environment variable to override.")
+ // "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/bud.go.
fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
fs.BoolVar(&flags.Squash, "squash", false, "Squash newly built layers into a single new layer.")
@@ -194,6 +197,14 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults,
fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)")
fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap")
+ fs.StringVar(&flags.OverrideOS, "override-os", runtime.GOOS, "prefer `OS` instead of the running OS when pulling images")
+ if err := fs.MarkHidden("override-os"); err != nil {
+ panic(fmt.Sprintf("error marking override-os as hidden: %v", err))
+ }
+ fs.StringVar(&flags.OverrideArch, "override-arch", runtime.GOARCH, "prefer `ARCH` instead of the architecture of the machine when pulling images")
+ if err := fs.MarkHidden("override-arch"); err != nil {
+ panic(fmt.Sprintf("error marking override-arch as hidden: %v", err))
+ }
fs.StringArrayVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])")
fs.StringVar(&flags.ShmSize, "shm-size", "65536k", "size of '/dev/shm'. The format is `<number><unit>`.")
fs.StringSliceVar(&flags.Ulimit, "ulimit", []string{}, "ulimit options (default [])")
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index f0996315a..9194ddf58 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -14,7 +14,7 @@ import (
"unicode"
"github.com/containers/buildah"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/idtools"
units "github.com/docker/go-units"
specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -583,6 +583,12 @@ func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) {
ctx.RegistriesDirPath = regConfDir
}
ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", buildah.Version)
+ if os, err := c.Flags().GetString("override-os"); err == nil {
+ ctx.OSChoice = os
+ }
+ if arch, err := c.Flags().GetString("override-arch"); err == nil {
+ ctx.ArchitectureChoice = arch
+ }
return ctx, nil
}
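The hidden --override-os and --override-arch flags wired up above feed directly into the containers/image SystemContext as OSChoice and ArchitectureChoice. A minimal sketch of the equivalent direct construction (the "linux"/"arm64" values are illustrative):

package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

func main() {
	// Equivalent of passing --override-os linux --override-arch arm64:
	// image resolution will prefer the matching manifest-list entry.
	ctx := &types.SystemContext{
		OSChoice:           "linux",
		ArchitectureChoice: "arm64",
	}
	fmt.Println(ctx.OSChoice, ctx.ArchitectureChoice)
}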
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
index 238293894..1aaeca278 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
@@ -4,6 +4,8 @@ package parse
import (
"fmt"
+ "os"
+ "path/filepath"
"github.com/containers/buildah/pkg/unshare"
"github.com/opencontainers/runc/libcontainer/configs"
@@ -24,18 +26,40 @@ func getDefaultProcessLimits() []string {
return defaultLimits
}
-func DeviceFromPath(device string) (configs.Device, error) {
+func DeviceFromPath(device string) ([]configs.Device, error) {
+ var devs []configs.Device
src, dst, permissions, err := Device(device)
if err != nil {
- return configs.Device{}, err
+ return nil, err
}
if unshare.IsRootless() {
- return configs.Device{}, errors.Errorf("Renaming device %s to %s is not a supported in rootless containers", src, dst)
+ return nil, errors.Errorf("Renaming device %s to %s is not a supported in rootless containers", src, dst)
}
- dev, err := devices.DeviceFromPath(src, permissions)
+ srcInfo, err := os.Stat(src)
if err != nil {
- return configs.Device{}, errors.Wrapf(err, "%s is not a valid device", src)
+ return nil, errors.Wrapf(err, "error getting info of source device %s", src)
}
- dev.Path = dst
- return *dev, nil
+
+ if !srcInfo.IsDir() {
+
+ dev, err := devices.DeviceFromPath(src, permissions)
+ if err != nil {
+ return nil, errors.Wrapf(err, "%s is not a valid device", src)
+ }
+ dev.Path = dst
+ devs = append(devs, *dev)
+ return devs, nil
+ }
+
+ // If source device is a directory
+ srcDevices, err := devices.GetDevices(src)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting source devices from directory %s", src)
+ }
+ for _, d := range srcDevices {
+ d.Path = filepath.Join(dst, filepath.Base(d.Path))
+ d.Permissions = permissions
+ devs = append(devs, *d)
+ }
+ return devs, nil
}
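With the signature change above, DeviceFromPath returns a slice: a single device node yields one entry, while a directory source such as /dev/dri expands to every device node found beneath it. A hypothetical caller, assuming the usual src[:dst[:permissions]] device spec and a non-rootless environment with the devices present:

package main

import (
	"fmt"
	"log"

	"github.com/containers/buildah/pkg/parse"
)

func main() {
	// With a directory source, every device node beneath it is returned;
	// a single node still yields a one-element slice.
	devs, err := parse.DeviceFromPath("/dev/dri:/dev/dri:rwm")
	if err != nil {
		log.Fatal(err) // also errors when running rootless, per the check above
	}
	for _, d := range devs {
		fmt.Println(d.Path)
	}
}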
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index 60dc3693f..300f3b396 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -8,18 +8,18 @@ import (
"github.com/containers/buildah/pkg/blobcache"
"github.com/containers/buildah/util"
- cp "github.com/containers/image/v4/copy"
- "github.com/containers/image/v4/directory"
- "github.com/containers/image/v4/docker"
- dockerarchive "github.com/containers/image/v4/docker/archive"
- "github.com/containers/image/v4/docker/reference"
- tarfile "github.com/containers/image/v4/docker/tarfile"
- ociarchive "github.com/containers/image/v4/oci/archive"
- oci "github.com/containers/image/v4/oci/layout"
- "github.com/containers/image/v4/signature"
- is "github.com/containers/image/v4/storage"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ cp "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/directory"
+ "github.com/containers/image/v5/docker"
+ dockerarchive "github.com/containers/image/v5/docker/archive"
+ "github.com/containers/image/v5/docker/reference"
+ tarfile "github.com/containers/image/v5/docker/tarfile"
+ ociarchive "github.com/containers/image/v5/oci/archive"
+ oci "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/signature"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index 06492fad2..44db18b45 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -8,9 +8,9 @@ import (
"path/filepath"
"github.com/containers/buildah/util"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/pkg/sysregistriesv2"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index a572d1405..d5e842315 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -10,12 +10,12 @@ import (
"syscall"
"github.com/containers/buildah/pkg/cgroups"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/pkg/sysregistriesv2"
- "github.com/containers/image/v4/signature"
- is "github.com/containers/image/v4/storage"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/signature"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/docker/distribution/registry/api/errcode"
specs "github.com/opencontainers/runtime-spec/specs-go"
diff --git a/vendor/github.com/containers/image/v4/image/docker_list.go b/vendor/github.com/containers/image/v4/image/docker_list.go
deleted file mode 100644
index a11cd06b9..000000000
--- a/vendor/github.com/containers/image/v4/image/docker_list.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package image
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "runtime"
-
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/types"
- "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
-)
-
-type platformSpec struct {
- Architecture string `json:"architecture"`
- OS string `json:"os"`
- OSVersion string `json:"os.version,omitempty"`
- OSFeatures []string `json:"os.features,omitempty"`
- Variant string `json:"variant,omitempty"`
- Features []string `json:"features,omitempty"` // removed in OCI
-}
-
-// A manifestDescriptor references a platform-specific manifest.
-type manifestDescriptor struct {
- manifest.Schema2Descriptor
- Platform platformSpec `json:"platform"`
-}
-
-type manifestList struct {
- SchemaVersion int `json:"schemaVersion"`
- MediaType string `json:"mediaType"`
- Manifests []manifestDescriptor `json:"manifests"`
-}
-
-// chooseDigestFromManifestList parses blob as a schema2 manifest list,
-// and returns the digest of the image appropriate for the current environment.
-func chooseDigestFromManifestList(sys *types.SystemContext, blob []byte) (digest.Digest, error) {
- wantedArch := runtime.GOARCH
- if sys != nil && sys.ArchitectureChoice != "" {
- wantedArch = sys.ArchitectureChoice
- }
- wantedOS := runtime.GOOS
- if sys != nil && sys.OSChoice != "" {
- wantedOS = sys.OSChoice
- }
-
- list := manifestList{}
- if err := json.Unmarshal(blob, &list); err != nil {
- return "", err
- }
- for _, d := range list.Manifests {
- if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS {
- return d.Digest, nil
- }
- }
- return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS)
-}
-
-func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
- targetManifestDigest, err := chooseDigestFromManifestList(sys, manblob)
- if err != nil {
- return nil, err
- }
- manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
- if err != nil {
- return nil, err
- }
-
- matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
- if err != nil {
- return nil, errors.Wrap(err, "Error computing manifest digest")
- }
- if !matches {
- return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest)
- }
-
- return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
-}
-
-// ChooseManifestInstanceFromManifestList returns a digest of a manifest appropriate
-// for the current system from the manifest available from src.
-func ChooseManifestInstanceFromManifestList(ctx context.Context, sys *types.SystemContext, src types.UnparsedImage) (digest.Digest, error) {
- // For now this only handles manifest.DockerV2ListMediaType; we can generalize it later,
- // probably along with manifest list editing.
- blob, mt, err := src.Manifest(ctx)
- if err != nil {
- return "", err
- }
- if mt != manifest.DockerV2ListMediaType {
- return "", fmt.Errorf("Internal error: Trying to select an image from a non-manifest-list manifest type %s", mt)
- }
- return chooseDigestFromManifestList(sys, blob)
-}
diff --git a/vendor/github.com/containers/image/v4/LICENSE b/vendor/github.com/containers/image/v5/LICENSE
index 953563530..953563530 100644
--- a/vendor/github.com/containers/image/v4/LICENSE
+++ b/vendor/github.com/containers/image/v5/LICENSE
diff --git a/vendor/github.com/containers/image/v4/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index 30d8a4464..090d862d5 100644
--- a/vendor/github.com/containers/image/v4/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -13,15 +13,16 @@ import (
"sync"
"time"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/pkg/blobinfocache"
- "github.com/containers/image/v4/pkg/compression"
- "github.com/containers/image/v4/signature"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbauerster/mpb"
@@ -110,6 +111,37 @@ type imageCopier struct {
canSubstituteBlobs bool
}
+const (
+ // CopySystemImage is the default value which, when set in
+ // Options.ImageListSelection, indicates that the caller expects only one
+ // image to be copied, so if the source reference refers to a list of
+ // images, one that matches the current system will be selected.
+ CopySystemImage ImageListSelection = iota
+ // CopyAllImages is a value which, when set in Options.ImageListSelection,
+ // indicates that the caller expects to copy multiple images, and if
+ // the source reference refers to a list, that the list and every image
+ // to which it refers will be copied. If the source reference refers
+	// to a list and the target reference cannot accept lists, an error
+ // should be returned.
+ CopyAllImages
+ // CopySpecificImages is a value which, when set in
+ // Options.ImageListSelection, indicates that the caller expects the
+ // source reference to be either a single image or a list of images,
+ // and if the source reference is a list, wants only specific instances
+ // from it copied (or none of them, if the list of instances to copy is
+ // empty), along with the list itself. If the target reference can
+ // only accept one image (i.e., it cannot accept lists), an error
+ // should be returned.
+ CopySpecificImages
+)
+
+// ImageListSelection is one of CopySystemImage, CopyAllImages, or
+// CopySpecificImages, to control whether, when the source reference is a list,
+// copy.Image() copies only an image which matches the current runtime
+// environment, or all images which match the supplied reference, or only
+// specific images from the source reference.
+type ImageListSelection int
+
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
type Options struct {
RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature.
@@ -121,12 +153,24 @@ type Options struct {
Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
// manifest MIME type of image set by user. "" is default and means use autodetection to determine the manifest MIME type
ForceManifestMIMEType string
+ ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list
+ Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself
+}
+
+// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
+func validateImageListSelection(selection ImageListSelection) error {
+ switch selection {
+ case CopySystemImage, CopyAllImages, CopySpecificImages:
+ return nil
+ default:
+ return errors.Errorf("Invalid value for options.ImageListSelection: %d", selection)
+ }
}
// Image copies image from srcRef to destRef, using policyContext to validate
// source image admissibility. It returns the manifest which was written to
// the new copy of the image.
-func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (manifest []byte, retErr error) {
+func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) {
// NOTE this function uses an output parameter for the error return value.
// Setting this and returning is the ideal way to return an error.
//
@@ -136,6 +180,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
options = &Options{}
}
+ if err := validateImageListSelection(options.ImageListSelection); err != nil {
+ return nil, err
+ }
+
reportWriter := ioutil.Discard
if options.ReportWriter != nil {
@@ -206,79 +254,278 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}
if !multiImage {
- // The simple case: Just copy a single image.
- if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil {
+ // The simple case: just copy a single image.
+ if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedToplevel, nil); err != nil {
return nil, err
}
- } else {
- // This is a manifest list. Choose a single image and copy it.
- // FIXME: Copy to destinations which support manifest lists, one image at a time.
- instanceDigest, err := image.ChooseManifestInstanceFromManifestList(ctx, options.SourceCtx, unparsedToplevel)
+ } else if options.ImageListSelection == CopySystemImage {
+ // This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that
+ // matches the current system to copy, and copy it.
+ mfest, manifestType, err := unparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error reading manifest for %s", transports.ImageName(srcRef))
+ }
+ manifestList, err := manifest.ListFromBlob(mfest, manifestType)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error parsing primary manifest as list for %s", transports.ImageName(srcRef))
+ }
+ instanceDigest, err := manifestList.ChooseInstance(options.SourceCtx) // try to pick one that matches options.SourceCtx
if err != nil {
return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
}
- logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest)
+ logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
- if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil {
+ if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil {
+ return nil, err
+ }
+ } else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages */
+ // If we were asked to copy multiple images and can't, that's an error.
+ if !supportsMultipleImages(c.dest) {
+ return nil, errors.Errorf("Error copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
+ }
+ // Copy some or all of the images.
+ switch options.ImageListSelection {
+ case CopyAllImages:
+ logrus.Debugf("Source is a manifest list; copying all instances")
+ case CopySpecificImages:
+ logrus.Debugf("Source is a manifest list; copying some instances")
+ }
+ if copiedManifest, _, err = c.copyMultipleImages(ctx, policyContext, options, unparsedToplevel); err != nil {
return nil, err
}
}
- if err := c.dest.Commit(ctx); err != nil {
+ if err := c.dest.Commit(ctx, unparsedToplevel); err != nil {
return nil, errors.Wrap(err, "Error committing the finished image")
}
- return manifest, nil
+ return copiedManifest, nil
}
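The ImageListSelection plumbing above is driven entirely through Options. A minimal, hypothetical caller sketch: the docker:// source, the dir: destination, and the accept-anything policy are placeholders, and only copy.Image, Options.ReportWriter, Options.ImageListSelection, and copy.CopyAllImages are taken from the code in this diff.

    package main

    import (
    	"context"
    	"os"

    	"github.com/containers/image/v5/copy"
    	"github.com/containers/image/v5/signature"
    	"github.com/containers/image/v5/transports/alltransports"
    )

    func main() {
    	ctx := context.Background()

    	// Accept any image; a real caller would load a proper signature policy.
    	policy := &signature.Policy{Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()}}
    	policyContext, err := signature.NewPolicyContext(policy)
    	if err != nil {
    		panic(err)
    	}
    	defer policyContext.Destroy()

    	// Hypothetical references; the dir: destination advertises no MIME type restrictions,
    	// so supportsMultipleImages() above lets it receive a whole list.
    	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
    	if err != nil {
    		panic(err)
    	}
    	destRef, err := alltransports.ParseImageName("dir:/tmp/busybox-copy")
    	if err != nil {
    		panic(err)
    	}

    	// CopyAllImages copies every instance plus the list itself; the default,
    	// CopySystemImage, would instead pick the single instance matching this host.
    	if _, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
    		ReportWriter:       os.Stdout,
    		ImageListSelection: copy.CopyAllImages,
    	}); err != nil {
    		panic(err)
    	}
    }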
-// Image copies a single (on-manifest-list) image unparsedImage, using policyContext to validate
+// Checks if the destination supports accepting multiple images by checking if it can support
+// manifest types that are lists of other manifests.
+func supportsMultipleImages(dest types.ImageDestination) bool {
+ mtypes := dest.SupportedManifestMIMETypes()
+ if len(mtypes) == 0 {
+ // Anything goes!
+ return true
+ }
+ for _, mtype := range mtypes {
+ if manifest.MIMETypeIsMultiImage(mtype) {
+ return true
+ }
+ }
+ return false
+}
+
+// copyMultipleImages copies some or all of an image list's instances, using
+// policyContext to validate source image admissibility.
+func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, copiedManifestType string, retErr error) {
+ // Parse the list and get a copy of the original value after it's re-encoded.
+ manifestList, manifestType, err := unparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "Error reading manifest list")
+ }
+ list, err := manifest.ListFromBlob(manifestList, manifestType)
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "Error parsing manifest list %q", string(manifestList))
+ }
+ originalList := list.Clone()
+
+ // Read and/or clear the set of signatures for this list.
+ var sigs [][]byte
+ if options.RemoveSignatures {
+ sigs = [][]byte{}
+ } else {
+ c.Printf("Getting image list signatures\n")
+ s, err := c.rawSource.GetSignatures(ctx, nil)
+ if err != nil {
+ return nil, "", errors.Wrap(err, "Error reading signatures")
+ }
+ sigs = s
+ }
+ if len(sigs) != 0 {
+ c.Printf("Checking if image list destination supports signatures\n")
+ if err := c.dest.SupportsSignatures(ctx); err != nil {
+ return nil, "", errors.Wrap(err, "Can not copy signatures")
+ }
+ }
+
+ // Determine if we'll need to convert the manifest list to a different format.
+ forceListMIMEType := options.ForceManifestMIMEType
+ switch forceListMIMEType {
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
+ forceListMIMEType = manifest.DockerV2ListMediaType
+ case imgspecv1.MediaTypeImageManifest:
+ forceListMIMEType = imgspecv1.MediaTypeImageIndex
+ }
+ selectedListType, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "Error determining manifest list type to write to destination")
+ }
+ if selectedListType != list.MIMEType() {
+ canModifyManifestList := (len(sigs) == 0)
+ if !canModifyManifestList {
+ return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType)
+ }
+ }
+
+ // Copy each image, or just the ones we want to copy, in turn.
+ instanceDigests := list.Instances()
+ imagesToCopy := len(instanceDigests)
+ if options.ImageListSelection == CopySpecificImages {
+ imagesToCopy = len(options.Instances)
+ }
+ c.Printf("Copying %d of %d images in list\n", imagesToCopy, len(instanceDigests))
+ updates := make([]manifest.ListUpdate, len(instanceDigests))
+ instancesCopied := 0
+ for i, instanceDigest := range instanceDigests {
+ if options.ImageListSelection == CopySpecificImages {
+ skip := true
+ for _, instance := range options.Instances {
+ if instance == instanceDigest {
+ skip = false
+ break
+ }
+ }
+ if skip {
+ update, err := list.Instance(instanceDigest)
+ if err != nil {
+ return nil, "", err
+ }
+ logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
+ // Record the digest/size/type of the manifest that we didn't copy.
+ updates[i] = update
+ continue
+ }
+ }
+ logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
+ c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy)
+ unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest)
+ updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest)
+ if err != nil {
+ return nil, "", err
+ }
+ instancesCopied++
+ // Record the result of a possible conversion here.
+ update := manifest.ListUpdate{
+ Digest: updatedManifestDigest,
+ Size: int64(len(updatedManifest)),
+ MediaType: updatedManifestType,
+ }
+ updates[i] = update
+ }
+
+ // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
+ if err = list.UpdateInstances(updates); err != nil {
+ return nil, "", errors.Wrapf(err, "Error updating manifest list")
+ }
+
+ // Check if the updates meaningfully changed the list of images.
+ listIsModified := !reflect.DeepEqual(list.Instances(), originalList.Instances())
+
+ // Perform the list conversion.
+ if selectedListType != list.MIMEType() {
+ list, err = list.ConvertToMIMEType(selectedListType)
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "Error converting manifest list to list with MIME type %q", selectedListType)
+ }
+ }
+
+ // If the list of instances was updated, re-encode the list so that we write out the updated copy.
+ if listIsModified {
+ manifestList, err = list.Serialize()
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "Error encoding updated manifest list (%q: %#v)", list.MIMEType(), list.Instances())
+ }
+ logrus.Debugf("Manifest list has been updated")
+ }
+
+ // Save the manifest list.
+ c.Printf("Writing manifest list to image destination\n")
+ if err = c.dest.PutManifest(ctx, manifestList, nil); err != nil {
+ return nil, "", errors.Wrapf(err, "Error writing manifest list %q", string(manifestList))
+ }
+
+ // Sign the manifest list.
+ if options.SignBy != "" {
+ newSig, err := c.createSignature(manifestList, options.SignBy)
+ if err != nil {
+ return nil, "", err
+ }
+ sigs = append(sigs, newSig)
+ }
+
+ c.Printf("Storing list signatures\n")
+ if err := c.dest.PutSignatures(ctx, sigs, nil); err != nil {
+ return nil, "", errors.Wrap(err, "Error writing signatures")
+ }
+
+ return manifestList, selectedListType, nil
+}
+
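When Options.ImageListSelection is CopySpecificImages, the digests in Options.Instances decide which entries of the loop above are copied and which are merely recorded. A hedged sketch of how a caller might gather candidate digests, using only GetManifest, manifest.MIMETypeIsMultiImage, manifest.ListFromBlob, and List.Instances() as they appear in this diff; the types.ImageSource is assumed to have been opened elsewhere.

    package instances

    import (
    	"context"
    	"fmt"

    	"github.com/containers/image/v5/manifest"
    	"github.com/containers/image/v5/types"
    	digest "github.com/opencontainers/go-digest"
    )

    // gatherInstances returns at most n instance digests of the source's manifest list,
    // as candidates for copy.Options.Instances. A sketch, not part of the vendored code.
    func gatherInstances(ctx context.Context, src types.ImageSource, n int) ([]digest.Digest, error) {
    	blob, mimeType, err := src.GetManifest(ctx, nil) // nil: read the primary (top-level) manifest
    	if err != nil {
    		return nil, err
    	}
    	if !manifest.MIMETypeIsMultiImage(mimeType) {
    		return nil, fmt.Errorf("%s is not a manifest list type", mimeType)
    	}
    	list, err := manifest.ListFromBlob(blob, mimeType)
    	if err != nil {
    		return nil, err
    	}
    	instances := list.Instances()
    	if n > len(instances) {
    		n = len(instances)
    	}
    	return instances[:n], nil
    }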
+// copyOneImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
// source image admissibility.
-func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifestBytes []byte, retErr error) {
+func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) {
// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
// Make sure we fail cleanly in such cases.
multiImage, err := isMultiImage(ctx, unparsedImage)
if err != nil {
// FIXME FIXME: How to name a reference for the sub-image?
- return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
+ return nil, "", "", errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
}
if multiImage {
- return nil, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
+ return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
}
// Please keep this policy check BEFORE reading any other information about the image.
- // (the multiImage check above only matches the MIME type, which we have received anyway.
+ // (The multiImage check above only matches the MIME type, which we have received anyway.
// Actual parsing of anything should be deferred.)
if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
- return nil, errors.Wrap(err, "Source image rejected")
+ return nil, "", "", errors.Wrap(err, "Source image rejected")
}
src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
if err != nil {
- return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
+ return nil, "", "", errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
}
// If the destination is a digested reference, make a note of that, determine what digest value we're
- // expecting, and check that the source manifest matches it.
+ // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's
+ // one item from a manifest list that matches it, accept that as a match.
destIsDigestedReference := false
if named := c.dest.Reference().DockerReference(); named != nil {
if digested, ok := named.(reference.Digested); ok {
destIsDigestedReference = true
sourceManifest, _, err := src.Manifest(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "Error reading manifest from source image")
+ return nil, "", "", errors.Wrapf(err, "Error reading manifest from source image")
}
matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
if err != nil {
- return nil, errors.Wrapf(err, "Error computing digest of source image's manifest")
+ return nil, "", "", errors.Wrapf(err, "Error computing digest of source image's manifest")
}
if !matches {
- return nil, errors.New("Digest of source image's manifest would not match destination reference")
+ manifestList, _, err := unparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return nil, "", "", errors.Wrapf(err, "Error reading manifest from source image")
+ }
+ matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
+ if err != nil {
+ return nil, "", "", errors.Wrapf(err, "Error computing digest of source image's manifest")
+ }
+ if !matches {
+ return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference")
+ }
}
}
}
if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil {
- return nil, err
+ return nil, "", "", err
}
var sigs [][]byte
@@ -288,14 +535,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
c.Printf("Getting image source signatures\n")
s, err := src.Signatures(ctx)
if err != nil {
- return nil, errors.Wrap(err, "Error reading signatures")
+ return nil, "", "", errors.Wrap(err, "Error reading signatures")
}
sigs = s
}
if len(sigs) != 0 {
c.Printf("Checking if image destination supports signatures\n")
if err := c.dest.SupportsSignatures(ctx); err != nil {
- return nil, errors.Wrap(err, "Can not copy signatures")
+ return nil, "", "", errors.Wrap(err, "Can not copy signatures")
}
}
@@ -315,28 +562,29 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == ""
if err := ic.updateEmbeddedDockerReference(); err != nil {
- return nil, err
+ return nil, "", "", err
}
// We compute preferredManifestMIMEType only to show it in error messages.
// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType)
if err != nil {
- return nil, err
+ return nil, "", "", err
}
// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
if err := ic.copyLayers(ctx); err != nil {
- return nil, err
+ return nil, "", "", err
}
// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
// So, try the preferred manifest MIME type. If the process succeeds, fine…
- manifestBytes, err = ic.copyUpdatedConfigAndManifest(ctx)
+ manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
+ retManifestType = preferredManifestMIMEType
if err != nil {
logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
// … if it fails, _and_ the failure is because the manifest is rejected, we may have other options.
@@ -344,14 +592,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// We don’t have other options.
// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
// Don’t bother the user with MIME types if we have no choice.
- return nil, err
+ return nil, "", "", err
}
// If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
// So if we are here, we will definitely be trying to convert the manifest.
// With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason,
// so let’s bail out early and with a better error message.
if !ic.canModifyManifest {
- return nil, errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
+ return nil, "", "", errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
}
// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
@@ -359,7 +607,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
for _, manifestMIMEType := range otherManifestMIMETypeCandidates {
logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
- attemptedManifest, err := ic.copyUpdatedConfigAndManifest(ctx)
+ attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
if err != nil {
logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err)
errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err))
@@ -368,28 +616,30 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// We have successfully uploaded a manifest.
manifestBytes = attemptedManifest
+ retManifestDigest = attemptedManifestDigest
+ retManifestType = manifestMIMEType
errs = nil // Mark this as a success so that we don't abort below.
break
}
if errs != nil {
- return nil, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
+ return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
}
}
if options.SignBy != "" {
newSig, err := c.createSignature(manifestBytes, options.SignBy)
if err != nil {
- return nil, err
+ return nil, "", "", err
}
sigs = append(sigs, newSig)
}
c.Printf("Storing signatures\n")
- if err := c.dest.PutSignatures(ctx, sigs); err != nil {
- return nil, errors.Wrap(err, "Error writing signatures")
+ if err := c.dest.PutSignatures(ctx, sigs, targetInstance); err != nil {
+ return nil, "", "", errors.Wrap(err, "Error writing signatures")
}
- return manifestBytes, nil
+ return manifestBytes, retManifestType, retManifestDigest, nil
}
// Printf writes a formatted string to c.reportWriter.
@@ -554,12 +804,13 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool {
}
// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,
-// stores the resulting config and manifest to the destination, and returns the stored manifest.
-func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte, error) {
+// stores the resulting config and manifest to the destination, and returns the stored manifest
+// and its digest.
+func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) {
pendingImage := ic.src
if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) {
if !ic.canModifyManifest {
- return nil, errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
+ return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
}
if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
// We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
@@ -568,28 +819,35 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte
// when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
// Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
// If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
- return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
+ return nil, "", errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
}
pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
if err != nil {
- return nil, errors.Wrap(err, "Error creating an updated image manifest")
+ return nil, "", errors.Wrap(err, "Error creating an updated image manifest")
}
pendingImage = pi
}
- manifest, _, err := pendingImage.Manifest(ctx)
+ man, _, err := pendingImage.Manifest(ctx)
if err != nil {
- return nil, errors.Wrap(err, "Error reading manifest")
+ return nil, "", errors.Wrap(err, "Error reading manifest")
}
if err := ic.c.copyConfig(ctx, pendingImage); err != nil {
- return nil, err
+ return nil, "", err
}
ic.c.Printf("Writing manifest to image destination\n")
- if err := ic.c.dest.PutManifest(ctx, manifest); err != nil {
- return nil, errors.Wrap(err, "Error writing manifest")
+ manifestDigest, err := manifest.Digest(man)
+ if err != nil {
+ return nil, "", err
+ }
+ if instanceDigest != nil {
+ instanceDigest = &manifestDigest
+ }
+ if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
+ return nil, "", errors.Wrap(err, "Error writing manifest")
}
- return manifest, nil
+ return man, manifestDigest, nil
}
// newProgressPool creates a *mpb.Progress and a cleanup function.
diff --git a/vendor/github.com/containers/image/v4/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go
index 7c981fcad..f5f6c9c5f 100644
--- a/vendor/github.com/containers/image/v4/copy/manifest.go
+++ b/vendor/github.com/containers/image/v5/copy/manifest.go
@@ -4,8 +4,8 @@ import (
"context"
"strings"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -119,3 +119,36 @@ func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) {
}
return manifest.MIMETypeIsMultiImage(mt), nil
}
+
+// determineListConversion takes the current MIME type of a list of manifests,
+// the list of MIME types supported for a given destination, and a possible
+// forced value, and returns the MIME type to which we should convert the list
+// of manifests, whether we are converting to it or using it unmodified.
+func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, error) {
+ // If we're forcing it, we prefer the forced value over everything else.
+ if forcedListMIMEType != "" {
+ return forcedListMIMEType, nil
+ }
+ // If there's no list of supported types, then anything we support is expected to be supported.
+ if len(destSupportedMIMETypes) == 0 {
+ destSupportedMIMETypes = manifest.SupportedListMIMETypes
+ }
+ var selectedType string
+ for i := range destSupportedMIMETypes {
+ // The second priority is the first member of the list of acceptable types that is a list,
+ // but keep going in case the current type occurs later in the list.
+ if selectedType == "" && manifest.MIMETypeIsMultiImage(destSupportedMIMETypes[i]) {
+ selectedType = destSupportedMIMETypes[i]
+ }
+ // The first priority is the current type, if it's in the list, since that lets us avoid a
+ // conversion that isn't strictly necessary.
+ if destSupportedMIMETypes[i] == currentListMIMEType {
+ selectedType = destSupportedMIMETypes[i]
+ }
+ }
+ if selectedType == "" {
+ return "", errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
+ }
+ // Done.
+ return selectedType, nil
+}
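The priority here is: a forced type wins outright, otherwise the current list type is kept if the destination accepts it, otherwise the first list-capable type the destination offers is used. A standalone sketch that mirrors (and does not replace) that selection, for illustration only:

    package listselect

    import (
    	"fmt"

    	"github.com/containers/image/v5/manifest"
    )

    // pickListType mirrors the selection logic of determineListConversion above,
    // shown as a free function purely for illustration.
    func pickListType(currentListMIMEType, forcedListMIMEType string, destSupportedMIMETypes []string) (string, error) {
    	if forcedListMIMEType != "" {
    		return forcedListMIMEType, nil // a forced type always wins
    	}
    	if len(destSupportedMIMETypes) == 0 {
    		destSupportedMIMETypes = manifest.SupportedListMIMETypes // no restrictions advertised
    	}
    	selected := ""
    	for _, mt := range destSupportedMIMETypes {
    		if mt == currentListMIMEType {
    			return mt, nil // keeping the current type avoids a conversion
    		}
    		if selected == "" && manifest.MIMETypeIsMultiImage(mt) {
    			selected = mt // otherwise, the first list-capable type the destination offers
    		}
    	}
    	if selected == "" {
    		return "", fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
    	}
    	return selected, nil
    }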
diff --git a/vendor/github.com/containers/image/v4/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_reader.go
index c6a1e3da6..1d0c41bce 100644
--- a/vendor/github.com/containers/image/v4/copy/progress_reader.go
+++ b/vendor/github.com/containers/image/v5/copy/progress_reader.go
@@ -4,7 +4,7 @@ import (
"io"
"time"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
)
// progressReader is a reader that reports its progress on an interval.
diff --git a/vendor/github.com/containers/image/v4/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go
index 64c3b4b2b..8f46e9de6 100644
--- a/vendor/github.com/containers/image/v4/copy/sign.go
+++ b/vendor/github.com/containers/image/v5/copy/sign.go
@@ -1,8 +1,8 @@
package copy
import (
- "github.com/containers/image/v4/signature"
- "github.com/containers/image/v4/transports"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/transports"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/containers/image/v4/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go
index 18f7dde70..2d6650de7 100644
--- a/vendor/github.com/containers/image/v4/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go
@@ -7,7 +7,7 @@ import (
"os"
"path/filepath"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -199,16 +199,23 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
}
// PutManifest writes manifest to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
// and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte) error {
- return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644)
+func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
+ return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
}
-func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+// PutSignatures writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
for i, sig := range signatures {
- if err := ioutil.WriteFile(d.ref.signaturePath(i), sig, 0644); err != nil {
+ if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil {
return err
}
}
@@ -219,7 +226,7 @@ func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *dirImageDestination) Commit(ctx context.Context) error {
+func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error {
return nil
}
diff --git a/vendor/github.com/containers/image/v4/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go
index 921c1941c..ad9129d40 100644
--- a/vendor/github.com/containers/image/v4/directory/directory_src.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_src.go
@@ -6,10 +6,9 @@ import (
"io/ioutil"
"os"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
type dirImageSource struct {
@@ -38,10 +37,7 @@ func (s *dirImageSource) Close() error {
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
- if instanceDigest != nil {
- return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`)
- }
- m, err := ioutil.ReadFile(s.ref.manifestPath())
+ m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest))
if err != nil {
return nil, "", err
}
@@ -73,12 +69,9 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- if instanceDigest != nil {
- return nil, errors.Errorf(`Manifests lists are not supported by "dir:"`)
- }
signatures := [][]byte{}
for i := 0; ; i++ {
- signature, err := ioutil.ReadFile(s.ref.signaturePath(i))
+ signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest))
if err != nil {
if os.IsNotExist(err) {
break
@@ -90,7 +83,14 @@ func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *dige
return signatures, nil
}
-// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified.
-func (s *dirImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *dirImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
return nil, nil
}
diff --git a/vendor/github.com/containers/image/v4/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go
index 29ac7115f..adfec6ef3 100644
--- a/vendor/github.com/containers/image/v4/directory/directory_transport.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go
@@ -6,11 +6,11 @@ import (
"path/filepath"
"strings"
- "github.com/containers/image/v4/directory/explicitfilepath"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/directory/explicitfilepath"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@@ -166,18 +166,24 @@ func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContex
}
// manifestPath returns a path for the manifest within a directory using our conventions.
-func (ref dirReference) manifestPath() string {
+func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string {
+ if instanceDigest != nil {
+ return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json")
+ }
return filepath.Join(ref.path, "manifest.json")
}
// layerPath returns a path for a layer tarball within a directory using our conventions.
func (ref dirReference) layerPath(digest digest.Digest) string {
// FIXME: Should we keep the digest identification?
- return filepath.Join(ref.path, digest.Hex())
+ return filepath.Join(ref.path, digest.Encoded())
}
// signaturePath returns a path for a signature within a directory using our conventions.
-func (ref dirReference) signaturePath(index int) string {
+func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string {
+ if instanceDigest != nil {
+ return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1))
+ }
return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
}
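With the instanceDigest-aware naming above, a dir: destination that received a manifest list ends up with per-instance files next to the top-level ones. A hypothetical layout, names shortened; <instance-hex> stands for digest.Encoded() of an instance:

    /tmp/busybox-copy/
        manifest.json                   top-level manifest list
        signature-1                     first signature for the list itself
        <instance-hex>.manifest.json    manifest of one copied instance
        <instance-hex>.signature-1      first signature for that instance
        <blob-hex>                      config and layer blobs, named by digest.Encoded()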
diff --git a/vendor/github.com/containers/image/v4/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
index 71136b880..71136b880 100644
--- a/vendor/github.com/containers/image/v4/directory/explicitfilepath/path.go
+++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
diff --git a/vendor/github.com/containers/image/v4/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go
index 9e06e7c96..5845f63be 100644
--- a/vendor/github.com/containers/image/v4/docker/archive/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go
@@ -5,8 +5,8 @@ import (
"io"
"os"
- "github.com/containers/image/v4/docker/tarfile"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/tarfile"
+ "github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
@@ -67,6 +67,6 @@ func (d *archiveImageDestination) Close() error {
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *archiveImageDestination) Commit(ctx context.Context) error {
+func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
return d.Destination.Commit(ctx)
}
diff --git a/vendor/github.com/containers/image/v4/docker/archive/src.go b/vendor/github.com/containers/image/v5/docker/archive/src.go
index feea0decd..a90707437 100644
--- a/vendor/github.com/containers/image/v4/docker/archive/src.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/src.go
@@ -2,8 +2,8 @@ package archive
import (
"context"
- "github.com/containers/image/v4/docker/tarfile"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/tarfile"
+ "github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
)
@@ -33,8 +33,3 @@ func newImageSource(ctx context.Context, ref archiveReference) (types.ImageSourc
func (s *archiveImageSource) Reference() types.ImageReference {
return s.ref
}
-
-// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *archiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
- return nil, nil
-}
diff --git a/vendor/github.com/containers/image/v4/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go
index 347fdbd6e..44213bb8d 100644
--- a/vendor/github.com/containers/image/v4/docker/archive/transport.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go
@@ -5,10 +5,10 @@ import (
"fmt"
"strings"
- "github.com/containers/image/v4/docker/reference"
- ctrImage "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ ctrImage "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/containers/image/v4/docker/cache.go b/vendor/github.com/containers/image/v5/docker/cache.go
index 51bf5b0d3..728d32d17 100644
--- a/vendor/github.com/containers/image/v4/docker/cache.go
+++ b/vendor/github.com/containers/image/v5/docker/cache.go
@@ -1,8 +1,8 @@
package docker
import (
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
)
// bicTransportScope returns a BICTransportScope appropriate for ref.
diff --git a/vendor/github.com/containers/image/v4/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go
index 94c4970f2..323a02fc0 100644
--- a/vendor/github.com/containers/image/v4/docker/daemon/client.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go
@@ -4,7 +4,7 @@ import (
"net/http"
"path/filepath"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
dockerclient "github.com/docker/docker/client"
"github.com/docker/go-connections/tlsconfig"
)
diff --git a/vendor/github.com/containers/image/v4/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
index 2c56ab934..25ce55a17 100644
--- a/vendor/github.com/containers/image/v4/docker/daemon/daemon_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
@@ -4,9 +4,9 @@ import (
"context"
"io"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/docker/tarfile"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/docker/tarfile"
+ "github.com/containers/image/v5/types"
"github.com/docker/docker/client"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -124,7 +124,7 @@ func (d *daemonImageDestination) Reference() types.ImageReference {
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *daemonImageDestination) Commit(ctx context.Context) error {
+func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
logrus.Debugf("docker-daemon: Closing tar stream")
if err := d.Destination.Commit(ctx); err != nil {
return err
diff --git a/vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
index f6f60aaf9..46fbcc4e0 100644
--- a/vendor/github.com/containers/image/v4/docker/daemon/daemon_src.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
@@ -3,8 +3,8 @@ package daemon
import (
"context"
- "github.com/containers/image/v4/docker/tarfile"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/tarfile"
+ "github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
@@ -55,8 +55,3 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef
func (s *daemonImageSource) Reference() types.ImageReference {
return s.ref
}
-
-// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *daemonImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
- return nil, nil
-}
diff --git a/vendor/github.com/containers/image/v4/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
index 4c6986ba0..4e4ed6881 100644
--- a/vendor/github.com/containers/image/v4/docker/daemon/daemon_transport.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
@@ -4,11 +4,11 @@ import (
"context"
"fmt"
- "github.com/containers/image/v4/docker/policyconfiguration"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/policyconfiguration"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/containers/image/v4/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index d5662a030..0b012c703 100644
--- a/vendor/github.com/containers/image/v4/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -16,12 +16,12 @@ import (
"sync"
"time"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/pkg/docker/config"
- "github.com/containers/image/v4/pkg/sysregistriesv2"
- "github.com/containers/image/v4/pkg/tlsclientconfig"
- "github.com/containers/image/v4/types"
- "github.com/docker/distribution/registry/client"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/pkg/docker/config"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/pkg/tlsclientconfig"
+ "github.com/containers/image/v5/types"
+ clientLib "github.com/docker/distribution/registry/client"
"github.com/docker/go-connections/tlsconfig"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -47,14 +47,7 @@ const (
extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type
)
-var (
- // ErrV1NotSupported is returned when we're trying to talk to a
- // docker V1 registry.
- ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
- // ErrUnauthorizedForCredentials is returned when the status code returned is 401
- ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password")
- systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"}
-)
+var systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"}
// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
// signature represents a Docker image signature.
@@ -284,14 +277,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
}
defer resp.Body.Close()
- switch resp.StatusCode {
- case http.StatusOK:
- return nil
- case http.StatusUnauthorized:
- return ErrUnauthorizedForCredentials
- default:
- return errors.Errorf("error occured with status code %d (%s)", resp.StatusCode, http.StatusText(resp.StatusCode))
- }
+ return httpResponseToError(resp)
}
// SearchResult holds the information of each matching image
@@ -365,7 +351,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
- logrus.Debugf("error getting search results from v1 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode))
+ logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, httpResponseToError(resp))
} else {
if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
return nil, err
@@ -382,7 +368,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
- logrus.Errorf("error getting search results from v2 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode))
+ logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, httpResponseToError(resp))
} else {
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
return nil, err
@@ -417,8 +403,78 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea
// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// streamLen, if not -1, specifies the length of the data expected on stream.
// makeRequest should generally be preferred.
+// In case of an http 429 status code in the response, it performs an exponential back off starting at 2 seconds for at most 5 iterations.
+// If the `Retry-After` header is set in the response, the specified value or date is used as the delay instead, capped at 60 seconds.
+// If the stream is non-nil, no back off will be performed.
// TODO(runcom): too many arguments here, use a struct
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+ var (
+ res *http.Response
+ err error
+ delay int64
+ )
+ delay = 2
+ const numIterations = 5
+ const maxDelay = 60
+
+ // math.Min() only supports float64, so have an anonymous func to avoid
+ // casting.
+ min := func(a int64, b int64) int64 {
+ if a < b {
+ return a
+ }
+ return b
+ }
+
+ nextDelay := func(r *http.Response, delay int64) int64 {
+ after := r.Header.Get("Retry-After")
+ if after == "" {
+ return min(delay, maxDelay)
+ }
+ logrus.Debugf("detected 'Retry-After' header %q", after)
+ // First check if we have a numerical value.
+ if num, err := strconv.ParseInt(after, 10, 64); err == nil {
+ return min(num, maxDelay)
+ }
+ // Secondly check if we have an http date.
+ // If the delta between the date and now is positive, use it.
+ // Otherwise, fall back to using the default exponential back off.
+ if t, err := http.ParseTime(after); err == nil {
+ delta := int64(t.Sub(time.Now()).Seconds())
+ if delta > 0 {
+ return min(delta, maxDelay)
+ }
+ logrus.Debugf("negative date: falling back to using %d seconds", delay)
+ return min(delay, maxDelay)
+ }
+ // If the header contains a bogus value, fall back to using the default
+ // exponential back off.
+ logrus.Debugf("invalid format: falling back to using %d seconds", delay)
+ return min(delay, maxDelay)
+ }
+
+ for i := 0; i < numIterations; i++ {
+ res, err = c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope)
+ if stream == nil && res != nil && res.StatusCode == http.StatusTooManyRequests {
+ if i < numIterations-1 {
+ logrus.Errorf("HEADER %v", res.Header)
+ delay = nextDelay(res, delay) // compute next delay - does NOT exceed maxDelay
+ logrus.Debugf("too many request to %s: sleeping for %d seconds before next attempt", url, delay)
+ time.Sleep(time.Duration(delay) * time.Second)
+ delay = delay * 2 // exponential back off
+ }
+ continue
+ }
+ break
+ }
+ return res, err
+}
+
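With the constants above (initial delay of 2 seconds, doubling after each attempt, capped at 60 seconds, at most 5 attempts) and no usable Retry-After header, the sleeps between attempts come out to 2s, 4s, 8s and 16s. A tiny standalone sketch of that schedule, not part of the vendored code:

    package main

    import "fmt"

    func main() {
    	delay := int64(2)
    	const maxDelay, attempts = int64(60), 5
    	for i := 1; i < attempts; i++ {
    		sleep := delay
    		if sleep > maxDelay {
    			sleep = maxDelay
    		}
    		fmt.Printf("after attempt %d: sleep %ds\n", i, sleep) // 2, 4, 8, 16
    		delay *= 2 // exponential back off, as in makeRequestToResolvedURL
    	}
    }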
+// makeRequestToResolvedURLOnce creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// streamLen, if not -1, specifies the length of the data expected on stream.
+// makeRequest should generally be preferred.
+// Note that no exponential back off is performed when receiving an http 429 status code.
+func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
req, err := http.NewRequest(method, url, stream)
if err != nil {
return nil, err
@@ -533,9 +589,9 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
defer res.Body.Close()
switch res.StatusCode {
case http.StatusUnauthorized:
- err := client.HandleErrorResponse(res)
+ err := clientLib.HandleErrorResponse(res)
logrus.Debugf("Server response when trying to obtain an access token: \n%q", err.Error())
- return nil, ErrUnauthorizedForCredentials
+ return nil, ErrUnauthorizedForCredentials{Err: err}
case http.StatusOK:
break
default:
@@ -571,7 +627,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
defer resp.Body.Close()
logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
- return errors.Errorf("error pinging registry %s, response code %d (%s)", c.registry, resp.StatusCode, http.StatusText(resp.StatusCode))
+ return httpResponseToError(resp)
}
c.challenges = parseAuthHeader(resp.Header)
c.scheme = scheme
@@ -583,7 +639,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
err = ping("http")
}
if err != nil {
- err = errors.Wrap(err, "pinging docker registry returned")
+ err = errors.Wrapf(err, "error pinging docker registry %s", c.registry)
if c.sys != nil && c.sys.DockerDisableV1Ping {
return err
}
@@ -629,9 +685,11 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
return nil, err
}
defer res.Body.Close()
+
if res.StatusCode != http.StatusOK {
- return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
+ return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
}
+
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
diff --git a/vendor/github.com/containers/image/v4/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go
index 4332dc020..dad382cd0 100644
--- a/vendor/github.com/containers/image/v4/docker/docker_image.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -4,13 +4,12 @@ import (
"context"
"encoding/json"
"fmt"
- "net/http"
"net/url"
"strings"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
@@ -71,9 +70,8 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
return nil, err
}
defer res.Body.Close()
- if res.StatusCode != http.StatusOK {
- // print url also
- return nil, errors.Errorf("Invalid status code returned when fetching tags list %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+ if err := httpResponseToError(res); err != nil {
+ return nil, err
}
var tagsHolder struct {
diff --git a/vendor/github.com/containers/image/v4/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index 0f351ab59..417d97aec 100644
--- a/vendor/github.com/containers/image/v4/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -14,12 +14,12 @@ import (
"path/filepath"
"strings"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/pkg/blobinfocache/none"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
"github.com/docker/distribution/registry/api/errcode"
- "github.com/docker/distribution/registry/api/v2"
+ v2 "github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -61,6 +61,8 @@ func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
return []string{
imgspecv1.MediaTypeImageManifest,
manifest.DockerV2Schema2MediaType,
+ imgspecv1.MediaTypeImageIndex,
+ manifest.DockerV2ListMediaType,
manifest.DockerV2Schema1SignedMediaType,
manifest.DockerV2Schema1MediaType,
}
@@ -343,20 +345,47 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
}
// PutManifest writes manifest to the destination.
+// When the primary manifest is a manifest list, if instanceDigest is nil, we're saving the list
+// itself, else instanceDigest contains a digest of the specific manifest instance to overwrite the
+// manifest for; when the primary manifest is not a manifest list, instanceDigest should always be nil.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
// and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) error {
- digest, err := manifest.Digest(m)
- if err != nil {
- return err
+func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ refTail := ""
+ if instanceDigest != nil {
+ // If the instanceDigest is provided, then use it as the refTail, because the reference,
+ // whether it includes a tag or a digest, refers to the list as a whole, and not this
+ // particular instance.
+ refTail = instanceDigest.String()
+ // Double-check that the manifest we've been given matches the digest we've been given.
+ matches, err := manifest.MatchesDigest(m, *instanceDigest)
+ if err != nil {
+ return errors.Wrapf(err, "error digesting manifest in PutManifest")
+ }
+ if !matches {
+ manifestDigest, merr := manifest.Digest(m)
+ if merr != nil {
+ return errors.Wrapf(merr, "Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%v attempting to compute it)", instanceDigest.String(), merr)
+ }
+ return errors.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
+ }
+ } else {
+ // Compute the digest of the main manifest, or the list if it's a list, so that we
+ // have a digest value to use if we're asked to save a signature for the manifest.
+ digest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ d.manifestDigest = digest
+ // The refTail should be either a digest (which we expect to match the value we just
+ // computed) or a tag name.
+ refTail, err = d.ref.tagOrDigest()
+ if err != nil {
+ return err
+ }
}
- d.manifestDigest = digest
- refTail, err := d.ref.tagOrDigest()
- if err != nil {
- return err
- }
path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
headers := map[string][]string{}
@@ -416,19 +445,30 @@ func isManifestInvalidError(err error) bool {
}
}
-func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+// PutSignatures uploads a set of signatures to the relevant lookaside or API extension point.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to upload the signatures for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
// Do not fail if we don’t really need to support signatures.
if len(signatures) == 0 {
return nil
}
+ if instanceDigest == nil {
+ if d.manifestDigest.String() == "" {
+ // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
+ return errors.Errorf("Unknown manifest digest, can't add signatures")
+ }
+ instanceDigest = &d.manifestDigest
+ }
+
if err := d.c.detectProperties(ctx); err != nil {
return err
}
switch {
case d.c.signatureBase != nil:
- return d.putSignaturesToLookaside(signatures)
+ return d.putSignaturesToLookaside(signatures, instanceDigest)
case d.c.supportsSignatures:
- return d.putSignaturesToAPIExtension(ctx, signatures)
+ return d.putSignaturesToAPIExtension(ctx, signatures, instanceDigest)
default:
return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
}
@@ -436,7 +476,7 @@ func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [
// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
// which is not nil.
-func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error {
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, instanceDigest *digest.Digest) error {
// FIXME? This overwrites files one at a time, definitely not atomic.
// A failure when updating signatures with a reordered copy could lose some of them.
@@ -445,14 +485,9 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) e
return nil
}
- if d.manifestDigest.String() == "" {
- // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
- return errors.Errorf("Unknown manifest digest, can't add signatures")
- }
-
// NOTE: Keep this in sync with docs/signature-protocols.md!
for i, signature := range signatures {
- url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
+ url := signatureStorageURL(d.c.signatureBase, *instanceDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
@@ -467,7 +502,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) e
// is enough for dockerImageSource to stop looking for other signatures, so that
// is sufficient.
for i := len(signatures); ; i++ {
- url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
+ url := signatureStorageURL(d.c.signatureBase, *instanceDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
@@ -527,22 +562,17 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error
}
// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
-func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte) error {
+func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
// Skip dealing with the manifest digest, or reading the old state, if not necessary.
if len(signatures) == 0 {
return nil
}
- if d.manifestDigest.String() == "" {
- // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
- return errors.Errorf("Unknown manifest digest, can't add signatures")
- }
-
// Because image signatures are a shared resource in Atomic Registry, the default upload
// always adds signatures. Eventually we should also allow removing signatures,
// but the X-Registry-Supports-Signatures API extension does not support that yet.
- existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, d.manifestDigest)
+ existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, *instanceDigest)
if err != nil {
return err
}
@@ -567,7 +597,7 @@ sigExists:
if err != nil || n != 16 {
return errors.Wrapf(err, "Error generating random signature len %d", n)
}
- signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes)
+ signatureName = fmt.Sprintf("%s@%032x", instanceDigest.String(), randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
break
}
@@ -606,6 +636,6 @@ sigExists:
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *dockerImageDestination) Commit(ctx context.Context) error {
+func (d *dockerImageDestination) Commit(context.Context, types.UnparsedImage) error {
return nil
}
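
The instanceDigest plumbing above changes how a caller drives an ImageDestination when pushing a manifest list: per-instance manifests are written under their own digests, and the list itself is written with a nil instanceDigest. A minimal caller-side sketch of that contract; the helper name, parameters, and wiring are illustrative and not part of containers/image:

package example

import (
	"context"

	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

// pushList is a hypothetical helper showing the instanceDigest contract from the
// caller's side: every per-platform manifest is written under its own digest,
// then the list itself is written with a nil instanceDigest so it becomes the
// primary manifest, and signatures attach to that primary digest.
func pushList(ctx context.Context, dest types.ImageDestination, listBlob []byte, instances map[digest.Digest][]byte, sigs [][]byte) error {
	for instDigest, blob := range instances {
		d := instDigest // copy, so the pointer passed below stays stable per iteration
		if err := dest.PutManifest(ctx, blob, &d); err != nil {
			return err
		}
	}
	if err := dest.PutManifest(ctx, listBlob, nil); err != nil { // nil: saving the list itself
		return err
	}
	return dest.PutSignatures(ctx, sigs, nil) // nil: signatures for the primary manifest
}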
diff --git a/vendor/github.com/containers/image/v4/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index 353b1a6c5..35beb30e5 100644
--- a/vendor/github.com/containers/image/v4/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -11,10 +11,10 @@ import (
"os"
"strconv"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/pkg/sysregistriesv2"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/types"
"github.com/docker/distribution/registry/client"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -103,8 +103,15 @@ func (s *dockerImageSource) Close() error {
return nil
}
-// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *dockerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *dockerImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
return nil, nil
}
@@ -232,9 +239,8 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
if err != nil {
return nil, 0, err
}
- if res.StatusCode != http.StatusOK {
- // print url also
- return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+ if err := httpResponseToError(res); err != nil {
+ return nil, 0, err
}
cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
return res.Body, getBlobSize(res), nil
diff --git a/vendor/github.com/containers/image/v4/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go
index c9ce75e0d..8b8e57968 100644
--- a/vendor/github.com/containers/image/v4/docker/docker_transport.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_transport.go
@@ -5,10 +5,10 @@ import (
"fmt"
"strings"
- "github.com/containers/image/v4/docker/policyconfiguration"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/policyconfiguration"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go
new file mode 100644
index 000000000..860868f41
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/errors.go
@@ -0,0 +1,43 @@
+package docker
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/docker/distribution/registry/client"
+ perrors "github.com/pkg/errors"
+)
+
+var (
+ // ErrV1NotSupported is returned when we're trying to talk to a
+ // docker V1 registry.
+ ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
+ // ErrTooManyRequests is returned when the status code returned is 429
+ ErrTooManyRequests = errors.New("too many requests to registry")
+)
+
+// ErrUnauthorizedForCredentials is returned when the status code returned is 401
+type ErrUnauthorizedForCredentials struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+ Err error
+}
+
+func (e ErrUnauthorizedForCredentials) Error() string {
+ return fmt.Sprintf("unable to retrieve auth token: invalid username/password: %s", e.Err.Error())
+}
+
+// httpResponseToError translates the http.Response into an error. It returns
+// nil if the response is not considered an error.
+func httpResponseToError(res *http.Response) error {
+ switch res.StatusCode {
+ case http.StatusOK:
+ return nil
+ case http.StatusTooManyRequests:
+ return ErrTooManyRequests
+ case http.StatusUnauthorized:
+ err := client.HandleErrorResponse(res)
+ return ErrUnauthorizedForCredentials{Err: err}
+ default:
+ return perrors.Errorf("invalid status code from registry %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+ }
+}
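
For callers, the point of the sentinel values and the ErrUnauthorizedForCredentials type is that registry failures can be told apart without matching on error strings. A small sketch of that, with a hypothetical helper, assuming the error reaches the caller unwrapped as in httpResponseToError above:

package example

import (
	"log"

	"github.com/containers/image/v5/docker"
)

// classifyRegistryError is a hypothetical helper that maps the package's
// exported errors to short descriptions.
func classifyRegistryError(err error) string {
	switch {
	case err == nil:
		return "ok"
	case err == docker.ErrTooManyRequests:
		return "rate-limited (HTTP 429), retry later"
	case err == docker.ErrV1NotSupported:
		return "V1 registries are not supported"
	}
	if unauth, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
		log.Printf("auth failure detail: %v", unauth.Err)
		return "bad credentials (HTTP 401)"
	}
	return "other registry error: " + err.Error()
}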
diff --git a/vendor/github.com/containers/image/v4/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go
index c43160f72..918c0f838 100644
--- a/vendor/github.com/containers/image/v4/docker/lookaside.go
+++ b/vendor/github.com/containers/image/v5/docker/lookaside.go
@@ -9,8 +9,8 @@ import (
"path/filepath"
"strings"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
"github.com/ghodss/yaml"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
diff --git a/vendor/github.com/containers/image/v4/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
index e2ed631c5..61d9aab9a 100644
--- a/vendor/github.com/containers/image/v4/docker/policyconfiguration/naming.go
+++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
@@ -3,7 +3,7 @@ package policyconfiguration
import (
"strings"
- "github.com/containers/image/v4/docker/reference"
+ "github.com/containers/image/v5/docker/reference"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/containers/image/v4/docker/reference/README.md b/vendor/github.com/containers/image/v5/docker/reference/README.md
index 3c4d74eb4..3c4d74eb4 100644
--- a/vendor/github.com/containers/image/v4/docker/reference/README.md
+++ b/vendor/github.com/containers/image/v5/docker/reference/README.md
diff --git a/vendor/github.com/containers/image/v4/docker/reference/helpers.go b/vendor/github.com/containers/image/v5/docker/reference/helpers.go
index 978df7eab..978df7eab 100644
--- a/vendor/github.com/containers/image/v4/docker/reference/helpers.go
+++ b/vendor/github.com/containers/image/v5/docker/reference/helpers.go
diff --git a/vendor/github.com/containers/image/v4/docker/reference/normalize.go b/vendor/github.com/containers/image/v5/docker/reference/normalize.go
index 6a86ec64f..6a86ec64f 100644
--- a/vendor/github.com/containers/image/v4/docker/reference/normalize.go
+++ b/vendor/github.com/containers/image/v5/docker/reference/normalize.go
diff --git a/vendor/github.com/containers/image/v4/docker/reference/reference.go b/vendor/github.com/containers/image/v5/docker/reference/reference.go
index 8c0c23b2f..8c0c23b2f 100644
--- a/vendor/github.com/containers/image/v4/docker/reference/reference.go
+++ b/vendor/github.com/containers/image/v5/docker/reference/reference.go
diff --git a/vendor/github.com/containers/image/v4/docker/reference/regexp.go b/vendor/github.com/containers/image/v5/docker/reference/regexp.go
index 786034932..786034932 100644
--- a/vendor/github.com/containers/image/v4/docker/reference/regexp.go
+++ b/vendor/github.com/containers/image/v5/docker/reference/regexp.go
diff --git a/vendor/github.com/containers/image/v4/docker/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go
index aec8404b6..b02c60bb3 100644
--- a/vendor/github.com/containers/image/v4/docker/tarfile/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go
@@ -12,10 +12,10 @@ import (
"path/filepath"
"time"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/internal/tmpdir"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -195,10 +195,15 @@ func (d *Destination) createRepositoriesFile(rootLayerID string) error {
}
// PutManifest writes manifest to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-func (d *Destination) PutManifest(ctx context.Context, m []byte) error {
+func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ if instanceDigest != nil {
+ return errors.New(`Manifest lists are not supported for docker tar files`)
+ }
// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
// so the caller trying a different manifest kind would be pointless.
var man manifest.Schema2
@@ -390,10 +395,13 @@ func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader
return nil
}
-// PutSignatures adds the given signatures to the docker tarfile (currently not
-// supported). MUST be called after PutManifest (signatures reference manifest
-// contents)
-func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
+func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+ if instanceDigest != nil {
+ return errors.Errorf(`Manifest lists are not supported for docker tar files`)
+ }
if len(signatures) != 0 {
return errors.Errorf("Storing signatures for docker tar files is not supported")
}
diff --git a/vendor/github.com/containers/image/v4/docker/tarfile/doc.go b/vendor/github.com/containers/image/v5/docker/tarfile/doc.go
index 4ea5369c0..4ea5369c0 100644
--- a/vendor/github.com/containers/image/v4/docker/tarfile/doc.go
+++ b/vendor/github.com/containers/image/v5/docker/tarfile/doc.go
diff --git a/vendor/github.com/containers/image/v4/docker/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/tarfile/src.go
index 78e4d6f65..ad0a3d2cb 100644
--- a/vendor/github.com/containers/image/v4/docker/tarfile/src.go
+++ b/vendor/github.com/containers/image/v5/docker/tarfile/src.go
@@ -11,10 +11,10 @@ import (
"path"
"sync"
- "github.com/containers/image/v4/internal/tmpdir"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/pkg/compression"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@@ -349,10 +349,12 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
// It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no secondary instances.
func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
if instanceDigest != nil {
// How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
- return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
+ return nil, "", errors.New(`Manifest lists are not supported by "docker-daemon:"`)
}
if s.generatedManifest == nil {
if err := s.ensureCachedDataIsPresent(); err != nil {
@@ -466,9 +468,8 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as there can be no secondary manifests.
func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
if instanceDigest != nil {
// How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
@@ -476,3 +477,14 @@ func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Diges
}
return [][]byte{}, nil
}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no secondary manifests.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *Source) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v4/docker/tarfile/types.go b/vendor/github.com/containers/image/v5/docker/tarfile/types.go
index c630f5227..ac222528a 100644
--- a/vendor/github.com/containers/image/v4/docker/tarfile/types.go
+++ b/vendor/github.com/containers/image/v5/docker/tarfile/types.go
@@ -1,7 +1,7 @@
package tarfile
import (
- "github.com/containers/image/v4/manifest"
+ "github.com/containers/image/v5/manifest"
"github.com/opencontainers/go-digest"
)
diff --git a/vendor/github.com/containers/image/v4/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
index 23664a74a..23664a74a 100644
--- a/vendor/github.com/containers/image/v4/docker/wwwauthenticate.go
+++ b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
diff --git a/vendor/github.com/containers/image/v5/image/docker_list.go b/vendor/github.com/containers/image/v5/image/docker_list.go
new file mode 100644
index 000000000..651c301aa
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/docker_list.go
@@ -0,0 +1,34 @@
+package image
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/pkg/errors"
+)
+
+func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
+ list, err := manifest.Schema2ListFromManifest(manblob)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error parsing schema2 manifest list")
+ }
+ targetManifestDigest, err := list.ChooseInstance(sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error choosing image instance")
+ }
+ manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error loading manifest for target platform")
+ }
+
+ matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
+ if err != nil {
+ return nil, errors.Wrap(err, "Error computing manifest digest")
+ }
+ if !matches {
+ return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+ }
+
+ return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
+}
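
The same resolution flow is available through the exported manifest API; a rough equivalent using a generic types.ImageSource (function name and error wording are illustrative):

package example

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
)

// resolveListInstance mirrors the unexported helper above: parse the schema2 list,
// choose the per-platform instance, fetch it from src, and verify it matches the
// digest the list promised.
func resolveListInstance(ctx context.Context, sys *types.SystemContext, src types.ImageSource, listBlob []byte) ([]byte, string, error) {
	list, err := manifest.Schema2ListFromManifest(listBlob)
	if err != nil {
		return nil, "", err
	}
	target, err := list.ChooseInstance(sys) // honours sys.ArchitectureChoice / sys.OSChoice
	if err != nil {
		return nil, "", err
	}
	manblob, mt, err := src.GetManifest(ctx, &target)
	if err != nil {
		return nil, "", err
	}
	matches, err := manifest.MatchesDigest(manblob, target)
	if err != nil {
		return nil, "", err
	}
	if !matches {
		return nil, "", fmt.Errorf("manifest does not match selected digest %s", target)
	}
	return manblob, mt, nil
}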
diff --git a/vendor/github.com/containers/image/v4/image/docker_schema1.go b/vendor/github.com/containers/image/v5/image/docker_schema1.go
index 97ebeac06..1a1c39d55 100644
--- a/vendor/github.com/containers/image/v4/image/docker_schema1.go
+++ b/vendor/github.com/containers/image/v5/image/docker_schema1.go
@@ -3,9 +3,9 @@ package image
import (
"context"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
diff --git a/vendor/github.com/containers/image/v4/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go
index 9841bbd42..254c13f78 100644
--- a/vendor/github.com/containers/image/v4/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go
@@ -10,10 +10,10 @@ import (
"io/ioutil"
"strings"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/pkg/blobinfocache/none"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
diff --git a/vendor/github.com/containers/image/v4/image/manifest.go b/vendor/github.com/containers/image/v5/image/manifest.go
index f384d2fb8..fe66da157 100644
--- a/vendor/github.com/containers/image/v4/image/manifest.go
+++ b/vendor/github.com/containers/image/v5/image/manifest.go
@@ -4,9 +4,9 @@ import (
"context"
"fmt"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -58,6 +58,8 @@ func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src
return manifestSchema2FromManifest(src, manblob)
case manifest.DockerV2ListMediaType:
return manifestSchema2FromManifestList(ctx, sys, src, manblob)
+ case imgspecv1.MediaTypeImageIndex:
+ return manifestOCI1FromImageIndex(ctx, sys, src, manblob)
default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
}
diff --git a/vendor/github.com/containers/image/v4/image/memory.go b/vendor/github.com/containers/image/v5/image/memory.go
index 255965e14..4c96b37d8 100644
--- a/vendor/github.com/containers/image/v4/image/memory.go
+++ b/vendor/github.com/containers/image/v5/image/memory.go
@@ -3,9 +3,8 @@ package image
import (
"context"
+ "github.com/containers/image/v5/types"
"github.com/pkg/errors"
-
- "github.com/containers/image/v4/types"
)
// memoryImage is a mostly-implementation of types.Image assembled from data
diff --git a/vendor/github.com/containers/image/v4/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go
index 142b0f28f..18a38d463 100644
--- a/vendor/github.com/containers/image/v4/image/oci.go
+++ b/vendor/github.com/containers/image/v5/image/oci.go
@@ -6,10 +6,10 @@ import (
"fmt"
"io/ioutil"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/pkg/blobinfocache/none"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
diff --git a/vendor/github.com/containers/image/v5/image/oci_index.go b/vendor/github.com/containers/image/v5/image/oci_index.go
new file mode 100644
index 000000000..022e03aca
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/oci_index.go
@@ -0,0 +1,34 @@
+package image
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/pkg/errors"
+)
+
+func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
+ index, err := manifest.OCI1IndexFromManifest(manblob)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error parsing OCI1 index")
+ }
+ targetManifestDigest, err := index.ChooseInstance(sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error choosing image instance")
+ }
+ manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error loading manifest for target platform")
+ }
+
+ matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
+ if err != nil {
+ return nil, errors.Wrap(err, "Error computing manifest digest")
+ }
+ if !matches {
+ return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+ }
+
+ return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
+}
diff --git a/vendor/github.com/containers/image/v4/image/sourced.go b/vendor/github.com/containers/image/v5/image/sourced.go
index d2a3e2ee6..3a016e1d0 100644
--- a/vendor/github.com/containers/image/v4/image/sourced.go
+++ b/vendor/github.com/containers/image/v5/image/sourced.go
@@ -6,7 +6,7 @@ package image
import (
"context"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
)
// imageCloser implements types.ImageCloser, perhaps allowing simple users
@@ -100,5 +100,5 @@ func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
}
func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
- return i.UnparsedImage.src.LayerInfosForCopy(ctx)
+ return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
}
diff --git a/vendor/github.com/containers/image/v4/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go
index d73107654..4e3028d85 100644
--- a/vendor/github.com/containers/image/v4/image/unparsed.go
+++ b/vendor/github.com/containers/image/v5/image/unparsed.go
@@ -3,9 +3,9 @@ package image
import (
"context"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
index 88e123cdd..88e123cdd 100644
--- a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/key.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
diff --git a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
index 4bf170156..4bf170156 100644
--- a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/keyring.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
diff --git a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
index ae9697149..ae9697149 100644
--- a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/perm.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
diff --git a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
index 196c82760..196c82760 100644
--- a/vendor/github.com/containers/image/v4/internal/pkg/keyctl/sys_linux.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
diff --git a/vendor/github.com/containers/image/v4/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
index 8c776929c..8c776929c 100644
--- a/vendor/github.com/containers/image/v4/internal/tmpdir/tmpdir.go
+++ b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
diff --git a/vendor/github.com/containers/image/v4/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
index 3c172504a..58527d713 100644
--- a/vendor/github.com/containers/image/v4/manifest/docker_schema1.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
@@ -6,8 +6,8 @@ import (
"strings"
"time"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
"github.com/docker/docker/api/types/versions"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
diff --git a/vendor/github.com/containers/image/v4/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
index 84b189c8e..d768d6e11 100644
--- a/vendor/github.com/containers/image/v4/manifest/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
@@ -5,9 +5,9 @@ import (
"fmt"
"time"
- "github.com/containers/image/v4/pkg/compression"
- "github.com/containers/image/v4/pkg/strslice"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/pkg/strslice"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
new file mode 100644
index 000000000..453976c48
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
@@ -0,0 +1,216 @@
+package manifest
+
+import (
+ "encoding/json"
+ "fmt"
+ "runtime"
+
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// Schema2PlatformSpec describes the platform which a particular manifest is
+// specialized for.
+type Schema2PlatformSpec struct {
+ Architecture string `json:"architecture"`
+ OS string `json:"os"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+ Variant string `json:"variant,omitempty"`
+ Features []string `json:"features,omitempty"` // removed in OCI
+}
+
+// Schema2ManifestDescriptor references a platform-specific manifest.
+type Schema2ManifestDescriptor struct {
+ Schema2Descriptor
+ Platform Schema2PlatformSpec `json:"platform"`
+}
+
+// Schema2List is a list of platform-specific manifests.
+type Schema2List struct {
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType"`
+ Manifests []Schema2ManifestDescriptor `json:"manifests"`
+}
+
+// MIMEType returns the MIME type of this particular manifest list.
+func (list *Schema2List) MIMEType() string {
+ return list.MediaType
+}
+
+// Instances returns a slice of digests of the manifests that this list knows of.
+func (list *Schema2List) Instances() []digest.Digest {
+ results := make([]digest.Digest, len(list.Manifests))
+ for i, m := range list.Manifests {
+ results[i] = m.Digest
+ }
+ return results
+}
+
+// Instance returns the ListUpdate of a particular instance in the list.
+func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
+ for _, manifest := range list.Manifests {
+ if manifest.Digest == instanceDigest {
+ return ListUpdate{
+ Digest: manifest.Digest,
+ Size: manifest.Size,
+ MediaType: manifest.MediaType,
+ }, nil
+ }
+ }
+ return ListUpdate{}, errors.Errorf("unable to find instance %s passed to Schema2List.Instance", instanceDigest)
+}
+
+// UpdateInstances updates the sizes, digests, and media types of the manifests
+// which the list catalogs.
+func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
+ if len(updates) != len(list.Manifests) {
+ return errors.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
+ }
+ for i := range updates {
+ if err := updates[i].Digest.Validate(); err != nil {
+ return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest", i+1, len(updates))
+ }
+ list.Manifests[i].Digest = updates[i].Digest
+ if updates[i].Size < 0 {
+ return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
+ }
+ list.Manifests[i].Size = updates[i].Size
+ if updates[i].MediaType == "" {
+ return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
+ }
+ if err := SupportedSchema2MediaType(updates[i].MediaType); err != nil && SupportedOCI1MediaType(updates[i].MediaType) != nil {
+ return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances had an unsupported media type (was %q): %q", i+1, len(updates), list.Manifests[i].MediaType, updates[i].MediaType)
+ }
+ list.Manifests[i].MediaType = updates[i].MediaType
+ }
+ return nil
+}
+
+// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
+// of the image which is appropriate for the current environment.
+func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
+ wantedArch := runtime.GOARCH
+ if ctx != nil && ctx.ArchitectureChoice != "" {
+ wantedArch = ctx.ArchitectureChoice
+ }
+ wantedOS := runtime.GOOS
+ if ctx != nil && ctx.OSChoice != "" {
+ wantedOS = ctx.OSChoice
+ }
+
+ for _, d := range list.Manifests {
+ if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS {
+ return d.Digest, nil
+ }
+ }
+ return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS)
+}
+
+// Serialize returns the list in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (list *Schema2List) Serialize() ([]byte, error) {
+ buf, err := json.Marshal(list)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error marshaling Schema2List %#v", list)
+ }
+ return buf, nil
+}
+
+// Schema2ListFromComponents creates a Schema2 manifest list instance from the
+// supplied data.
+func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List {
+ list := Schema2List{
+ SchemaVersion: 2,
+ MediaType: DockerV2ListMediaType,
+ Manifests: make([]Schema2ManifestDescriptor, len(components)),
+ }
+ for i, component := range components {
+ m := Schema2ManifestDescriptor{
+ Schema2Descriptor{
+ MediaType: component.MediaType,
+ Size: component.Size,
+ Digest: component.Digest,
+ URLs: dupStringSlice(component.URLs),
+ },
+ Schema2PlatformSpec{
+ Architecture: component.Platform.Architecture,
+ OS: component.Platform.OS,
+ OSVersion: component.Platform.OSVersion,
+ OSFeatures: dupStringSlice(component.Platform.OSFeatures),
+ Variant: component.Platform.Variant,
+ Features: dupStringSlice(component.Platform.Features),
+ },
+ }
+ list.Manifests[i] = m
+ }
+ return &list
+}
+
+// Schema2ListClone creates a deep copy of the passed-in list.
+func Schema2ListClone(list *Schema2List) *Schema2List {
+ return Schema2ListFromComponents(list.Manifests)
+}
+
+// ToOCI1Index returns the list encoded as an OCI1 index.
+func (list *Schema2List) ToOCI1Index() (*OCI1Index, error) {
+ components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
+ for _, manifest := range list.Manifests {
+ converted := imgspecv1.Descriptor{
+ MediaType: manifest.MediaType,
+ Size: manifest.Size,
+ Digest: manifest.Digest,
+ URLs: dupStringSlice(manifest.URLs),
+ Platform: &imgspecv1.Platform{
+ OS: manifest.Platform.OS,
+ Architecture: manifest.Platform.Architecture,
+ OSFeatures: dupStringSlice(manifest.Platform.OSFeatures),
+ OSVersion: manifest.Platform.OSVersion,
+ Variant: manifest.Platform.Variant,
+ },
+ }
+ components = append(components, converted)
+ }
+ oci := OCI1IndexFromComponents(components, nil)
+ return oci, nil
+}
+
+// ToSchema2List returns the list encoded as a Schema2 list.
+func (list *Schema2List) ToSchema2List() (*Schema2List, error) {
+ return Schema2ListClone(list), nil
+}
+
+// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
+// JSON, presumably generated by encoding a Schema2 manifest list.
+func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
+ list := Schema2List{
+ Manifests: []Schema2ManifestDescriptor{},
+ }
+ if err := json.Unmarshal(manifest, &list); err != nil {
+ return nil, errors.Wrapf(err, "error unmarshaling Schema2List %q", string(manifest))
+ }
+ return &list, nil
+}
+
+// Clone returns a deep copy of this list and its contents.
+func (list *Schema2List) Clone() List {
+ return Schema2ListClone(list)
+}
+
+// ConvertToMIMEType converts the passed-in manifest list to a manifest
+// list of the specified type.
+func (list *Schema2List) ConvertToMIMEType(manifestMIMEType string) (List, error) {
+ switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
+ case DockerV2ListMediaType:
+ return list.Clone(), nil
+ case imgspecv1.MediaTypeImageIndex:
+ return list.ToOCI1Index()
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
+ return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
+ default:
+ // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
+ }
+}
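
A short usage sketch for the new Schema2List type, assuming listBlob holds a raw Docker manifest list (application/vnd.docker.distribution.manifest.list.v2+json); the platform values are arbitrary examples:

package example

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
)

// chooseFromDockerList parses a schema2 manifest list, picks an instance for an
// explicitly requested platform, and re-encodes the list as an OCI image index.
func chooseFromDockerList(listBlob []byte) error {
	list, err := manifest.Schema2ListFromManifest(listBlob)
	if err != nil {
		return err
	}
	// Override the default runtime.GOARCH/runtime.GOOS selection.
	sys := &types.SystemContext{ArchitectureChoice: "arm64", OSChoice: "linux"}
	instance, err := list.ChooseInstance(sys)
	if err != nil {
		return err
	}
	fmt.Println("chosen instance:", instance)

	// The same content can be re-expressed as an OCI image index.
	index, err := list.ToOCI1Index()
	if err != nil {
		return err
	}
	fmt.Println("as OCI index, MIME type:", index.MIMEType())
	return nil
}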
diff --git a/vendor/github.com/containers/image/v5/manifest/list.go b/vendor/github.com/containers/image/v5/manifest/list.go
new file mode 100644
index 000000000..6d10430fd
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/list.go
@@ -0,0 +1,106 @@
+package manifest
+
+import (
+ "fmt"
+
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var (
+ // SupportedListMIMETypes is a list of the manifest list types that we know how to
+ // read/manipulate/write.
+ SupportedListMIMETypes = []string{
+ DockerV2ListMediaType,
+ imgspecv1.MediaTypeImageIndex,
+ }
+)
+
+// List is an interface for parsing, modifying lists of image manifests.
+// Callers can either use this abstract interface without understanding the details of the formats,
+// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
+// directly.
+type List interface {
+ // MIMEType returns the MIME type of this particular manifest list.
+ MIMEType() string
+
+ // Instances returns a list of the manifests that this list knows of, other than its own.
+ Instances() []digest.Digest
+
+ // Update information about the list's instances. The length of the passed-in slice must
+ // match the length of the list of instances which the list already contains, and every field
+ // must be specified.
+ UpdateInstances([]ListUpdate) error
+
+ // Instance returns the size and MIME type of a particular instance in the list.
+ Instance(digest.Digest) (ListUpdate, error)
+
+ // ChooseInstance selects which manifest is most appropriate for the platform described by the
+ // SystemContext, or for the current platform if the SystemContext doesn't specify any details.
+ ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
+
+ // Serialize returns the list in a blob format.
+ // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
+ // from one, even if no modifications were made!
+ Serialize() ([]byte, error)
+
+ // ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
+ ConvertToMIMEType(mimeType string) (List, error)
+
+ // Clone returns a deep copy of this list and its contents.
+ Clone() List
+}
+
+// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
+type ListUpdate struct {
+ Digest digest.Digest
+ Size int64
+ MediaType string
+}
+
+// dupStringSlice returns a deep copy of a slice of strings, or nil if the
+// source slice is empty.
+func dupStringSlice(list []string) []string {
+ if len(list) == 0 {
+ return nil
+ }
+ dup := make([]string, len(list))
+ for i := range list {
+ dup[i] = list[i]
+ }
+ return dup
+}
+
+// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
+// passed-in map is nil or has no keys.
+func dupStringStringMap(m map[string]string) map[string]string {
+ if len(m) == 0 {
+ return nil
+ }
+ result := make(map[string]string)
+ for k, v := range m {
+ result[k] = v
+ }
+ return result
+}
+
+// ListFromBlob parses a list of manifests.
+func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
+ normalized := NormalizedMIMEType(manifestMIMEType)
+ switch normalized {
+ case DockerV2ListMediaType:
+ return Schema2ListFromManifest(manifest)
+ case imgspecv1.MediaTypeImageIndex:
+ return OCI1IndexFromManifest(manifest)
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
+ return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
+ }
+ return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
+}
+
+// ConvertListToMIMEType converts the passed-in manifest list to a manifest
+// list of the specified type.
+func ConvertListToMIMEType(list List, manifestMIMEType string) (List, error) {
+ return list.ConvertToMIMEType(manifestMIMEType)
+}
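
Putting ListFromBlob together with the existing MIME-type helpers, a format-agnostic loader might look roughly like this (the helper name is illustrative):

package example

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

// loadAnyList sniffs the MIME type of a raw blob and, if it is one of the
// supported list types, loads it through the List interface.
func loadAnyList(blob []byte) (manifest.List, error) {
	mt := manifest.GuessMIMEType(blob)
	if mt == "" {
		return nil, fmt.Errorf("could not determine manifest MIME type")
	}
	if !manifest.MIMETypeIsMultiImage(mt) {
		return nil, fmt.Errorf("%q is a single-image manifest, not a list", mt)
	}
	list, err := manifest.ListFromBlob(blob, mt)
	if err != nil {
		return nil, err
	}
	// A nil SystemContext makes ChooseInstance fall back to the current platform.
	instance, err := list.ChooseInstance(nil)
	if err != nil {
		return nil, err
	}
	fmt.Println("instance for this platform:", instance)
	return list, nil
}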
diff --git a/vendor/github.com/containers/image/v4/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go
index 32af97ea8..5b4d341d8 100644
--- a/vendor/github.com/containers/image/v4/manifest/manifest.go
+++ b/vendor/github.com/containers/image/v5/manifest/manifest.go
@@ -4,7 +4,7 @@ import (
"encoding/json"
"fmt"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
"github.com/containers/libtrust"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -52,6 +52,7 @@ var DefaultRequestedManifestMIMETypes = []string{
DockerV2Schema1SignedMediaType,
DockerV2Schema1MediaType,
DockerV2ListMediaType,
+ imgspecv1.MediaTypeImageIndex,
}
// Manifest is an interface for parsing, modifying image manifests in isolation.
@@ -140,8 +141,11 @@ func GuessMIMEType(manifest []byte) string {
if err := json.Unmarshal(manifest, &ociIndex); err != nil {
return ""
}
- if len(ociIndex.Manifests) != 0 && ociIndex.Manifests[0].MediaType == imgspecv1.MediaTypeImageManifest {
- return imgspecv1.MediaTypeImageIndex
+ if len(ociIndex.Manifests) != 0 {
+ if ociMan.Config.MediaType == "" {
+ return imgspecv1.MediaTypeImageIndex
+ }
+ return ociMan.Config.MediaType
}
return DockerV2Schema2MediaType
}
@@ -199,7 +203,7 @@ func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
// MIMETypeIsMultiImage returns true if mimeType is a list of images
func MIMETypeIsMultiImage(mimeType string) bool {
- return mimeType == DockerV2ListMediaType
+ return mimeType == DockerV2ListMediaType || mimeType == imgspecv1.MediaTypeImageIndex
}
// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
@@ -213,6 +217,7 @@ func NormalizedMIMEType(input string) string {
return DockerV2Schema1SignedMediaType
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
imgspecv1.MediaTypeImageManifest,
+ imgspecv1.MediaTypeImageIndex,
DockerV2Schema2MediaType,
DockerV2ListMediaType:
return input
@@ -232,18 +237,19 @@ func NormalizedMIMEType(input string) string {
// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
func FromBlob(manblob []byte, mt string) (Manifest, error) {
- switch NormalizedMIMEType(mt) {
+ nmt := NormalizedMIMEType(mt)
+ switch nmt {
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType:
return Schema1FromManifest(manblob)
case imgspecv1.MediaTypeImageManifest:
return OCI1FromManifest(manblob)
case DockerV2Schema2MediaType:
return Schema2FromManifest(manblob)
- case DockerV2ListMediaType:
+ case DockerV2ListMediaType, imgspecv1.MediaTypeImageIndex:
return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
- default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
- return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
}
+ // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt)
}
// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
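
The FromBlob/ListFromBlob split introduced above can be summarized in a small routing helper (illustrative only):

package example

import (
	"github.com/containers/image/v5/manifest"
)

// parseBlob sketches the split FromBlob now enforces: list MIME types (Docker
// manifest list or OCI index) must go through ListFromBlob, everything else
// through FromBlob. Exactly one of the first two return values is non-nil on success.
func parseBlob(blob []byte, mimeType string) (manifest.Manifest, manifest.List, error) {
	if manifest.MIMETypeIsMultiImage(mimeType) {
		list, err := manifest.ListFromBlob(blob, mimeType)
		return nil, list, err
	}
	m, err := manifest.FromBlob(blob, mimeType)
	return m, nil, err
}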
diff --git a/vendor/github.com/containers/image/v4/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index e483bbb19..46c551b18 100644
--- a/vendor/github.com/containers/image/v4/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -4,8 +4,8 @@ import (
"encoding/json"
"fmt"
- "github.com/containers/image/v4/pkg/compression"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go
new file mode 100644
index 000000000..816503ce5
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go
@@ -0,0 +1,221 @@
+package manifest
+
+import (
+ "encoding/json"
+ "fmt"
+ "runtime"
+
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspec "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// OCI1Index is just an alias for the OCI index type, but one which we can
+// provide methods for.
+type OCI1Index struct {
+ imgspecv1.Index
+}
+
+// MIMEType returns the MIME type of this particular manifest index.
+func (index *OCI1Index) MIMEType() string {
+ return imgspecv1.MediaTypeImageIndex
+}
+
+// Instances returns a slice of digests of the manifests that this index knows of.
+func (index *OCI1Index) Instances() []digest.Digest {
+ results := make([]digest.Digest, len(index.Manifests))
+ for i, m := range index.Manifests {
+ results[i] = m.Digest
+ }
+ return results
+}
+
+// Instance returns the ListUpdate of a particular instance in the index.
+func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
+ for _, manifest := range index.Manifests {
+ if manifest.Digest == instanceDigest {
+ return ListUpdate{
+ Digest: manifest.Digest,
+ Size: manifest.Size,
+ MediaType: manifest.MediaType,
+ }, nil
+ }
+ }
+ return ListUpdate{}, errors.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
+}
+
+// UpdateInstances updates the sizes, digests, and media types of the manifests
+// which the list catalogs.
+func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
+ if len(updates) != len(index.Manifests) {
+ return errors.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
+ }
+ for i := range updates {
+ if err := updates[i].Digest.Validate(); err != nil {
+ return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest", i+1, len(updates))
+ }
+ index.Manifests[i].Digest = updates[i].Digest
+ if updates[i].Size < 0 {
+ return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
+ }
+ index.Manifests[i].Size = updates[i].Size
+ if updates[i].MediaType == "" {
+ return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
+ }
+ if err := SupportedOCI1MediaType(updates[i].MediaType); err != nil && SupportedSchema2MediaType(updates[i].MediaType) != nil && updates[i].MediaType != imgspecv1.MediaTypeImageIndex {
+ return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances had an unsupported media type (was %q): %q", i+1, len(updates), index.Manifests[i].MediaType, updates[i].MediaType)
+ }
+ index.Manifests[i].MediaType = updates[i].MediaType
+ }
+ return nil
+}
+
+// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
+// of the image which is appropriate for the current environment.
+func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
+ wantedArch := runtime.GOARCH
+ if ctx != nil && ctx.ArchitectureChoice != "" {
+ wantedArch = ctx.ArchitectureChoice
+ }
+ wantedOS := runtime.GOOS
+ if ctx != nil && ctx.OSChoice != "" {
+ wantedOS = ctx.OSChoice
+ }
+
+ for _, d := range index.Manifests {
+ if d.Platform != nil && d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS {
+ return d.Digest, nil
+ }
+ }
+ for _, d := range index.Manifests {
+ if d.Platform == nil {
+ return d.Digest, nil
+ }
+ }
+ return "", fmt.Errorf("no image found in image index for architecture %s, OS %s", wantedArch, wantedOS)
+}
+
+// Serialize returns the index in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (index *OCI1Index) Serialize() ([]byte, error) {
+ buf, err := json.Marshal(index)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error marshaling OCI1Index %#v", index)
+ }
+ return buf, nil
+}
+
+// OCI1IndexFromComponents creates an OCI1 image index instance from the
+// supplied data.
+func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index {
+ index := OCI1Index{
+ imgspecv1.Index{
+ Versioned: imgspec.Versioned{SchemaVersion: 2},
+ Manifests: make([]imgspecv1.Descriptor, len(components)),
+ Annotations: dupStringStringMap(annotations),
+ },
+ }
+ for i, component := range components {
+ var platform *imgspecv1.Platform
+ if component.Platform != nil {
+ platform = &imgspecv1.Platform{
+ Architecture: component.Platform.Architecture,
+ OS: component.Platform.OS,
+ OSVersion: component.Platform.OSVersion,
+ OSFeatures: dupStringSlice(component.Platform.OSFeatures),
+ Variant: component.Platform.Variant,
+ }
+ }
+ m := imgspecv1.Descriptor{
+ MediaType: component.MediaType,
+ Size: component.Size,
+ Digest: component.Digest,
+ URLs: dupStringSlice(component.URLs),
+ Annotations: dupStringStringMap(component.Annotations),
+ Platform: platform,
+ }
+ index.Manifests[i] = m
+ }
+ return &index
+}
+
+// OCI1IndexClone creates a deep copy of the passed-in index.
+func OCI1IndexClone(index *OCI1Index) *OCI1Index {
+ return OCI1IndexFromComponents(index.Manifests, index.Annotations)
+}
+
+// ToOCI1Index returns the index encoded as an OCI1 index.
+func (index *OCI1Index) ToOCI1Index() (*OCI1Index, error) {
+ return OCI1IndexClone(index), nil
+}
+
+// ToSchema2List returns the index encoded as a Schema2 list.
+func (index *OCI1Index) ToSchema2List() (*Schema2List, error) {
+ components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
+ for _, manifest := range index.Manifests {
+ platform := manifest.Platform
+ if platform == nil {
+ platform = &imgspecv1.Platform{
+ OS: runtime.GOOS,
+ Architecture: runtime.GOARCH,
+ }
+ }
+ converted := Schema2ManifestDescriptor{
+ Schema2Descriptor{
+ MediaType: manifest.MediaType,
+ Size: manifest.Size,
+ Digest: manifest.Digest,
+ URLs: dupStringSlice(manifest.URLs),
+ },
+ Schema2PlatformSpec{
+ OS: platform.OS,
+ Architecture: platform.Architecture,
+ OSFeatures: dupStringSlice(platform.OSFeatures),
+ OSVersion: platform.OSVersion,
+ Variant: platform.Variant,
+ },
+ }
+ components = append(components, converted)
+ }
+ s2 := Schema2ListFromComponents(components)
+ return s2, nil
+}
+
+// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled
+// JSON, presumably generated by encoding an OCI1 manifest index.
+func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
+ index := OCI1Index{
+ Index: imgspecv1.Index{
+ Versioned: imgspec.Versioned{SchemaVersion: 2},
+ Manifests: []imgspecv1.Descriptor{},
+ Annotations: make(map[string]string),
+ },
+ }
+ if err := json.Unmarshal(manifest, &index); err != nil {
+ return nil, errors.Wrapf(err, "error unmarshaling OCI1Index %q", string(manifest))
+ }
+ return &index, nil
+}
+
+// Clone returns a deep copy of this list and its contents.
+func (index *OCI1Index) Clone() List {
+ return OCI1IndexClone(index)
+}
+
+// ConvertToMIMEType converts the passed-in image index to a manifest list of
+// the specified type.
+func (index *OCI1Index) ConvertToMIMEType(manifestMIMEType string) (List, error) {
+ switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
+ case DockerV2ListMediaType:
+ return index.ToSchema2List()
+ case imgspecv1.MediaTypeImageIndex:
+ return index.Clone(), nil
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
+ return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType)
+ default:
+ // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType)
+ }
+}
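
A small sketch of the conversion path above, assuming the exported DockerV2ListMediaType constant from the same package; the inline index JSON is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/containers/image/v5/manifest"
)

// convertIndex parses an OCI index blob and converts it to the Docker
// schema2 manifest list representation; entries without platform data are
// filled in with the current runtime's GOOS/GOARCH by ToSchema2List.
func convertIndex(blob []byte) (manifest.List, error) {
	index, err := manifest.OCI1IndexFromManifest(blob)
	if err != nil {
		return nil, err
	}
	return index.ConvertToMIMEType(manifest.DockerV2ListMediaType)
}

func main() {
	if _, err := convertIndex([]byte(`{"schemaVersion":2,"manifests":[]}`)); err != nil {
		log.Fatal(err)
	}
	fmt.Println("converted OCI index to a Docker schema2 list")
}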
diff --git a/vendor/github.com/containers/image/v4/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
index 2455ed575..164d5522d 100644
--- a/vendor/github.com/containers/image/v4/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
@@ -5,8 +5,9 @@ import (
"io"
"os"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@@ -105,19 +106,26 @@ func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info ty
return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
}
-// PutManifest writes manifest to the destination
-func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte) error {
- return d.unpackedDest.PutManifest(ctx, m)
+// PutManifest writes the manifest to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
+func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ return d.unpackedDest.PutManifest(ctx, m, instanceDigest)
}
-func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
- return d.unpackedDest.PutSignatures(ctx, signatures)
+// PutSignatures writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+ return d.unpackedDest.PutSignatures(ctx, signatures, instanceDigest)
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted
// after the directory is made, it is tarred up into a file and the directory is deleted
-func (d *ociArchiveImageDestination) Commit(ctx context.Context) error {
- if err := d.unpackedDest.Commit(ctx); err != nil {
+func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil {
return errors.Wrapf(err, "error storing image %q", d.ref.image)
}
diff --git a/vendor/github.com/containers/image/v4/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
index 8a479883f..33a41d44b 100644
--- a/vendor/github.com/containers/image/v4/oci/archive/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
@@ -4,8 +4,8 @@ import (
"context"
"io"
- ocilayout "github.com/containers/image/v4/oci/layout"
- "github.com/containers/image/v4/types"
+ ocilayout "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@@ -96,7 +96,14 @@ func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDiges
return s.unpackedSrc.GetSignatures(ctx, instanceDigest)
}
-// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *ociArchiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
- return nil, nil
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *ociArchiveImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ return s.unpackedSrc.LayerInfosForCopy(ctx, instanceDigest)
}
diff --git a/vendor/github.com/containers/image/v4/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
index c8808ecb5..2d72a6fee 100644
--- a/vendor/github.com/containers/image/v4/oci/archive/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
@@ -7,14 +7,14 @@ import (
"os"
"strings"
- "github.com/containers/image/v4/directory/explicitfilepath"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/internal/tmpdir"
- "github.com/containers/image/v4/oci/internal"
- ocilayout "github.com/containers/image/v4/oci/layout"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/directory/explicitfilepath"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/oci/internal"
+ ocilayout "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/containers/image/v4/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
index c2012e50e..c2012e50e 100644
--- a/vendor/github.com/containers/image/v4/oci/internal/oci_util.go
+++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
diff --git a/vendor/github.com/containers/image/v4/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
index 20925d3dc..370e8d2cd 100644
--- a/vendor/github.com/containers/image/v4/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
@@ -9,8 +9,8 @@ import (
"path/filepath"
"runtime"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -38,6 +38,7 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (types.Imag
Versioned: imgspec.Versioned{
SchemaVersion: 2,
},
+ Annotations: make(map[string]string),
}
}
@@ -73,6 +74,7 @@ func (d *ociImageDestination) Close() error {
func (d *ociImageDestination) SupportedManifestMIMETypes() []string {
return []string{
imgspecv1.MediaTypeImageManifest,
+ imgspecv1.MediaTypeImageIndex,
}
}
@@ -205,20 +207,27 @@ func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
}
-// PutManifest writes manifest to the destination.
+// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types,
+// this should be either an OCI manifest (possibly converted to this format by the caller) or index,
+// neither of which we'll need to modify further.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
-func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte) error {
- digest, err := manifest.Digest(m)
- if err != nil {
- return err
+func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ var digest digest.Digest
+ var err error
+ if instanceDigest != nil {
+ digest = *instanceDigest
+ } else {
+ digest, err = manifest.Digest(m)
+ if err != nil {
+ return err
+ }
}
- desc := imgspecv1.Descriptor{}
- desc.Digest = digest
- // TODO(runcom): beaware and add support for OCI manifest list
- desc.MediaType = imgspecv1.MediaTypeImageManifest
- desc.Size = int64(len(m))
blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir)
if err != nil {
@@ -231,32 +240,59 @@ func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte) error {
return err
}
- if d.ref.image != "" {
- annotations := make(map[string]string)
- annotations["org.opencontainers.image.ref.name"] = d.ref.image
- desc.Annotations = annotations
+ if instanceDigest != nil {
+ return nil
}
- desc.Platform = &imgspecv1.Platform{
- Architecture: runtime.GOARCH,
- OS: runtime.GOOS,
+
+ // If we had platform information, we'd build an imgspecv1.Platform structure here.
+
+ // Start filling out the descriptor for this entry
+ desc := imgspecv1.Descriptor{}
+ desc.Digest = digest
+ desc.Size = int64(len(m))
+ if d.ref.image != "" {
+ desc.Annotations = make(map[string]string)
+ desc.Annotations[imgspecv1.AnnotationRefName] = d.ref.image
}
+
+ // If we knew the MIME type, we wouldn't have to guess here.
+ desc.MediaType = manifest.GuessMIMEType(m)
+
d.addManifest(&desc)
return nil
}
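
A hedged sketch of how a caller might drive the new PutManifest signature when copying a manifest list: per-instance manifests are written under their own digests first, and the index is written last with a nil instanceDigest so it becomes the primary, named entry. The helper name and blob arguments are illustrative only:

package example

import (
	"context"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
)

// putListAndInstances writes each per-platform manifest under its own digest,
// then writes the index itself as the primary manifest (nil instanceDigest).
func putListAndInstances(ctx context.Context, dest types.ImageDestination, instanceBlobs [][]byte, indexBlob []byte) error {
	for _, blob := range instanceBlobs {
		dig, err := manifest.Digest(blob)
		if err != nil {
			return err
		}
		if err := dest.PutManifest(ctx, blob, &dig); err != nil {
			return err
		}
	}
	return dest.PutManifest(ctx, indexBlob, nil)
}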
func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
+ // If the new entry has a name, remove any conflicting names which we already have.
+ if desc.Annotations != nil && desc.Annotations[imgspecv1.AnnotationRefName] != "" {
+ // The name is being set on a new entry, so remove any older ones that had the same name.
+ // We might be storing an index and all of its component images, and we'll want to attach
+ // the name to the last one, which is the index.
+ for i, manifest := range d.index.Manifests {
+ if manifest.Annotations[imgspecv1.AnnotationRefName] == desc.Annotations[imgspecv1.AnnotationRefName] {
+ delete(d.index.Manifests[i].Annotations, imgspecv1.AnnotationRefName)
+ break
+ }
+ }
+ }
+ // If it has the same digest as another entry in the index, we already overwrote the file,
+ // so just pick up the other information.
for i, manifest := range d.index.Manifests {
- if manifest.Annotations["org.opencontainers.image.ref.name"] == desc.Annotations["org.opencontainers.image.ref.name"] {
- // TODO Should there first be a cleanup based on the descriptor we are going to replace?
+ if manifest.Digest == desc.Digest {
+ // Replace it completely.
d.index.Manifests[i] = *desc
return
}
}
+ // It's a new entry to be added to the index.
d.index.Manifests = append(d.index.Manifests, *desc)
}
-func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+// PutSignatures would add the given signatures to the oci layout (currently not supported).
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
if len(signatures) != 0 {
return errors.Errorf("Pushing signatures for OCI images is not supported")
}
@@ -267,7 +303,7 @@ func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *ociImageDestination) Commit(ctx context.Context) error {
+func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
return err
}
diff --git a/vendor/github.com/containers/image/v4/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
index dd6c6c4a6..f515203df 100644
--- a/vendor/github.com/containers/image/v4/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -8,8 +8,9 @@ import (
"os"
"strconv"
- "github.com/containers/image/v4/pkg/tlsclientconfig"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/tlsclientconfig"
+ "github.com/containers/image/v5/types"
"github.com/docker/go-connections/tlsconfig"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -18,6 +19,7 @@ import (
type ociImageSource struct {
ref ociReference
+ index *imgspecv1.Index
descriptor imgspecv1.Descriptor
client *http.Client
sharedBlobDir string
@@ -41,7 +43,11 @@ func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSour
if err != nil {
return nil, err
}
- d := &ociImageSource{ref: ref, descriptor: descriptor, client: client}
+ index, err := ref.getIndex()
+ if err != nil {
+ return nil, err
+ }
+ d := &ociImageSource{ref: ref, index: index, descriptor: descriptor, client: client}
if sys != nil {
// TODO(jonboulle): check dir existence?
d.sharedBlobDir = sys.OCISharedBlobDirPath
@@ -66,28 +72,33 @@ func (s *ociImageSource) Close() error {
func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
var dig digest.Digest
var mimeType string
+ var err error
+
if instanceDigest == nil {
dig = digest.Digest(s.descriptor.Digest)
mimeType = s.descriptor.MediaType
} else {
dig = *instanceDigest
- // XXX: instanceDigest means that we don't immediately have the context of what
- // mediaType the manifest has. In OCI this means that we don't know
- // what reference it came from, so we just *assume* that its
- // MediaTypeImageManifest.
- // FIXME: We should actually be able to look up the manifest in the index,
- // and see the MIME type there.
- mimeType = imgspecv1.MediaTypeImageManifest
+ for _, md := range s.index.Manifests {
+ if md.Digest == dig {
+ mimeType = md.MediaType
+ break
+ }
+ }
}
manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir)
if err != nil {
return nil, "", err
}
+
m, err := ioutil.ReadFile(manifestPath)
if err != nil {
return nil, "", err
}
+ if mimeType == "" {
+ mimeType = manifest.GuessMIMEType(m)
+ }
return m, mimeType, nil
}
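
As a usage sketch of the source side, assuming layout.NewReference from the oci/layout package shown in this diff; the layout directory is a placeholder, and the empty image name selects the only image in the layout:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	ctx := context.Background()
	ref, err := layout.NewReference("/path/to/oci-layout", "") // hypothetical layout directory
	if err != nil {
		log.Fatal(err)
	}
	src, err := ref.NewImageSource(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()
	// A nil instanceDigest asks for the primary manifest; the MIME type is
	// taken from the layout's index entry, or guessed from the blob contents.
	blob, mimeType, err := src.GetManifest(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("got %d-byte manifest of type %s\n", len(blob), mimeType)
}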
@@ -157,8 +168,15 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io
return nil, 0, errWrap
}
-// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *ociImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *ociImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
return nil, nil
}
diff --git a/vendor/github.com/containers/image/v4/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
index 259852b4d..c662c9a7a 100644
--- a/vendor/github.com/containers/image/v4/oci/layout/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
@@ -8,12 +8,12 @@ import (
"path/filepath"
"strings"
- "github.com/containers/image/v4/directory/explicitfilepath"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/oci/internal"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/directory/explicitfilepath"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/oci/internal"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@@ -195,10 +195,10 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
} else {
// if image specified, look through all manifests for a match
for _, md := range index.Manifests {
- if md.MediaType != imgspecv1.MediaTypeImageManifest {
+ if md.MediaType != imgspecv1.MediaTypeImageManifest && md.MediaType != imgspecv1.MediaTypeImageIndex {
continue
}
- refName, ok := md.Annotations["org.opencontainers.image.ref.name"]
+ refName, ok := md.Annotations[imgspecv1.AnnotationRefName]
if !ok {
continue
}
diff --git a/vendor/github.com/containers/image/v4/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
index f45dc24c4..f45dc24c4 100644
--- a/vendor/github.com/containers/image/v4/openshift/openshift-copies.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
diff --git a/vendor/github.com/containers/image/v4/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go
index 51fff6269..016de4803 100644
--- a/vendor/github.com/containers/image/v4/openshift/openshift.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift.go
@@ -12,11 +12,11 @@ import (
"net/url"
"strings"
- "github.com/containers/image/v4/docker"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/types"
- "github.com/containers/image/v4/version"
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/image/v5/version"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -231,16 +231,16 @@ func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo,
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- var imageName string
+ var imageStreamImageName string
if instanceDigest == nil {
if err := s.ensureImageIsResolved(ctx); err != nil {
return nil, err
}
- imageName = s.imageStreamImageName
+ imageStreamImageName = s.imageStreamImageName
} else {
- imageName = instanceDigest.String()
+ imageStreamImageName = instanceDigest.String()
}
- image, err := s.client.getImage(ctx, imageName)
+ image, err := s.client.getImage(ctx, imageStreamImageName)
if err != nil {
return nil, err
}
@@ -253,8 +253,15 @@ func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest
return sigs, nil
}
-// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
return nil, nil
}
@@ -414,20 +421,28 @@ func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info typ
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
-func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte) error {
- manifestDigest, err := manifest.Digest(m)
- if err != nil {
- return err
+func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ if instanceDigest == nil {
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ d.imageStreamImageName = manifestDigest.String()
}
- d.imageStreamImageName = manifestDigest.String()
-
- return d.docker.PutManifest(ctx, m)
+ return d.docker.PutManifest(ctx, m, instanceDigest)
}
-func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
- if d.imageStreamImageName == "" {
- return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
+func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+ var imageStreamName string
+ if instanceDigest == nil {
+ if d.imageStreamImageName == "" {
+ return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
+ }
+ imageStreamName = d.imageStreamImageName
+ } else {
+ imageStreamName = instanceDigest.String()
}
+
// Because image signatures are a shared resource in Atomic Registry, the default upload
// always adds signatures. Eventually we should also allow removing signatures.
@@ -435,7 +450,7 @@ func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signature
return nil // No need to even read the old state.
}
- image, err := d.client.getImage(ctx, d.imageStreamImageName)
+ image, err := d.client.getImage(ctx, imageStreamName)
if err != nil {
return err
}
@@ -460,7 +475,7 @@ sigExists:
if err != nil || n != 16 {
return errors.Wrapf(err, "Error generating random signature len %d", n)
}
- signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes)
+ signatureName = fmt.Sprintf("%s@%032x", imageStreamName, randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
break
}
@@ -489,8 +504,8 @@ sigExists:
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *openshiftImageDestination) Commit(ctx context.Context) error {
- return d.docker.Commit(ctx)
+func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ return d.docker.Commit(ctx, unparsedToplevel)
}
// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.
diff --git a/vendor/github.com/containers/image/v4/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
index f00c94561..6bbb43be2 100644
--- a/vendor/github.com/containers/image/v4/openshift/openshift_transport.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
@@ -6,11 +6,11 @@ import (
"regexp"
"strings"
- "github.com/containers/image/v4/docker/policyconfiguration"
- "github.com/containers/image/v4/docker/reference"
- genericImage "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/policyconfiguration"
+ "github.com/containers/image/v5/docker/reference"
+ genericImage "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/containers/image/v4/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
index 9e1436e29..c442b4d2e 100644
--- a/vendor/github.com/containers/image/v4/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -20,8 +20,8 @@ import (
"time"
"unsafe"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
"github.com/klauspost/pgzip"
"github.com/opencontainers/go-digest"
@@ -376,10 +376,16 @@ func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.
}
// PutManifest writes manifest to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
-func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error {
+func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
+ if instanceDigest != nil {
+ return errors.New(`Manifest lists are not supported by "ostree:"`)
+ }
+
d.manifest = string(manifestBlob)
if err := json.Unmarshal(manifestBlob, &d.schema); err != nil {
@@ -400,7 +406,14 @@ func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob [
return ioutil.WriteFile(manifestPath, manifestBlob, 0644)
}
-func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+// PutSignatures writes signatures to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
+func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+ if instanceDigest != nil {
+ return errors.New(`Manifest lists are not supported by "ostree:"`)
+ }
+
path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0))
if err := ensureParentDirectoryExists(path); err != nil {
return err
@@ -416,7 +429,7 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [
return nil
}
-func (d *ostreeImageDestination) Commit(ctx context.Context) error {
+func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
diff --git a/vendor/github.com/containers/image/v4/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
index ecb6e3f84..4948ec664 100644
--- a/vendor/github.com/containers/image/v4/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
@@ -13,8 +13,8 @@ import (
"strings"
"unsafe"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/ioutils"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
@@ -98,9 +98,11 @@ func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) {
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no non-default instances.
func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
if instanceDigest != nil {
- return nil, "", errors.Errorf(`Manifest lists are not supported by "ostree:"`)
+ return nil, "", errors.New(`Manifest lists are not supported by "ostree:"`)
}
if s.repo == nil {
repo, err := openRepo(s.ref.repo)
@@ -275,7 +277,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
// Ensure s.compressed is initialized. It is built by LayerInfosForCopy.
if s.compressed == nil {
- _, err := s.LayerInfosForCopy(ctx)
+ _, err := s.LayerInfosForCopy(ctx, nil)
if err != nil {
return nil, -1, err
}
@@ -337,9 +339,12 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
return rc, layerSize, nil
}
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as there can be no secondary manifests.
func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
if instanceDigest != nil {
- return nil, errors.New("manifest lists are not supported by this transport")
+ return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
}
lenSignatures, err := s.getLenSignatures()
if err != nil {
@@ -372,9 +377,18 @@ func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *d
return signatures, nil
}
-// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
-// the image, after they've been decompressed.
-func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no secondary manifests.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ if instanceDigest != nil {
+ return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
+ }
+
updatedBlobInfos := []types.BlobInfo{}
manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
if err != nil {
diff --git a/vendor/github.com/containers/image/v4/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
index d720cb7ac..a55147b85 100644
--- a/vendor/github.com/containers/image/v4/ostree/ostree_transport.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
@@ -11,11 +11,11 @@ import (
"regexp"
"strings"
- "github.com/containers/image/v4/directory/explicitfilepath"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/directory/explicitfilepath"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go
index 85eb7d6f1..9c9a17a58 100644
--- a/vendor/github.com/containers/image/v4/pkg/blobinfocache/boltdb/boltdb.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go
@@ -7,8 +7,8 @@ import (
"sync"
"time"
- "github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+ "github.com/containers/image/v5/types"
bolt "github.com/etcd-io/bbolt"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go
index af136c36d..952bcf5a1 100644
--- a/vendor/github.com/containers/image/v4/pkg/blobinfocache/default.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go
@@ -6,9 +6,9 @@ import (
"path/filepath"
"strconv"
- "github.com/containers/image/v4/pkg/blobinfocache/boltdb"
- "github.com/containers/image/v4/pkg/blobinfocache/memory"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/pkg/blobinfocache/boltdb"
+ "github.com/containers/image/v5/pkg/blobinfocache/memory"
+ "github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
)
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
index 7820119b0..5deca4a82 100644
--- a/vendor/github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize/prioritize.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -6,7 +6,7 @@ import (
"sort"
"time"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
index c51b9f5ce..8f28c6623 100644
--- a/vendor/github.com/containers/image/v4/pkg/blobinfocache/memory/memory.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
@@ -5,8 +5,8 @@ import (
"sync"
"time"
- "github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+ "github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
diff --git a/vendor/github.com/containers/image/v4/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
index c5ce29a45..fa1879afd 100644
--- a/vendor/github.com/containers/image/v4/pkg/blobinfocache/none/none.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
@@ -2,7 +2,7 @@
package none
import (
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
diff --git a/vendor/github.com/containers/image/v4/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
index fd2f21549..04d231c6d 100644
--- a/vendor/github.com/containers/image/v4/pkg/compression/compression.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
@@ -7,8 +7,8 @@ import (
"io"
"io/ioutil"
- "github.com/containers/image/v4/pkg/compression/internal"
- "github.com/containers/image/v4/pkg/compression/types"
+ "github.com/containers/image/v5/pkg/compression/internal"
+ "github.com/containers/image/v5/pkg/compression/types"
"github.com/klauspost/pgzip"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
diff --git a/vendor/github.com/containers/image/v4/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
index 6092a9517..6092a9517 100644
--- a/vendor/github.com/containers/image/v4/pkg/compression/internal/types.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
diff --git a/vendor/github.com/containers/image/v4/pkg/compression/types/types.go b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
index ea43dc8cd..f96eff2e3 100644
--- a/vendor/github.com/containers/image/v4/pkg/compression/types/types.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
@@ -1,7 +1,7 @@
package types
import (
- "github.com/containers/image/v4/pkg/compression/internal"
+ "github.com/containers/image/v5/pkg/compression/internal"
)
// DecompressorFunc returns the decompressed stream, given a compressed stream.
diff --git a/vendor/github.com/containers/image/v4/pkg/compression/zstd.go b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go
index 962fe9676..962fe9676 100644
--- a/vendor/github.com/containers/image/v4/pkg/compression/zstd.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go
diff --git a/vendor/github.com/containers/image/v4/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index e720dc865..b7dddd0d6 100644
--- a/vendor/github.com/containers/image/v4/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -9,7 +9,7 @@ import (
"path/filepath"
"strings"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
helperclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker-credential-helpers/credentials"
"github.com/docker/docker/pkg/homedir"
@@ -26,6 +26,11 @@ type dockerConfigFile struct {
CredHelpers map[string]string `json:"credHelpers,omitempty"`
}
+type authPath struct {
+ path string
+ legacyFormat bool
+}
+
var (
defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json")
xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json")
@@ -84,28 +89,28 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin
}
}
- dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath)
- var paths []string
- pathToAuth, err := getPathToAuth(sys)
+ paths := []authPath{}
+ pathToAuth, lf, err := getPathToAuth(sys)
if err == nil {
- paths = append(paths, pathToAuth)
+ paths = append(paths, authPath{path: pathToAuth, legacyFormat: lf})
} else {
// Error means that the path set for XDG_RUNTIME_DIR does not exist
// but we don't want to completely fail in the case that the user is pulling a public image
// Logging the error as a warning instead and moving on to pulling the image
logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
}
- paths = append(paths, filepath.Join(homedir.Get(), dockerHomePath), dockerLegacyPath)
+ paths = append(paths,
+ authPath{path: filepath.Join(homedir.Get(), dockerHomePath), legacyFormat: false},
+ authPath{path: filepath.Join(homedir.Get(), dockerLegacyHomePath), legacyFormat: true})
for _, path := range paths {
- legacyFormat := path == dockerLegacyPath
- username, password, err := findAuthentication(registry, path, legacyFormat)
+ username, password, err := findAuthentication(registry, path.path, path.legacyFormat)
if err != nil {
logrus.Debugf("Credentials not found")
return "", "", err
}
if username != "" && password != "" {
- logrus.Debugf("Returning credentials from %s", path)
+ logrus.Debugf("Returning credentials from %s", path.path)
return username, password, nil
}
}
@@ -163,13 +168,16 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
// The path can be overridden by the user if the overwrite-path flag is set
// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers
// Otherwise, the auth.json file is stored in /run/containers/UID
-func getPathToAuth(sys *types.SystemContext) (string, error) {
+func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
if sys != nil {
if sys.AuthFilePath != "" {
- return sys.AuthFilePath, nil
+ return sys.AuthFilePath, false, nil
+ }
+ if sys.LegacyFormatAuthFilePath != "" {
+ return sys.LegacyFormatAuthFilePath, true, nil
}
if sys.RootForImplicitAbsolutePaths != "" {
- return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), nil
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
}
}
@@ -182,11 +190,11 @@ func getPathToAuth(sys *types.SystemContext) (string, error) {
// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
// or made a typo while setting the environment variable,
// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
- return "", errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir)
+ return "", false, errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir)
} // else ignore err and let the caller fail accessing xdgRuntimeDirPath.
- return filepath.Join(runtimeDir, xdgRuntimeDirPath), nil
+ return filepath.Join(runtimeDir, xdgRuntimeDirPath), false, nil
}
- return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), nil
+ return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), false, nil
}
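
A short sketch of the caller-facing effect of this resolution order, using GetAuthentication from this package; the auth file path and registry are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/image/v5/types"
)

func main() {
	sys := &types.SystemContext{
		// Takes precedence over XDG_RUNTIME_DIR/containers/auth.json and
		// the per-UID default under /run/containers.
		AuthFilePath: "/tmp/auth.json",
	}
	username, password, err := config.GetAuthentication(sys, "registry.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("credentials found:", username != "" && password != "")
}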
// readJSONFile unmarshals the authentications stored in the auth.json file and returns it
@@ -220,7 +228,7 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
// modifyJSON writes to auth.json if the dockerConfigFile has been updated
func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error {
- path, err := getPathToAuth(sys)
+ path, legacyFormat, err := getPathToAuth(sys)
if err != nil {
return err
}
@@ -232,6 +240,9 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
}
}
+ if legacyFormat {
+ return fmt.Errorf("writes to %s using legacy format are not supported", path)
+ }
auths, err := readJSONFile(path, false)
if err != nil {
return errors.Wrapf(err, "error reading JSON file %q", path)
diff --git a/vendor/github.com/containers/image/v4/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
index 0cd73528b..43f2d5a85 100644
--- a/vendor/github.com/containers/image/v4/pkg/docker/config/config_linux.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
@@ -4,7 +4,7 @@ import (
"fmt"
"strings"
- "github.com/containers/image/v4/internal/pkg/keyctl"
+ "github.com/containers/image/v5/internal/pkg/keyctl"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
diff --git a/vendor/github.com/containers/image/v4/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
index 9b0e8bee2..9b0e8bee2 100644
--- a/vendor/github.com/containers/image/v4/pkg/docker/config/config_unsupported.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
diff --git a/vendor/github.com/containers/image/v4/pkg/strslice/README.md b/vendor/github.com/containers/image/v5/pkg/strslice/README.md
index ae6097e82..ae6097e82 100644
--- a/vendor/github.com/containers/image/v4/pkg/strslice/README.md
+++ b/vendor/github.com/containers/image/v5/pkg/strslice/README.md
diff --git a/vendor/github.com/containers/image/v4/pkg/strslice/strslice.go b/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go
index bad493fb8..bad493fb8 100644
--- a/vendor/github.com/containers/image/v4/pkg/strslice/strslice.go
+++ b/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go
diff --git a/vendor/github.com/containers/image/v4/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
index 0cd60778f..ff802cefd 100644
--- a/vendor/github.com/containers/image/v4/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
@@ -10,11 +10,10 @@ import (
"sync"
"github.com/BurntSushi/toml"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
-
- "github.com/containers/image/v4/docker/reference"
)
// systemRegistriesConfPath is the path to the system-wide registry
diff --git a/vendor/github.com/containers/image/v4/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
index 6785564e8..6785564e8 100644
--- a/vendor/github.com/containers/image/v4/pkg/tlsclientconfig/tlsclientconfig.go
+++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
diff --git a/vendor/github.com/containers/image/v4/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go
index c3ac33d48..07fdd42a9 100644
--- a/vendor/github.com/containers/image/v4/signature/docker.go
+++ b/vendor/github.com/containers/image/v5/signature/docker.go
@@ -5,8 +5,8 @@ package signature
import (
"fmt"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/manifest"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
"github.com/opencontainers/go-digest"
)
diff --git a/vendor/github.com/containers/image/v4/signature/json.go b/vendor/github.com/containers/image/v5/signature/json.go
index 9e592863d..9e592863d 100644
--- a/vendor/github.com/containers/image/v4/signature/json.go
+++ b/vendor/github.com/containers/image/v5/signature/json.go
diff --git a/vendor/github.com/containers/image/v4/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go
index bdf26c531..bdf26c531 100644
--- a/vendor/github.com/containers/image/v4/signature/mechanism.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism.go
diff --git a/vendor/github.com/containers/image/v4/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
index 4825ab27c..4825ab27c 100644
--- a/vendor/github.com/containers/image/v4/signature/mechanism_gpgme.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
diff --git a/vendor/github.com/containers/image/v4/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
index eccd610c9..eccd610c9 100644
--- a/vendor/github.com/containers/image/v4/signature/mechanism_openpgp.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
diff --git a/vendor/github.com/containers/image/v4/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go
index bb229f5f1..3eee70bc2 100644
--- a/vendor/github.com/containers/image/v4/signature/policy_config.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_config.go
@@ -19,9 +19,9 @@ import (
"io/ioutil"
"path/filepath"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go
index 110d40f7c..e94de2a9c 100644
--- a/vendor/github.com/containers/image/v4/signature/policy_eval.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go
@@ -8,7 +8,7 @@ package signature
import (
"context"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
index 18fb651d1..55cdd3054 100644
--- a/vendor/github.com/containers/image/v4/signature/policy_eval_baselayer.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
@@ -5,7 +5,7 @@ package signature
import (
"context"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
)
diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
index b8188da5e..26cca4759 100644
--- a/vendor/github.com/containers/image/v4/signature/policy_eval_signedby.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
@@ -8,11 +8,10 @@ import (
"io/ioutil"
"strings"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
-
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/types"
- "github.com/opencontainers/go-digest"
)
func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
diff --git a/vendor/github.com/containers/image/v4/signature/policy_eval_simple.go b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
index 7fbcf4a94..f949088b5 100644
--- a/vendor/github.com/containers/image/v4/signature/policy_eval_simple.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
@@ -6,8 +6,8 @@ import (
"context"
"fmt"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
)
func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
diff --git a/vendor/github.com/containers/image/v4/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
index 016d737fb..a148ede52 100644
--- a/vendor/github.com/containers/image/v4/signature/policy_reference_match.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
@@ -5,9 +5,9 @@ package signature
import (
"fmt"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
)
// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
diff --git a/vendor/github.com/containers/image/v4/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go
index d3b33bb7a..d3b33bb7a 100644
--- a/vendor/github.com/containers/image/v4/signature/policy_types.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_types.go
diff --git a/vendor/github.com/containers/image/v4/signature/signature.go b/vendor/github.com/containers/image/v5/signature/signature.go
index 09c4de0b3..44e70b3b9 100644
--- a/vendor/github.com/containers/image/v4/signature/signature.go
+++ b/vendor/github.com/containers/image/v5/signature/signature.go
@@ -9,10 +9,9 @@ import (
"fmt"
"time"
+ "github.com/containers/image/v5/version"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
-
- "github.com/containers/image/v4/version"
- "github.com/opencontainers/go-digest"
)
const (
diff --git a/vendor/github.com/containers/image/v4/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go
index 4e913b84c..2b89f329f 100644
--- a/vendor/github.com/containers/image/v4/storage/storage_image.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_image.go
@@ -6,6 +6,7 @@ import (
"bytes"
"context"
"encoding/json"
+ stderrors "errors"
"fmt"
"io"
"io/ioutil"
@@ -14,12 +15,12 @@ import (
"sync"
"sync/atomic"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/internal/tmpdir"
- "github.com/containers/image/v4/manifest"
- "github.com/containers/image/v4/pkg/blobinfocache/none"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils"
@@ -32,38 +33,38 @@ import (
var (
// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
// with a digest-based name that doesn't match its contents.
- ErrBlobDigestMismatch = errors.New("blob digest mismatch")
+ ErrBlobDigestMismatch = stderrors.New("blob digest mismatch")
// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
// with an expected size that doesn't match the reader.
- ErrBlobSizeMismatch = errors.New("blob size mismatch")
- // ErrNoManifestLists is returned when GetManifest() is called.
- // with a non-nil instanceDigest.
- ErrNoManifestLists = errors.New("manifest lists are not supported by this transport")
+ ErrBlobSizeMismatch = stderrors.New("blob size mismatch")
// ErrNoSuchImage is returned when we attempt to access an image which
// doesn't exist in the storage area.
ErrNoSuchImage = storage.ErrNotAnImage
)
type storageImageSource struct {
- imageRef storageReference
- image *storage.Image
- layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
- cachedManifest []byte // A cached copy of the manifest, if already known, or nil
- getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
- SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+ imageRef storageReference
+ image *storage.Image
+ layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
+ cachedManifest []byte // A cached copy of the manifest, if already known, or nil
+ getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
+ SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+ SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice
}
type storageImageDestination struct {
- imageRef storageReference
- directory string // Temporary directory where we store blobs until Commit() time
- nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
- manifest []byte // Manifest contents, temporary
- signatures []byte // Signature contents, temporary
- putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions
- blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
- fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
- filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
- SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+ imageRef storageReference
+ directory string // Temporary directory where we store blobs until Commit() time
+ nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
+ manifest []byte // Manifest contents, temporary
+ signatures []byte // Signature contents, temporary
+ signatureses map[digest.Digest][]byte // Instance signature contents, temporary
+ putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions
+ blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
+ fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
+ filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
+ SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+ SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
}
type storageImageCloser struct {
@@ -72,26 +73,33 @@ type storageImageCloser struct {
}
// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
-// If a specific manifest digest is explicitly requested by the user, the key retruned function should be used preferably;
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
func manifestBigDataKey(digest digest.Digest) string {
return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
}
+// signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably to the plain "signatures" key.
+func signatureBigDataKey(digest digest.Digest) string {
+ return "signature-" + digest.Encoded()
+}
+
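The two key helpers above define the naming convention for manifest and signature big-data items in containers-storage. Below is a minimal, hedged sketch of a reader that honors that convention; loadManifest is a hypothetical helper (not part of the vendored code) that tries the digest-specific key first and falls back to the legacy storage.ImageDigestBigDataKey.

package sketch

import (
	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

// loadManifest reads a manifest using the digest-specific key convention shown above,
// falling back to the legacy key for images written by older code.
func loadManifest(store storage.Store, imageID string, manifestDigest digest.Digest) ([]byte, error) {
	// Same convention as manifestBigDataKey above.
	key := storage.ImageDigestManifestBigDataNamePrefix + "-" + manifestDigest.String()
	if blob, err := store.ImageBigData(imageID, key); err == nil {
		return blob, nil
	}
	// Compatibility path: older writers stored the manifest only under this key.
	return store.ImageBigData(imageID, storage.ImageDigestBigDataKey)
}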
// newImageSource sets up an image for reading.
-func newImageSource(imageRef storageReference) (*storageImageSource, error) {
+func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
// First, locate the image.
- img, err := imageRef.resolveImage()
+ img, err := imageRef.resolveImage(sys)
if err != nil {
return nil, err
}
// Build the reader object.
image := &storageImageSource{
- imageRef: imageRef,
- image: img,
- layerPosition: make(map[digest.Digest]int),
- SignatureSizes: []int{},
+ imageRef: imageRef,
+ image: img,
+ layerPosition: make(map[digest.Digest]int),
+ SignatureSizes: []int{},
+ SignaturesSizes: make(map[digest.Digest][]int),
}
if img.Metadata != "" {
if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
@@ -182,7 +190,12 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC
// GetManifest() reads the image's manifest.
func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
if instanceDigest != nil {
- return nil, "", ErrNoManifestLists
+ key := manifestBigDataKey(*instanceDigest)
+ blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "error reading manifest for image instance %q", *instanceDigest)
+ }
+ return blob, manifest.GuessMIMEType(blob), err
}
if len(s.cachedManifest) == 0 {
// The manifest is stored as a big data item.
@@ -214,11 +227,14 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
// the image, after they've been decompressed.
-func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
- manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
+func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest)
if err != nil {
return nil, errors.Wrapf(err, "error reading image manifest for %q", s.image.ID)
}
+ if manifest.MIMETypeIsMultiImage(manifestType) {
+ return nil, errors.Errorf("can't copy layers for a manifest list (shouldn't be attempted)")
+ }
man, err := manifest.FromBlob(manifestBlob, manifestType)
if err != nil {
return nil, errors.Wrapf(err, "error parsing image manifest for %q", s.image.ID)
@@ -292,25 +308,33 @@ func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []
// GetSignatures() parses the image's signatures blob into a slice of byte slices.
func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
- if instanceDigest != nil {
- return nil, ErrNoManifestLists
- }
var offset int
sigslice := [][]byte{}
signature := []byte{}
- if len(s.SignatureSizes) > 0 {
- signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, "signatures")
+ signatureSizes := s.SignatureSizes
+ key := "signatures"
+ instance := "default instance"
+ if instanceDigest != nil {
+ signatureSizes = s.SignaturesSizes[*instanceDigest]
+ key = signatureBigDataKey(*instanceDigest)
+ instance = instanceDigest.Encoded()
+ }
+ if len(signatureSizes) > 0 {
+ signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.image.ID)
+ return nil, errors.Wrapf(err, "error looking up signatures data for image %q (%s)", s.image.ID, instance)
}
signature = signatureBlob
}
- for _, length := range s.SignatureSizes {
+ for _, length := range signatureSizes {
+ if offset+length > len(signature) {
+ return nil, errors.Errorf("error looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, offset+length, len(signature))
+ }
sigslice = append(sigslice, signature[offset:offset+length])
offset += length
}
if offset != len(signature) {
- return nil, errors.Errorf("signatures data contained %d extra bytes", len(signatures)-offset)
+ return nil, errors.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signature)-offset)
}
return sigslice, nil
}
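The loop above reconstructs individual signatures from a single concatenated blob plus a list of per-signature sizes recorded in the image metadata. A standalone sketch of that slicing logic follows; splitSignatures is a hypothetical name, not part of this package.

package sketch

import "fmt"

// splitSignatures re-slices a concatenated signature blob using the recorded sizes,
// mirroring the bounds checks performed by GetSignatures above.
func splitSignatures(blob []byte, sizes []int) ([][]byte, error) {
	sigs := [][]byte{}
	offset := 0
	for _, length := range sizes {
		if offset+length > len(blob) {
			return nil, fmt.Errorf("expected at least %d bytes, only found %d", offset+length, len(blob))
		}
		sigs = append(sigs, blob[offset:offset+length])
		offset += length
	}
	if offset != len(blob) {
		return nil, fmt.Errorf("signatures data contained %d extra bytes", len(blob)-offset)
	}
	return sigs, nil
}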
@@ -323,12 +347,14 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e
return nil, errors.Wrapf(err, "error creating a temporary directory")
}
image := &storageImageDestination{
- imageRef: imageRef,
- directory: directory,
- blobDiffIDs: make(map[digest.Digest]digest.Digest),
- fileSizes: make(map[digest.Digest]int64),
- filenames: make(map[digest.Digest]string),
- SignatureSizes: []int{},
+ imageRef: imageRef,
+ directory: directory,
+ signatureses: make(map[digest.Digest][]byte),
+ blobDiffIDs: make(map[digest.Digest]digest.Digest),
+ fileSizes: make(map[digest.Digest]int64),
+ filenames: make(map[digest.Digest]string),
+ SignatureSizes: []int{},
+ SignaturesSizes: make(map[digest.Digest][]int),
}
return image, nil
}
@@ -404,10 +430,10 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
// Ensure that any information that we were given about the blob is correct.
if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() {
- return errorBlobInfo, ErrBlobDigestMismatch
+ return errorBlobInfo, errors.WithStack(ErrBlobDigestMismatch)
}
if blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
- return errorBlobInfo, ErrBlobSizeMismatch
+ return errorBlobInfo, errors.WithStack(ErrBlobSizeMismatch)
}
// Record information about the blob.
s.putBlobMutex.Lock()
@@ -579,7 +605,34 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er
return nil, errors.New("blob not found")
}
-func (s *storageImageDestination) Commit(ctx context.Context) error {
+func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ if len(s.manifest) == 0 {
+ return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
+ }
+ toplevelManifest, _, err := unparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving top-level manifest")
+ }
+ // If the name we're saving to includes a digest, then check that the
+ // manifests that we're about to save all either match the one from the
+ // unparsedToplevel, or match the digest in the name that we're using.
+ if s.imageRef.named != nil {
+ if digested, ok := s.imageRef.named.(reference.Digested); ok {
+ matches, err := manifest.MatchesDigest(s.manifest, digested.Digest())
+ if err != nil {
+ return err
+ }
+ if !matches {
+ matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest())
+ if err != nil {
+ return err
+ }
+ }
+ if !matches {
+ return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest())
+ }
+ }
+ }
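The block above enforces that, when the destination name carries a digest, either the per-image manifest or the top-level (possibly manifest-list) manifest matches it before anything is committed under that name. A hedged sketch of the same check, factored into a hypothetical helper:

package sketch

import (
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/manifest"
)

// manifestMatchesNamedDigest returns true when either the image manifest or the
// top-level manifest matches the digest embedded in a name@sha256:... reference.
func manifestMatchesNamedDigest(named reference.Named, imageManifest, toplevelManifest []byte) (bool, error) {
	digested, ok := named.(reference.Digested)
	if !ok {
		return true, nil // no digest in the name, nothing to enforce
	}
	matches, err := manifest.MatchesDigest(imageManifest, digested.Digest())
	if err != nil || matches {
		return matches, err
	}
	return manifest.MatchesDigest(toplevelManifest, digested.Digest())
}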
// Find the list of layer blobs.
if len(s.manifest) == 0 {
return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
@@ -747,7 +800,8 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID)
}
}
- // Set the reference's name on the image.
+ // Set the reference's name on the image. We don't need to worry about avoiding duplicate
+ // values because SetNames() will deduplicate the list that we pass to it.
if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil {
names := []string{}
if name != nil {
@@ -765,26 +819,43 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
}
logrus.Debugf("set names of image %q to %v", img.ID, names)
}
- // Save the manifest. Allow looking it up by digest by using the key convention defined by the Store.
+ // Save the unparsedToplevel's manifest.
+ if len(toplevelManifest) != 0 {
+ manifestDigest, err := manifest.Digest(toplevelManifest)
+ if err != nil {
+ return errors.Wrapf(err, "error digesting top-level manifest")
+ }
+ key := manifestBigDataKey(manifestDigest)
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil {
+ if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+ logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+ }
+ logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err)
+ return errors.Wrapf(err, "error saving top-level manifest for image %q", img.ID)
+ }
+ }
+ // Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
// Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
// and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
manifestDigest, err := manifest.Digest(s.manifest)
if err != nil {
return errors.Wrapf(err, "error computing manifest digest")
}
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest, manifest.Digest); err != nil {
+ key := manifestBigDataKey(manifestDigest)
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
- return err
+ return errors.Wrapf(err, "error saving manifest for image %q", img.ID)
}
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest, manifest.Digest); err != nil {
+ key = storage.ImageDigestBigDataKey
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
- return err
+ return errors.Wrapf(err, "error saving manifest for image %q", img.ID)
}
// Save the signatures, if we have any.
if len(s.signatures) > 0 {
@@ -793,7 +864,17 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
- return err
+ return errors.Wrapf(err, "error saving signatures for image %q", img.ID)
+ }
+ }
+ for instanceDigest, signatures := range s.signatureses {
+ key := signatureBigDataKey(instanceDigest)
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil {
+ if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+ logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+ }
+ logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
+ return errors.Wrapf(err, "error saving signatures for image %q", img.ID)
}
}
// Save our metadata.
@@ -803,7 +884,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
- return err
+ return errors.Wrapf(err, "error encoding metadata for image %q", img.ID)
}
if len(metadata) != 0 {
if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
@@ -811,7 +892,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
- return err
+ return errors.Wrapf(err, "error saving metadata for image %q", img.ID)
}
logrus.Debugf("saved image metadata %q", string(metadata))
}
@@ -830,21 +911,10 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string {
}
// PutManifest writes the manifest to the destination.
-func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error {
- if s.imageRef.named != nil {
- if digested, ok := s.imageRef.named.(reference.Digested); ok {
- matches, err := manifest.MatchesDigest(manifestBlob, digested.Digest())
- if err != nil {
- return err
- }
- if !matches {
- return fmt.Errorf("Manifest does not match expected digest %s", digested.Digest())
- }
- }
- }
-
- s.manifest = make([]byte, len(manifestBlob))
- copy(s.manifest, manifestBlob)
+func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
+ newBlob := make([]byte, len(manifestBlob))
+ copy(newBlob, manifestBlob)
+ s.manifest = newBlob
return nil
}
@@ -873,7 +943,7 @@ func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool {
}
// PutSignatures records the image's signatures for committing as a single data blob.
-func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
sizes := []int{}
sigblob := []byte{}
for _, sig := range signatures {
@@ -883,8 +953,21 @@ func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures
copy(newblob[len(sigblob):], sig)
sigblob = newblob
}
- s.signatures = sigblob
- s.SignatureSizes = sizes
+ if instanceDigest == nil {
+ s.signatures = sigblob
+ s.SignatureSizes = sizes
+ }
+ if instanceDigest == nil && len(s.manifest) > 0 {
+ manifestDigest, err := manifest.Digest(s.manifest)
+ if err != nil {
+ return err
+ }
+ instanceDigest = &manifestDigest
+ }
+ if instanceDigest != nil {
+ s.signatureses[*instanceDigest] = sigblob
+ s.SignaturesSizes[*instanceDigest] = sizes
+ }
return nil
}
@@ -940,7 +1023,7 @@ func (s *storageImageCloser) Size() (int64, error) {
// newImage creates an image that also knows its size
func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) {
- src, err := newImageSource(s)
+ src, err := newImageSource(ctx, sys, s)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/v4/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go
index 7ad20817b..4e137ad1b 100644
--- a/vendor/github.com/containers/image/v4/storage/storage_reference.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go
@@ -6,9 +6,12 @@ import (
"context"
"strings"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -51,9 +54,79 @@ func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
return false
}
+// imageMatchesSystemContext checks if the passed-in image both contains a
+// manifest that matches the passed-in digest, and identifies itself as being
+// appropriate for running on the system that matches sys.
+// If we somehow ended up sharing the same storage among multiple types of
+// systems, and managed to download multiple images from the same manifest
+// list, their image records will all contain copies of the manifest list, and
+// this check will help us decide which of them we want to return when we've
+// been asked to resolve an image reference that uses the list's digest to a
+// specific image ID.
+func imageMatchesSystemContext(store storage.Store, img *storage.Image, manifestDigest digest.Digest, sys *types.SystemContext) bool {
+ // First, check if the image record has a manifest that matches the
+ // specified digest.
+ key := manifestBigDataKey(manifestDigest)
+ manifestBytes, err := store.ImageBigData(img.ID, key)
+ if err != nil {
+ return false
+ }
+ // The manifest is either a list, or not a list. If it's a list, find
+ // the digest of the instance that matches the current system, and try
+ // to load that manifest from the image record, and use it.
+ manifestType := manifest.GuessMIMEType(manifestBytes)
+ if manifest.MIMETypeIsMultiImage(manifestType) {
+ list, err := manifest.ListFromBlob(manifestBytes, manifestType)
+ if err != nil {
+ return false
+ }
+ manifestDigest, err = list.ChooseInstance(sys)
+ if err != nil {
+ return false
+ }
+ key = manifestBigDataKey(manifestDigest)
+ manifestBytes, err = store.ImageBigData(img.ID, key)
+ if err != nil {
+ return false
+ }
+ manifestType = manifest.GuessMIMEType(manifestBytes)
+ }
+ // Load the image's configuration blob.
+ m, err := manifest.FromBlob(manifestBytes, manifestType)
+ if err != nil {
+ return false
+ }
+ getConfig := func(blobInfo types.BlobInfo) ([]byte, error) {
+ return store.ImageBigData(img.ID, blobInfo.Digest.String())
+ }
+ ii, err := m.Inspect(getConfig)
+ if err != nil {
+ return false
+ }
+ // Build a dummy index containing one instance and information about
+ // the image's target system from the image's configuration.
+ index := manifest.OCI1IndexFromComponents([]imgspecv1.Descriptor{{
+ MediaType: imgspecv1.MediaTypeImageManifest,
+ Digest: manifestDigest,
+ Size: int64(len(manifestBytes)),
+ Platform: &imgspecv1.Platform{
+ OS: ii.Os,
+ Architecture: ii.Architecture,
+ },
+ }}, nil)
+ // Check that ChooseInstance() would select this image for this system,
+ // from a list of images.
+ instanceDigest, err := index.ChooseInstance(sys)
+ if err != nil {
+ return false
+ }
+ // Double-check that we can read the runnable image's manifest from the
+ // image record.
+ key = manifestBigDataKey(instanceDigest)
+ _, err = store.ImageBigData(img.ID, key)
+ return err == nil
+}
+
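imageMatchesSystemContext decides whether a stored image is the right instance for the current system by wrapping its platform data in a throwaway OCI index and re-running ChooseInstance. A self-contained sketch of that trick; wouldRunHere is a hypothetical name and not part of the vendored code.

package sketch

import (
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// wouldRunHere reports whether ChooseInstance() would pick the single described image
// for the given SystemContext, mirroring the dummy-index construction above.
func wouldRunHere(sys *types.SystemContext, manifestDigest digest.Digest, size int64, os, arch string) bool {
	index := manifest.OCI1IndexFromComponents([]imgspecv1.Descriptor{{
		MediaType: imgspecv1.MediaTypeImageManifest,
		Digest:    manifestDigest,
		Size:      size,
		Platform:  &imgspecv1.Platform{OS: os, Architecture: arch},
	}}, nil)
	chosen, err := index.ChooseInstance(sys)
	return err == nil && chosen == manifestDigest
}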
// Resolve the reference's name to an image ID in the store, if there's already
// one present with the same name or ID, and return the image.
-func (s *storageReference) resolveImage() (*storage.Image, error) {
+func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Image, error) {
var loadedImage *storage.Image
if s.id == "" && s.named != nil {
// Look for an image that has the expanded reference name as an explicit Name value.
@@ -72,9 +145,10 @@ func (s *storageReference) resolveImage() (*storage.Image, error) {
if err == nil && len(images) > 0 {
for _, image := range images {
if imageMatchesRepo(image, s.named) {
- loadedImage = image
- s.id = image.ID
- break
+ if loadedImage == nil || imageMatchesSystemContext(s.transport.store, image, digested.Digest(), sys) {
+ loadedImage = image
+ s.id = image.ID
+ }
}
}
}
@@ -202,7 +276,7 @@ func (s storageReference) NewImage(ctx context.Context, sys *types.SystemContext
}
func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- img, err := s.resolveImage()
+ img, err := s.resolveImage(sys)
if err != nil {
return err
}
@@ -217,7 +291,7 @@ func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemCont
}
func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
- return newImageSource(s)
+ return newImageSource(ctx, sys, s)
}
func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
diff --git a/vendor/github.com/containers/image/v4/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go
index 48b909c03..62a091da4 100644
--- a/vendor/github.com/containers/image/v4/storage/storage_transport.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go
@@ -7,14 +7,13 @@ import (
"path/filepath"
"strings"
- "github.com/pkg/errors"
-
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -288,7 +287,7 @@ func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageRefe
}
if sref, ok := ref.(*storageReference); ok {
tmpRef := *sref
- if img, err := tmpRef.resolveImage(); err == nil {
+ if img, err := tmpRef.resolveImage(&types.SystemContext{}); err == nil {
return img, nil
}
}
diff --git a/vendor/github.com/containers/image/v4/tarball/doc.go b/vendor/github.com/containers/image/v5/tarball/doc.go
index ebbe156bd..ead2d4263 100644
--- a/vendor/github.com/containers/image/v4/tarball/doc.go
+++ b/vendor/github.com/containers/image/v5/tarball/doc.go
@@ -7,11 +7,10 @@
// import (
// "fmt"
//
-// cp "github.com/containers/image/v4/copy"
-// "github.com/containers/image/v4/tarball"
-// "github.com/containers/image/v4/transports/alltransports"
-//
-// imgspecv1 "github.com/containers/image/v4/transports/alltransports"
+// cp "github.com/containers/image/v5/copy"
+// "github.com/containers/image/v5/tarball"
+// "github.com/containers/image/v5/transports/alltransports"
+// imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
// )
//
// func imageFromTarball() {
diff --git a/vendor/github.com/containers/image/v4/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
index d33c20de1..00150c53b 100644
--- a/vendor/github.com/containers/image/v4/tarball/tarball_reference.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
@@ -6,9 +6,9 @@ import (
"os"
"strings"
- "github.com/containers/image/v4/docker/reference"
- "github.com/containers/image/v4/image"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
diff --git a/vendor/github.com/containers/image/v4/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
index ead1a50bd..694ad17bd 100644
--- a/vendor/github.com/containers/image/v4/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
@@ -12,7 +12,7 @@ import (
"strings"
"time"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
imgspecs "github.com/opencontainers/image-spec/specs-go"
@@ -248,9 +248,8 @@ func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *d
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as there can be no secondary manifests.
func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
if instanceDigest != nil {
return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName)
@@ -262,7 +261,14 @@ func (is *tarballImageSource) Reference() types.ImageReference {
return &is.reference
}
-// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (*tarballImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (*tarballImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
return nil, nil
}
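Callers treat a nil result from LayerInfosForCopy as "use the layer list from the manifest as-is". A hedged caller-side sketch; effectiveLayerInfos is a hypothetical helper and assumes the manifest has already been parsed.

package sketch

import (
	"context"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

// effectiveLayerInfos returns the updated layer infos if the source provides them,
// and otherwise falls back to the BlobInfos listed in the parsed manifest.
func effectiveLayerInfos(ctx context.Context, src types.ImageSource, parsed manifest.Manifest, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
	updated, err := src.LayerInfosForCopy(ctx, instanceDigest)
	if err != nil {
		return nil, err
	}
	if updated != nil {
		return updated, nil
	}
	infos := []types.BlobInfo{}
	for _, layer := range parsed.LayerInfos() {
		infos = append(infos, layer.BlobInfo)
	}
	return infos, nil
}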
diff --git a/vendor/github.com/containers/image/v4/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
index 84874cfbf..113545cb7 100644
--- a/vendor/github.com/containers/image/v4/tarball/tarball_transport.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
@@ -7,8 +7,8 @@ import (
"os"
"strings"
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
)
const (
diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
index ae68fb8e6..2110a091d 100644
--- a/vendor/github.com/containers/image/v4/transports/alltransports/alltransports.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
@@ -6,17 +6,17 @@ import (
// register all known transports
// NOTE: Make sure docs/containers-policy.json.5.md is updated when adding or updating
// a transport.
- _ "github.com/containers/image/v4/directory"
- _ "github.com/containers/image/v4/docker"
- _ "github.com/containers/image/v4/docker/archive"
- _ "github.com/containers/image/v4/oci/archive"
- _ "github.com/containers/image/v4/oci/layout"
- _ "github.com/containers/image/v4/openshift"
- _ "github.com/containers/image/v4/tarball"
+ _ "github.com/containers/image/v5/directory"
+ _ "github.com/containers/image/v5/docker"
+ _ "github.com/containers/image/v5/docker/archive"
+ _ "github.com/containers/image/v5/oci/archive"
+ _ "github.com/containers/image/v5/oci/layout"
+ _ "github.com/containers/image/v5/openshift"
+ _ "github.com/containers/image/v5/tarball"
// The ostree transport is registered by ostree*.go
// The storage transport is registered by storage*.go
- "github.com/containers/image/v4/transports"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go
index d3fc18b2c..82224052e 100644
--- a/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go
@@ -4,5 +4,5 @@ package alltransports
import (
// Register the docker-daemon transport
- _ "github.com/containers/image/v4/docker/daemon"
+ _ "github.com/containers/image/v5/docker/daemon"
)
diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go
index 82e055377..d13700799 100644
--- a/vendor/github.com/containers/image/v4/transports/alltransports/docker_daemon_stub.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go
@@ -2,7 +2,7 @@
package alltransports
-import "github.com/containers/image/v4/transports"
+import "github.com/containers/image/v5/transports"
func init() {
transports.Register(transports.NewStubTransport("docker-daemon"))
diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/ostree.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go
index 891696616..72432d1ef 100644
--- a/vendor/github.com/containers/image/v4/transports/alltransports/ostree.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go
@@ -4,5 +4,5 @@ package alltransports
import (
// Register the ostree transport
- _ "github.com/containers/image/v4/ostree"
+ _ "github.com/containers/image/v5/ostree"
)
diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go
index 892518d5c..f4a862bd4 100644
--- a/vendor/github.com/containers/image/v4/transports/alltransports/ostree_stub.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go
@@ -2,7 +2,7 @@
package alltransports
-import "github.com/containers/image/v4/transports"
+import "github.com/containers/image/v5/transports"
func init() {
transports.Register(transports.NewStubTransport("ostree"))
diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/storage.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go
index 96f3e17fc..7041eb876 100644
--- a/vendor/github.com/containers/image/v4/transports/alltransports/storage.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go
@@ -4,5 +4,5 @@ package alltransports
import (
// Register the storage transport
- _ "github.com/containers/image/v4/storage"
+ _ "github.com/containers/image/v5/storage"
)
diff --git a/vendor/github.com/containers/image/v4/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go
index 14c942116..67f0291cc 100644
--- a/vendor/github.com/containers/image/v4/transports/alltransports/storage_stub.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go
@@ -2,7 +2,7 @@
package alltransports
-import "github.com/containers/image/v4/transports"
+import "github.com/containers/image/v5/transports"
func init() {
transports.Register(transports.NewStubTransport("containers-storage"))
diff --git a/vendor/github.com/containers/image/v4/transports/stub.go b/vendor/github.com/containers/image/v5/transports/stub.go
index e3ee62031..2c186a90c 100644
--- a/vendor/github.com/containers/image/v4/transports/stub.go
+++ b/vendor/github.com/containers/image/v5/transports/stub.go
@@ -3,7 +3,7 @@ package transports
import (
"fmt"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
)
// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
diff --git a/vendor/github.com/containers/image/v4/transports/transports.go b/vendor/github.com/containers/image/v5/transports/transports.go
index 8bdb46b4b..46ee3710f 100644
--- a/vendor/github.com/containers/image/v4/transports/transports.go
+++ b/vendor/github.com/containers/image/v5/transports/transports.go
@@ -5,7 +5,7 @@ import (
"sort"
"sync"
- "github.com/containers/image/v4/types"
+ "github.com/containers/image/v5/types"
)
// knownTransports is a registry of known ImageTransport instances.
diff --git a/vendor/github.com/containers/image/v4/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
index af11a2b21..2db8c7827 100644
--- a/vendor/github.com/containers/image/v4/types/types.go
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -5,8 +5,8 @@ import (
"io"
"time"
- "github.com/containers/image/v4/docker/reference"
- compression "github.com/containers/image/v4/pkg/compression/types"
+ "github.com/containers/image/v5/docker/reference"
+ compression "github.com/containers/image/v5/pkg/compression/types"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -227,10 +227,15 @@ type ImageSource interface {
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error)
- // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
+ // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+ // blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+ // to read the image's layers.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+ // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+ // (e.g. if the source never returns manifest lists).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
- LayerInfosForCopy(ctx context.Context) ([]BlobInfo, error)
+ LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]BlobInfo, error)
}
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
@@ -286,16 +291,24 @@ type ImageDestination interface {
// May use and/or update cache.
TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
// PutManifest writes manifest to the destination.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for
+ // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+ // It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+ // by `manifest.Digest()`.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
- PutManifest(ctx context.Context, manifest []byte) error
- PutSignatures(ctx context.Context, signatures [][]byte) error
+ PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error
+ // PutSignatures writes a set of signatures to the destination.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+ // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+ // MUST be called after PutManifest (signatures may reference manifest contents).
+ PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
- Commit(ctx context.Context) error
+ Commit(ctx context.Context, unparsedToplevel UnparsedImage) error
}
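The v5 ImageDestination contract adds an instanceDigest parameter to PutManifest and PutSignatures and hands Commit the top-level UnparsedImage. A minimal sketch of the call order a copier would use for a single (non-list) image; commitSingleImage is a hypothetical helper.

package sketch

import (
	"context"

	"github.com/containers/image/v5/types"
)

// commitSingleImage shows the ordering the interface requires: PutManifest first, then
// PutSignatures (which may reference the manifest), then Commit with the top-level image.
func commitSingleImage(ctx context.Context, dest types.ImageDestination, unparsedToplevel types.UnparsedImage, manifestBlob []byte, sigs [][]byte) error {
	if err := dest.PutManifest(ctx, manifestBlob, nil); err != nil { // nil: not a manifest-list instance
		return err
	}
	if err := dest.PutSignatures(ctx, sigs, nil); err != nil {
		return err
	}
	return dest.Commit(ctx, unparsedToplevel)
}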
// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
@@ -462,8 +475,15 @@ type SystemContext struct {
RegistriesDirPath string
// Path to the system-wide registries configuration file
SystemRegistriesConfPath string
- // If not "", overrides the default path for the authentication file
+ // If not "", overrides the default path for the authentication file, but only new format files
AuthFilePath string
+ // if not "", overrides the default path for the authentication file, but with the legacy format;
+ // the code currently will by default look for legacy format files like .dockercfg in the $HOME dir;
+ // but in addition to the home dir, openshift may mount .dockercfg files (via secret mount)
+ // in locations other than the home dir; openshift components should then set this field in those cases;
+ // this field is ignored if `AuthFilePath` is set (we favor the newer format);
+ // only reading of this data is supported;
+ LegacyFormatAuthFilePath string
// If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
ArchitectureChoice string
// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
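A hedged sketch of how a caller might populate the two auth-file fields described above; the paths are purely illustrative (not defaults defined by this package), and LegacyFormatAuthFilePath is only consulted when AuthFilePath is empty.

package sketch

import "github.com/containers/image/v5/types"

// newSystemContext illustrates the precedence described above: the new-format
// AuthFilePath wins; the legacy .dockercfg-style path is a read-only fallback.
func newSystemContext() *types.SystemContext {
	return &types.SystemContext{
		AuthFilePath:             "/run/user/1000/containers/auth.json",   // preferred, new format (illustrative path)
		LegacyFormatAuthFilePath: "/var/run/secrets/openshift/.dockercfg", // legacy format, ignored if AuthFilePath is set
		ArchitectureChoice:       "amd64",
		OSChoice:                 "linux",
	}
}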
diff --git a/vendor/github.com/containers/image/v4/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 2fa6706df..572be2b89 100644
--- a/vendor/github.com/containers/image/v4/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -4,11 +4,11 @@ import "fmt"
const (
// VersionMajor is for an API incompatible changes
- VersionMajor = 4
+ VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 0
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 1
+ VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE
index 0c74e15b0..58b19b6d1 100644
--- a/vendor/github.com/docker/docker/NOTICE
+++ b/vendor/github.com/docker/docker/NOTICE
@@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
-This product contains software (https://github.com/kr/pty) developed
+This product contains software (https://github.com/creack/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index 38ca5329e..cc2451f03 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -3287,7 +3287,7 @@ definitions:
<p><br /></p>
- - "ingress" makes the target port accessible on on every node,
+ - "ingress" makes the target port accessible on every node,
regardless of whether there is a task for the service running on
that node or not.
- "host" bypasses the routing mesh and publish the port directly on
@@ -3305,8 +3305,8 @@ definitions:
type: "object"
properties:
Mode:
- description: "The mode of resolution to use for internal load balancing
- between tasks."
+ description: |
+ The mode of resolution to use for internal load balancing between tasks.
type: "string"
enum:
- "vip"
@@ -4873,7 +4873,7 @@ paths:
Note that a running container can be _paused_. The `Running` and `Paused`
booleans are not mutually exclusive:
- When pausing a container (on Linux), the cgroups freezer is used to suspend
+ When pausing a container (on Linux), the freezer cgroup is used to suspend
all processes in the container. Freezing the process requires the process to
be running. As a result, paused containers are both `Running` _and_ `Paused`.
@@ -5543,8 +5543,6 @@ paths:
description: "no error"
304:
description: "container already started"
- schema:
- $ref: "#/definitions/ErrorResponse"
404:
description: "no such container"
schema:
@@ -5576,8 +5574,6 @@ paths:
description: "no error"
304:
description: "container already stopped"
- schema:
- $ref: "#/definitions/ErrorResponse"
404:
description: "no such container"
schema:
@@ -5768,9 +5764,9 @@ paths:
post:
summary: "Pause a container"
description: |
- Use the cgroups freezer to suspend all processes in a container.
+ Use the freezer cgroup to suspend all processes in a container.
- Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed.
+ Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed.
operationId: "ContainerPause"
responses:
204:
@@ -6493,10 +6489,11 @@ paths:
type: "string"
- name: "networkmode"
in: "query"
- description: "Sets the networking mode for the run commands during
- build. Supported standard values are: `bridge`, `host`, `none`, and
- `container:<name|id>`. Any other value is taken as a custom network's
- name to which this container should connect to."
+ description: |
+ Sets the networking mode for the run commands during build. Supported
+ standard values are: `bridge`, `host`, `none`, and `container:<name|id>`.
+ Any other value is taken as a custom network's name or ID to which this
+ container should connect.
type: "string"
- name: "Content-type"
in: "header"
@@ -9585,17 +9582,19 @@ paths:
type: "integer"
- name: "registryAuthFrom"
in: "query"
+ description: |
+ If the `X-Registry-Auth` header is not specified, this parameter
+ indicates where to find registry authorization credentials.
type: "string"
- description: "If the X-Registry-Auth header is not specified, this
- parameter indicates where to find registry authorization credentials. The
- valid values are `spec` and `previous-spec`."
+ enum: ["spec", "previous-spec"]
default: "spec"
- name: "rollback"
in: "query"
+ description: |
+ Set this parameter to `previous` to cause a server-side rollback
+ to the previous service spec. The supplied spec will be ignored in
+ this case.
type: "string"
- description: "Set to this parameter to `previous` to cause a
- server-side rollback to the previous service spec. The supplied spec will be
- ignored in this case."
- name: "X-Registry-Auth"
in: "header"
description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go
index 654c88106..209f33eb9 100644
--- a/vendor/github.com/docker/docker/api/types/container/host_config.go
+++ b/vendor/github.com/docker/docker/api/types/container/host_config.go
@@ -7,7 +7,7 @@ import (
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
- "github.com/docker/go-units"
+ units "github.com/docker/go-units"
)
// CgroupnsMode represents the cgroup namespace mode of the container
diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go
index 1f75403f7..2e24e769c 100644
--- a/vendor/github.com/docker/docker/api/types/filters/parse.go
+++ b/vendor/github.com/docker/docker/api/types/filters/parse.go
@@ -57,7 +57,7 @@ func ToJSON(a Args) (string, error) {
// then the encoded format will use an older legacy format where the values are a
// list of strings, instead of a set.
//
-// Deprecated: Use ToJSON
+// Deprecated: do not use in any new code; use ToJSON instead
func ToParamWithVersion(version string, a Args) (string, error) {
if a.Len() == 0 {
return "", nil
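Since ToParamWithVersion is kept only for clients negotiating very old API versions, new code is expected to serialize filters with ToJSON instead. A short hedged example:

package sketch

import "github.com/docker/docker/api/types/filters"

// encodeLabelFilter builds filter args and serializes them with ToJSON;
// the "label" filter value is illustrative only.
func encodeLabelFilter() (string, error) {
	args := filters.NewArgs()
	args.Add("label", "maintainer=example")
	return filters.ToJSON(args)
}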
diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go
index 8789ad3b3..53e47084c 100644
--- a/vendor/github.com/docker/docker/api/types/registry/registry.go
+++ b/vendor/github.com/docker/docker/api/types/registry/registry.go
@@ -4,7 +4,7 @@ import (
"encoding/json"
"net"
- "github.com/opencontainers/image-spec/specs-go/v1"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// ServiceConfig stores daemon registry services configuration.
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
index b13d9c4c7..4cf9a95ff 100644
--- a/vendor/github.com/docker/docker/api/types/types.go
+++ b/vendor/github.com/docker/docker/api/types/types.go
@@ -39,6 +39,7 @@ type ImageInspect struct {
Author string
Config *container.Config
Architecture string
+ Variant string `json:",omitempty"`
Os string
OsVersion string `json:",omitempty"`
Size int64
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
index b63d4d6d4..0649a69cc 100644
--- a/vendor/github.com/docker/docker/client/client.go
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -252,7 +252,8 @@ func (cli *Client) DaemonHost() string {
// HTTPClient returns a copy of the HTTP client bound to the server
func (cli *Client) HTTPClient() *http.Client {
- return &*cli.client
+ c := *cli.client
+ return &c
}
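Returning a copy means callers can tweak the returned *http.Client without mutating the client shared inside *client.Client. A hedged usage sketch:

package sketch

import (
	"time"

	"github.com/docker/docker/client"
)

// adjustTimeout changes the timeout on the copied HTTP client only; the client
// held internally by cli is unaffected.
func adjustTimeout() error {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		return err
	}
	hc := cli.HTTPClient()
	hc.Timeout = 5 * time.Second // affects only this copy
	return nil
}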
// ParseHostURL parses a url string, validates the string is a host url, and
diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go
index 1e7a63a9c..c099d80e2 100644
--- a/vendor/github.com/docker/docker/client/container_list.go
+++ b/vendor/github.com/docker/docker/client/container_list.go
@@ -35,6 +35,7 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis
}
if options.Filters.Len() > 0 {
+ //lint:ignore SA1019 for old code
filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
if err != nil {
diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go
index 6e5653895..f347cadf1 100644
--- a/vendor/github.com/docker/docker/client/events.go
+++ b/vendor/github.com/docker/docker/client/events.go
@@ -90,6 +90,7 @@ func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url
}
if options.Filters.Len() > 0 {
+ //lint:ignore SA1019 for old code
filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters)
if err != nil {
return nil, err
diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go
index e9c9a752f..e77084af6 100644
--- a/vendor/github.com/docker/docker/client/hijack.go
+++ b/vendor/github.com/docker/docker/client/hijack.go
@@ -87,6 +87,8 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto
// Server hijacks the connection, error 'connection closed' expected
resp, err := clientconn.Do(req)
+
+ //lint:ignore SA1019 for connecting to old (pre go1.8) daemons
if err != httputil.ErrPersistEOF {
if err != nil {
return nil, err
diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go
index 4fa8c006b..a5bc4b095 100644
--- a/vendor/github.com/docker/docker/client/image_list.go
+++ b/vendor/github.com/docker/docker/client/image_list.go
@@ -24,6 +24,7 @@ func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions
}
}
if optionFilters.Len() > 0 {
+ //lint:ignore SA1019 for old code
filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters)
if err != nil {
return images, err
diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go
index 7130c1364..8ca7eb612 100644
--- a/vendor/github.com/docker/docker/client/network_list.go
+++ b/vendor/github.com/docker/docker/client/network_list.go
@@ -13,6 +13,7 @@ import (
func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
query := url.Values{}
if options.Filters.Len() > 0 {
+ //lint:ignore SA1019 for old code
filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
if err != nil {
return nil, err
diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go
index 8285cecd6..a51c930e6 100644
--- a/vendor/github.com/docker/docker/client/plugin_list.go
+++ b/vendor/github.com/docker/docker/client/plugin_list.go
@@ -15,6 +15,7 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P
query := url.Values{}
if filter.Len() > 0 {
+ //lint:ignore SA1019 for old code
filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
if err != nil {
return plugins, err
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
index 3078335e2..144c41636 100644
--- a/vendor/github.com/docker/docker/client/request.go
+++ b/vendor/github.com/docker/docker/client/request.go
@@ -50,15 +50,6 @@ func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, b
return cli.sendRequest(ctx, "POST", path, query, body, headers)
}
-// put sends an http request to the docker API using the method PUT.
-func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
- body, headers, err := encodeBody(obj, headers)
- if err != nil {
- return serverResponse{}, err
- }
- return cli.sendRequest(ctx, "PUT", path, query, body, headers)
-}
-
// putRaw sends an http request to the docker API using the method PUT.
func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
return cli.sendRequest(ctx, "PUT", path, query, body, headers)
@@ -178,7 +169,13 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp
// this is localised - for example in French the error would be
// `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
- err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.")
+ // Checks if client is running with elevated privileges
+ if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr != nil {
+ err = errors.Wrap(err, "In the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect.")
+ } else {
+ f.Close()
+ err = errors.Wrap(err, "This error may indicate that the docker daemon is not running.")
+ }
}
return serverResp, errors.Wrap(err, "error during connect")
diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go
index 620fc6cff..56bfe55b7 100644
--- a/vendor/github.com/docker/docker/client/service_create.go
+++ b/vendor/github.com/docker/docker/client/service_create.go
@@ -9,7 +9,7 @@ import (
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
- "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go
index 2380d5638..d68fc2b98 100644
--- a/vendor/github.com/docker/docker/client/volume_list.go
+++ b/vendor/github.com/docker/docker/client/volume_list.go
@@ -15,6 +15,7 @@ func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumet
query := url.Values{}
if filter.Len() > 0 {
+ //lint:ignore SA1019 for old code
filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
if err != nil {
return volumes, err
diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go
index ac9bf6d33..07552f1cc 100644
--- a/vendor/github.com/docker/docker/errdefs/http_helpers.go
+++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go
@@ -4,6 +4,7 @@ import (
"fmt"
"net/http"
+ containerderrors "github.com/containerd/containerd/errdefs"
"github.com/docker/distribution/registry/api/errcode"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
@@ -47,6 +48,10 @@ func GetHTTPErrorStatusCode(err error) int {
if statusCode != http.StatusInternalServerError {
return statusCode
}
+ statusCode = statusCodeFromContainerdError(err)
+ if statusCode != http.StatusInternalServerError {
+ return statusCode
+ }
statusCode = statusCodeFromDistributionError(err)
if statusCode != http.StatusInternalServerError {
return statusCode
@@ -136,9 +141,6 @@ func statusCodeFromGRPCError(err error) int {
case codes.Unavailable: // code 14
return http.StatusServiceUnavailable
default:
- if e, ok := err.(causer); ok {
- return statusCodeFromGRPCError(e.Cause())
- }
// codes.Canceled(1)
// codes.Unknown(2)
// codes.DeadlineExceeded(4)
@@ -163,10 +165,27 @@ func statusCodeFromDistributionError(err error) int {
}
case errcode.ErrorCoder:
return errs.ErrorCode().Descriptor().HTTPStatusCode
- default:
- if e, ok := err.(causer); ok {
- return statusCodeFromDistributionError(e.Cause())
- }
}
return http.StatusInternalServerError
}
+
+// statusCodeFromContainerdError returns status code for containerd errors when
+// consumed directly (not through gRPC)
+func statusCodeFromContainerdError(err error) int {
+ switch {
+ case containerderrors.IsInvalidArgument(err):
+ return http.StatusBadRequest
+ case containerderrors.IsNotFound(err):
+ return http.StatusNotFound
+ case containerderrors.IsAlreadyExists(err):
+ return http.StatusConflict
+ case containerderrors.IsFailedPrecondition(err):
+ return http.StatusPreconditionFailed
+ case containerderrors.IsUnavailable(err):
+ return http.StatusServiceUnavailable
+ case containerderrors.IsNotImplemented(err):
+ return http.StatusNotImplemented
+ default:
+ return http.StatusInternalServerError
+ }
+}
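To illustrate how the new containerd mapping is reached, a containerd "not found" error wrapped with github.com/pkg/errors should now resolve to 404 through GetHTTPErrorStatusCode. A rough sketch, assuming the vendored import paths shown in this diff; the wrapping message is illustrative:

package main

import (
	"fmt"

	cerrdefs "github.com/containerd/containerd/errdefs"
	"github.com/docker/docker/errdefs"
	"github.com/pkg/errors"
)

func main() {
	// Wrap a containerd sentinel error the way a caller typically would.
	err := errors.Wrap(cerrdefs.ErrNotFound, "no such image")

	// GetHTTPErrorStatusCode falls through to statusCodeFromContainerdError,
	// which should report http.StatusNotFound (404) for this error.
	fmt.Println(errdefs.GetHTTPErrorStatusCode(err))
}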
diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 000000000..7307d9694
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 000000000..cbcf86532
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,1294 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+var unpigzPath string
+
+func init() {
+ if path, err := exec.LookPath("unpigz"); err != nil {
+ logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library")
+ } else {
+ logrus.Debugf("Using unpigz binary found at path %s", path)
+ unpigzPath = path
+ }
+}
+
+type (
+ // Compression is the state representing whether a stream is compressed or not.
+ Compression int
+ // WhiteoutFormat is the format of whiteouts unpacked
+ WhiteoutFormat int
+
+ // TarOptions wraps the tar options.
+ TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ ChownOpts *idtools.Identity
+ IncludeSourceDir bool
+ // WhiteoutFormat is the expected on disk format for whiteout files.
+ // This format will be converted to the standard format on pack
+ // and from the standard format on unpack.
+ WhiteoutFormat WhiteoutFormat
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
+ InUserNS bool
+ }
+)
+
+// Archiver allows the reuse of most utility functions of this package with a
+// pluggable Untar function. To facilitate the passing of specific id mappings
+// for untar, an Archiver can be created with maps which will then be passed to
+// Untar operations.
+type Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ IDMapping *idtools.IdentityMapping
+}
+
+// NewDefaultArchiver returns a new Archiver without any IdentityMapping
+func NewDefaultArchiver() *Archiver {
+ return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}}
+}
+
+// breakoutError is used to differentiate errors related to breaking out
+// When testing archive breakout in the unit tests, this error is expected
+// in order for the test to pass.
+type breakoutError error
+
+const (
+ // Uncompressed represents the uncompressed state.
+ Uncompressed Compression = iota
+ // Bzip2 is bzip2 compression algorithm.
+ Bzip2
+ // Gzip is gzip compression algorithm.
+ Gzip
+ // Xz is xz compression algorithm.
+ Xz
+)
+
+const (
+ // AUFSWhiteoutFormat is the default format for whiteouts
+ AUFSWhiteoutFormat WhiteoutFormat = iota
+ // OverlayWhiteoutFormat formats whiteout according to the overlay
+ // standard.
+ OverlayWhiteoutFormat
+)
+
+const (
+ modeISDIR = 040000 // Directory
+ modeISFIFO = 010000 // FIFO
+ modeISREG = 0100000 // Regular file
+ modeISLNK = 0120000 // Symbolic link
+ modeISBLK = 060000 // Block special file
+ modeISCHR = 020000 // Character special file
+ modeISSOCK = 0140000 // Socket
+)
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+ file, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer file.Close()
+ rdr, err := DecompressStream(file)
+ if err != nil {
+ return false
+ }
+ defer rdr.Close()
+ r := tar.NewReader(rdr)
+ _, err = r.Next()
+ return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+ for compression, m := range map[Compression][]byte{
+ Bzip2: {0x42, 0x5A, 0x68},
+ Gzip: {0x1F, 0x8B, 0x08},
+ Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+ } {
+ if len(source) < len(m) {
+ logrus.Debug("Len too short")
+ continue
+ }
+ if bytes.Equal(m, source[:len(m)]) {
+ return compression
+ }
+ }
+ return Uncompressed
+}
+
+func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) {
+ args := []string{"xz", "-d", "-c", "-q"}
+
+ return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive)
+}
+
+func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
+ if unpigzPath == "" {
+ return gzip.NewReader(buf)
+ }
+
+ disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ")
+ if disablePigzEnv != "" {
+ if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil {
+ return nil, err
+ } else if disablePigz {
+ return gzip.NewReader(buf)
+ }
+ }
+
+ return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
+}
+
+func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(readBuf, func() error {
+ cancel()
+ return readBuf.Close()
+ })
+}
+
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+ p := pools.BufioReader32KPool
+ buf := p.Get(archive)
+ bs, err := buf.Peek(10)
+ if err != nil && err != io.EOF {
+ // Note: we'll ignore any io.EOF error because there are some odd
+ // cases where the layer.tar file will be empty (zero bytes) and
+ // that results in an io.EOF from the Peek() call. So, in those
+ // cases we'll just treat it as a non-compressed stream and
+ // that means just create an empty layer.
+ // See Issue 18170
+ return nil, err
+ }
+
+ compression := DetectCompression(bs)
+ switch compression {
+ case Uncompressed:
+ readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+ return readBufWrapper, nil
+ case Gzip:
+ ctx, cancel := context.WithCancel(context.Background())
+
+ gzReader, err := gzDecompress(ctx, buf)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+ return wrapReadCloser(readBufWrapper, cancel), nil
+ case Bzip2:
+ bz2Reader := bzip2.NewReader(buf)
+ readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+ return readBufWrapper, nil
+ case Xz:
+ ctx, cancel := context.WithCancel(context.Background())
+
+ xzReader, err := xzDecompress(ctx, buf)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+ return wrapReadCloser(readBufWrapper, cancel), nil
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// CompressStream returns a WriteCloser that compresses data written to it into
+// dest using the specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+ p := pools.BufioWriter32KPool
+ buf := p.Get(dest)
+ switch compression {
+ case Uncompressed:
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+ return writeBufWrapper, nil
+ case Gzip:
+ gzWriter := gzip.NewWriter(dest)
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+ return writeBufWrapper, nil
+ case Bzip2, Xz:
+ // archive/bzip2 does not support writing, and there is no xz support at all
+ // However, this is not a problem as docker only currently generates gzipped tars
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
+// modify the contents or header of an entry in the archive. If the file already
+// exists in the archive the TarModifierFunc will be called with the Header and
+// a reader which will return the file's content. If the file does not exist, both
+// header and content will be nil.
+type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)
+
+// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
+// tar stream are modified if they match any of the keys in mods.
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
+ pipeReader, pipeWriter := io.Pipe()
+
+ go func() {
+ tarReader := tar.NewReader(inputTarStream)
+ tarWriter := tar.NewWriter(pipeWriter)
+ defer inputTarStream.Close()
+ defer tarWriter.Close()
+
+ modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
+ header, data, err := modifier(name, original, tarReader)
+ switch {
+ case err != nil:
+ return err
+ case header == nil:
+ return nil
+ }
+
+ header.Name = name
+ header.Size = int64(len(data))
+ if err := tarWriter.WriteHeader(header); err != nil {
+ return err
+ }
+ if len(data) != 0 {
+ if _, err := tarWriter.Write(data); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ var err error
+ var originalHeader *tar.Header
+ for {
+ originalHeader, err = tarReader.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+
+ modifier, ok := mods[originalHeader.Name]
+ if !ok {
+ // No modifiers for this file, copy the header and data
+ if err := tarWriter.WriteHeader(originalHeader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ if _, err := pools.Copy(tarWriter, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ continue
+ }
+ delete(mods, originalHeader.Name)
+
+ if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ // Apply the modifiers that haven't matched any files in the archive
+ for name, modifier := range mods {
+ if err := modify(name, nil, modifier, nil); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ pipeWriter.Close()
+
+ }()
+ return pipeReader
+}
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Uncompressed:
+ return "tar"
+ case Bzip2:
+ return "tar.bz2"
+ case Gzip:
+ return "tar.gz"
+ case Xz:
+ return "tar.xz"
+ }
+ return ""
+}
+
+// FileInfoHeader creates a populated Header from fi.
+// Compared to archive pkg this function fills in more information.
+// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
+// which have been deleted since Go 1.9 archive/tar.
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return nil, err
+ }
+ hdr.Format = tar.FormatPAX
+ hdr.ModTime = hdr.ModTime.Truncate(time.Second)
+ hdr.AccessTime = time.Time{}
+ hdr.ChangeTime = time.Time{}
+ hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
+ hdr.Name = canonicalTarName(name, fi.IsDir())
+ if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
+ return nil, err
+ }
+ return hdr, nil
+}
+
+// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar
+// https://github.com/golang/go/commit/66b5a2f
+func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
+ fm := fi.Mode()
+ switch {
+ case fm.IsRegular():
+ mode |= modeISREG
+ case fi.IsDir():
+ mode |= modeISDIR
+ case fm&os.ModeSymlink != 0:
+ mode |= modeISLNK
+ case fm&os.ModeDevice != 0:
+ if fm&os.ModeCharDevice != 0 {
+ mode |= modeISCHR
+ } else {
+ mode |= modeISBLK
+ }
+ case fm&os.ModeNamedPipe != 0:
+ mode |= modeISFIFO
+ case fm&os.ModeSocket != 0:
+ mode |= modeISSOCK
+ }
+ return mode
+}
+
+// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
+// to a tar header
+func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+ capability, _ := system.Lgetxattr(path, "security.capability")
+ if capability != nil {
+ hdr.Xattrs = make(map[string]string)
+ hdr.Xattrs["security.capability"] = string(capability)
+ }
+ return nil
+}
+
+type tarWhiteoutConverter interface {
+ ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
+ ConvertRead(*tar.Header, string) (bool, error)
+}
+
+type tarAppender struct {
+ TarWriter *tar.Writer
+ Buffer *bufio.Writer
+
+ // for hardlink mapping
+ SeenFiles map[uint64]string
+ IdentityMapping *idtools.IdentityMapping
+ ChownOpts *idtools.Identity
+
+ // For packing and unpacking whiteout files in the
+ // non-standard format. The whiteout files defined
+ // by the AUFS standard are used as the tar whiteout
+ // standard.
+ WhiteoutConverter tarWhiteoutConverter
+}
+
+func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
+ return &tarAppender{
+ SeenFiles: make(map[uint64]string),
+ TarWriter: tar.NewWriter(writer),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ IdentityMapping: idMapping,
+ ChownOpts: chownOpts,
+ }
+}
+
+// canonicalTarName provides a platform-independent and consistent posix-style
+// path for files and directories to be archived regardless of the platform.
+func canonicalTarName(name string, isDir bool) string {
+ name = CanonicalTarNameForPath(name)
+
+ // suffix with '/' for directories
+ if isDir && !strings.HasSuffix(name, "/") {
+ name += "/"
+ }
+ return name
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ var link string
+ if fi.Mode()&os.ModeSymlink != 0 {
+ var err error
+ link, err = os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ }
+
+ hdr, err := FileInfoHeader(name, fi, link)
+ if err != nil {
+ return err
+ }
+ if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+ return err
+ }
+
+ // if it's not a directory and has more than 1 link,
+ // it's hard linked, so set the type flag accordingly
+ if !fi.IsDir() && hasHardlinks(fi) {
+ inode, err := getInodeFromStat(fi.Sys())
+ if err != nil {
+ return err
+ }
+ // a link should have a name that it links to
+ // and that linked name should be first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+ hdr.Size = 0 // This Must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+ // check whether the file is an overlayfs whiteout
+ // if so, skip re-mapping container ID mappings.
+ isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0
+
+ // handle re-mapping container ID mappings back to host ID mappings before
+ // writing tar headers/files. We skip whiteout files because they were written
+ // by the kernel and already have proper ownership relative to the host
+ if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
+ fileIDPair, err := getFileUIDGID(fi.Sys())
+ if err != nil {
+ return err
+ }
+ hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair)
+ if err != nil {
+ return err
+ }
+ }
+
+ // explicitly override with ChownOpts
+ if ta.ChownOpts != nil {
+ hdr.Uid = ta.ChownOpts.UID
+ hdr.Gid = ta.ChownOpts.GID
+ }
+
+ if ta.WhiteoutConverter != nil {
+ wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+ if err != nil {
+ return err
+ }
+
+ // If a new whiteout file exists, write original hdr, then
+ // replace hdr with wo to be written after. Whiteouts should
+ // always be written after the original. Note the original
+ // hdr may have been updated to be a whiteout with returning
+ // a whiteout header
+ if wo != nil {
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+ }
+ hdr = wo
+ }
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ // We use system.OpenSequential to ensure we use sequential file
+ // access on Windows to avoid depleting the standby list.
+ // On Linux, this equates to a regular os.Open.
+ file, err := system.OpenSequential(path)
+ if err != nil {
+ return err
+ }
+
+ ta.Buffer.Reset(ta.TarWriter)
+ defer ta.Buffer.Reset(nil)
+ _, err = io.Copy(ta.Buffer, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ err = ta.Buffer.Flush()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error {
+ // hdr.Mode is in linux format, which we can use for syscalls,
+ // but for os.Foo() calls we need the mode converted to os.FileMode,
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+ hdrInfo := hdr.FileInfo()
+
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ // Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case tar.TypeReg, tar.TypeRegA:
+ // Source is regular file. We use system.OpenFileSequential to use sequential
+ // file access to avoid depleting the standby list on Windows.
+ // On Linux, this equates to a regular os.OpenFile
+ file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(file, reader); err != nil {
+ file.Close()
+ return err
+ }
+ file.Close()
+
+ case tar.TypeBlock, tar.TypeChar:
+ if inUserns { // cannot create devices in a userns
+ return nil
+ }
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeFifo:
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeLink:
+ targetPath := filepath.Join(extractDir, hdr.Linkname)
+ // check for hardlink breakout
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+ }
+ if err := os.Link(targetPath, path); err != nil {
+ return err
+ }
+
+ case tar.TypeSymlink:
+ // path -> hdr.Linkname = targetPath
+ // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+ targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+ // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+ // that symlink would first have to be created, which would be caught earlier, at this very check:
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+ }
+ if err := os.Symlink(hdr.Linkname, path); err != nil {
+ return err
+ }
+
+ case tar.TypeXGlobalHeader:
+ logrus.Debug("PAX Global Extended Headers found and ignored")
+ return nil
+
+ default:
+ return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
+ }
+
+ // Lchown is not supported on Windows.
+ if Lchown && runtime.GOOS != "windows" {
+ if chownOpts == nil {
+ chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}
+ }
+ if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+ return err
+ }
+ }
+
+ var errors []string
+ for key, value := range hdr.Xattrs {
+ if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ if err == syscall.ENOTSUP || err == syscall.EPERM {
+ // We ignore errors here because not all graphdrivers support
+ // xattrs *cough* old versions of AUFS *cough*. However only
+ // ENOTSUP should be emitted in that case, otherwise we still
+ // bail.
+ // EPERM occurs if modifying xattrs is not allowed. This can
+ // happen when running in userns with restrictions (ChromeOS).
+ errors = append(errors, err.Error())
+ continue
+ }
+ return err
+ }
+
+ }
+
+ if len(errors) > 0 {
+ logrus.WithFields(logrus.Fields{
+ "errors": errors,
+ }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
+ }
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ return err
+ }
+
+ aTime := hdr.AccessTime
+ if aTime.Before(hdr.ModTime) {
+ // Last access time should never be before last modified time.
+ aTime = hdr.ModTime
+ }
+
+ // system.Chtimes doesn't support a NOFOLLOW flag atm
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ } else {
+ ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
+ if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{Compression: compression})
+}
+
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
+// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+
+ // Fix the source path to work with long path names. This is a no-op
+ // on platforms other than Windows.
+ srcPath = fixVolumePathPrefix(srcPath)
+
+ pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
+ if err != nil {
+ return nil, err
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ compressWriter, err := CompressStream(pipeWriter, options.Compression)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ ta := newTarAppender(
+ idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
+ compressWriter,
+ options.ChownOpts,
+ )
+ ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
+
+ defer func() {
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Errorf("Can't close tar writer: %s", err)
+ }
+ if err := compressWriter.Close(); err != nil {
+ logrus.Errorf("Can't close compress writer: %s", err)
+ }
+ if err := pipeWriter.Close(); err != nil {
+ logrus.Errorf("Can't close pipe writer: %s", err)
+ }
+ }()
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+
+ stat, err := os.Lstat(srcPath)
+ if err != nil {
+ return
+ }
+
+ if !stat.IsDir() {
+ // We can't later join a non-dir with any includes because the
+ // 'walk' will error if "file/." is stat-ed and "file" is not a
+ // directory. So, we must split the source path and use the
+ // basename as the include.
+ if len(options.IncludeFiles) > 0 {
+ logrus.Warn("Tar: Can't archive a file with includes")
+ }
+
+ dir, base := SplitPathDirEntry(srcPath)
+ srcPath = dir
+ options.IncludeFiles = []string{base}
+ }
+
+ if len(options.IncludeFiles) == 0 {
+ options.IncludeFiles = []string{"."}
+ }
+
+ seen := make(map[string]bool)
+
+ for _, include := range options.IncludeFiles {
+ rebaseName := options.RebaseNames[include]
+
+ walkRoot := getWalkRoot(srcPath, include)
+ filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ logrus.Errorf("Tar: Can't stat file %s to tar: %s", filePath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(srcPath, filePath)
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the source directory path. Skip in both situations.
+ return nil
+ }
+
+ if options.IncludeSourceDir && include == "." && relFilePath != "." {
+ relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+ }
+
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ skip, err = pm.Matches(relFilePath)
+ if err != nil {
+ logrus.Errorf("Error matching %s: %v", relFilePath, err)
+ return err
+ }
+ }
+
+ if skip {
+ // If we want to skip this file and it's a directory
+ // then we should first check to see if there's an
+ // excludes pattern (e.g. !dir/file) that starts with this
+ // dir. If so then we can't skip this dir.
+
+ // It's not a dir, so we can just return/skip.
+ if !f.IsDir() {
+ return nil
+ }
+
+ // No exceptions (!...) in patterns so just skip dir
+ if !pm.Exclusions() {
+ return filepath.SkipDir
+ }
+
+ dirSlash := relFilePath + string(filepath.Separator)
+
+ for _, pat := range pm.Patterns() {
+ if !pat.Exclusion() {
+ continue
+ }
+ if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
+ // found a match - so can't skip this dir
+ return nil
+ }
+ }
+
+ // No matching exclusion dir so just skip dir
+ return filepath.SkipDir
+ }
+
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
+ // Rename the base resource.
+ if rebaseName != "" {
+ var replacement string
+ if rebaseName != string(filepath.Separator) {
+ // Special case the root directory to replace with an
+ // empty string instead so that we don't end up with
+ // double slashes in the paths.
+ replacement = rebaseName
+ }
+
+ relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+ // if pipe is broken, stop writing tar stream to it
+ if err == io.ErrClosedPipe {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ }()
+
+ return pipeReader, nil
+}
+
+// Unpack unpacks the decompressedArchive to dest with options.
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ tr := tar.NewReader(decompressedArchive)
+ trBuf := pools.BufioReader32KPool.Get(nil)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+ rootIDs := idMapping.RootPair()
+ whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
+
+ // Iterate through the files in the archive.
+loop:
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+ // This keeps "..\" as-is, but normalizes "\..\" to "\".
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ for _, exclude := range options.ExcludePatterns {
+ if strings.HasPrefix(hdr.Name, exclude) {
+ continue loop
+ }
+ }
+
+ // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+ // the filepath format for the OS on which the daemon is running. Hence
+ // the check for a slash-suffix MUST be done in an OS-agnostic way.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+
+ // If path exists we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing directory with a non-directory from the archive.
+ return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+ }
+
+ if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing non-directory with a directory from the archive.
+ return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+ }
+
+ if fi.IsDir() && hdr.Name == "." {
+ continue
+ }
+
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+ trBuf.Reset(tr)
+
+ if err := remapIDs(idMapping, hdr); err != nil {
+ return err
+ }
+
+ if whiteoutConverter != nil {
+ writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+ if err != nil {
+ return err
+ }
+ if !writeFile {
+ continue
+ }
+ }
+
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them from modifying the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ r := tarArchive
+ if decompress {
+ decompressedArchive, err := DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return Unpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ logrus.Debugf("TarUntar(%s %s)", src, dst)
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMapping.UIDs(),
+ GIDMaps: archiver.IDMapping.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath untars a file from path to a destination; src is the source tar file path.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ archive, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMapping.UIDs(),
+ GIDMaps: archiver.IDMapping.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !srcSt.IsDir() {
+ return archiver.CopyFileWithTar(src, dst)
+ }
+
+ // if this Archiver is set up with ID mapping we need to create
+ // the new destination directory with the remapped root UID/GID pair
+ // as owner
+ rootIDs := archiver.IDMapping.RootPair()
+ // Create dst, copy src's content into it
+ logrus.Debugf("Creating dest directory: %s", dst)
+ if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
+ return err
+ }
+ logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+ return archiver.TarUntar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+ logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+
+ if srcSt.IsDir() {
+ return fmt.Errorf("Can't copy a directory")
+ }
+
+ // Clean up the trailing slash. This must be done in an operating
+ // system specific manner.
+ if dst[len(dst)-1] == os.PathSeparator {
+ dst = filepath.Join(dst, filepath.Base(src))
+ }
+ // Create the holding directory if necessary
+ if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
+ return err
+ }
+
+ r, w := io.Pipe()
+ errC := make(chan error, 1)
+
+ go func() {
+ defer close(errC)
+
+ errC <- func() error {
+ defer w.Close()
+
+ srcF, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ hdr, err := tar.FileInfoHeader(srcSt, "")
+ if err != nil {
+ return err
+ }
+ hdr.Format = tar.FormatPAX
+ hdr.ModTime = hdr.ModTime.Truncate(time.Second)
+ hdr.AccessTime = time.Time{}
+ hdr.ChangeTime = time.Time{}
+ hdr.Name = filepath.Base(dst)
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ if err := remapIDs(archiver.IDMapping, hdr); err != nil {
+ return err
+ }
+
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if _, err := io.Copy(tw, srcF); err != nil {
+ return err
+ }
+ return nil
+ }()
+ }()
+ defer func() {
+ if er := <-errC; err == nil && er != nil {
+ err = er
+ }
+ }()
+
+ err = archiver.Untar(r, filepath.Dir(dst), nil)
+ if err != nil {
+ r.CloseWithError(err)
+ }
+ return err
+}
+
+// IdentityMapping returns the IdentityMapping of the archiver.
+func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping {
+ return archiver.IDMapping
+}
+
+func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error {
+ ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid})
+ hdr.Uid, hdr.Gid = ids.UID, ids.GID
+ return err
+}
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
+ cmd.Stdin = input
+ pipeR, pipeW := io.Pipe()
+ cmd.Stdout = pipeW
+ var errBuf bytes.Buffer
+ cmd.Stderr = &errBuf
+
+ // Run the command and return the pipe
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
+
+ // Ensure the command has exited before we clean anything up
+ done := make(chan struct{})
+
+ // Copy stdout to the returned pipe
+ go func() {
+ if err := cmd.Wait(); err != nil {
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+ } else {
+ pipeW.Close()
+ }
+ close(done)
+ }()
+
+ return ioutils.NewReadCloserWrapper(pipeR, func() error {
+ // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as
+ // cmd.Wait waits for any non-file stdout/stderr/stdin to close.
+ err := pipeR.Close()
+ <-done
+ return err
+ }), nil
+}
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
+ f, err := ioutil.TempFile(dir, "")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(f, src); err != nil {
+ return nil, err
+ }
+ if _, err := f.Seek(0, 0); err != nil {
+ return nil, err
+ }
+ st, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ size := st.Size()
+ return &TempArchive{File: f, Size: size}, nil
+}
+
+// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+type TempArchive struct {
+ *os.File
+ Size int64 // Pre-computed from Stat().Size() as a convenience
+ read int64
+ closed bool
+}
+
+// Close closes the underlying file if it's still open, or does a no-op
+// to allow callers to try to close the TempArchive multiple times safely.
+func (archive *TempArchive) Close() error {
+ if archive.closed {
+ return nil
+ }
+
+ archive.closed = true
+
+ return archive.File.Close()
+}
+
+func (archive *TempArchive) Read(data []byte) (int, error) {
+ n, err := archive.File.Read(data)
+ archive.read += int64(n)
+ if err != nil || archive.read == archive.Size {
+ archive.Close()
+ os.Remove(archive.File.Name())
+ }
+ return n, err
+}
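A minimal usage sketch for the package added above, tying TarWithOptions, automatic decompression, and Untar together; the ./data and ./copy paths are placeholders:

package main

import (
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Pack ./data into a gzip-compressed tar stream, skipping *.log files.
	rc, err := archive.TarWithOptions("./data", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{"*.log"},
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	// Unpack into ./copy; Untar detects and strips the compression via
	// DecompressStream before calling Unpack.
	if err := os.MkdirAll("./copy", 0755); err != nil {
		panic(err)
	}
	if err := archive.Untar(rc, "./copy", nil); err != nil {
		panic(err)
	}
}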
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
new file mode 100644
index 000000000..0601f7b0d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
@@ -0,0 +1,261 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/containerd/continuity/fs"
+ "github.com/docker/docker/pkg/system"
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter {
+ if format == OverlayWhiteoutFormat {
+ return overlayWhiteoutConverter{inUserNS: inUserNS}
+ }
+ return nil
+}
+
+type overlayWhiteoutConverter struct {
+ inUserNS bool
+}
+
+func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
+ // convert whiteouts to AUFS format
+ if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
+ // we just rename the file and make it normal
+ dir, filename := filepath.Split(hdr.Name)
+ hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
+ hdr.Mode = 0600
+ hdr.Typeflag = tar.TypeReg
+ hdr.Size = 0
+ }
+
+ if fi.Mode()&os.ModeDir != 0 {
+ // convert opaque dirs to AUFS format by writing an empty file with the prefix
+ opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
+ if err != nil {
+ return nil, err
+ }
+ if len(opaque) == 1 && opaque[0] == 'y' {
+ if hdr.Xattrs != nil {
+ delete(hdr.Xattrs, "trusted.overlay.opaque")
+ }
+
+ // create a header for the whiteout file
+ // it should inherit some properties from the parent, but be a regular file
+ wo = &tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: hdr.Mode & int64(os.ModePerm),
+ Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
+ Size: 0,
+ Uid: hdr.Uid,
+ Uname: hdr.Uname,
+ Gid: hdr.Gid,
+ Gname: hdr.Gname,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ }
+ }
+ }
+
+ return
+}
+
+func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+ base := filepath.Base(path)
+ dir := filepath.Dir(path)
+
+ // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
+ if base == WhiteoutOpaqueDir {
+ err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
+ if err != nil {
+ if c.inUserNS {
+ if err = replaceDirWithOverlayOpaque(dir); err != nil {
+ return false, errors.Wrapf(err, "replaceDirWithOverlayOpaque(%q) failed", dir)
+ }
+ } else {
+ return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir)
+ }
+ }
+ // don't write the file itself
+ return false, err
+ }
+
+ // if a file was deleted and we are using overlay, we need to create a character device
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+
+ if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
+ if c.inUserNS {
+ // Ubuntu and a few distros support overlayfs in userns.
+ //
+ // Although we can't call mknod directly in userns (at least on bionic kernel 4.15),
+ // we can still create 0,0 char device using mknodChar0Overlay().
+ //
+ // NOTE: we don't need this hack for the containerd snapshotter+unpack model.
+ if err := mknodChar0Overlay(originalPath); err != nil {
+ return false, errors.Wrapf(err, "failed to mknodChar0UserNS(%q)", originalPath)
+ }
+ } else {
+ return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath)
+ }
+ }
+ if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+ return false, err
+ }
+
+ // don't write the file itself
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// mknodChar0Overlay creates 0,0 char device by mounting overlayfs and unlinking.
+// This function can be used for creating 0,0 char device in userns on Ubuntu.
+//
+// Steps:
+// * Mkdir lower,upper,merged,work
+// * Create lower/dummy
+// * Mount overlayfs
+// * Unlink merged/dummy
+// * Unmount overlayfs
+// * Make sure a 0,0 char device is created as upper/dummy
+// * Rename upper/dummy to cleansedOriginalPath
+func mknodChar0Overlay(cleansedOriginalPath string) error {
+ dir := filepath.Dir(cleansedOriginalPath)
+ tmp, err := ioutil.TempDir(dir, "mc0o")
+ if err != nil {
+ return errors.Wrapf(err, "failed to create a tmp directory under %s", dir)
+ }
+ defer os.RemoveAll(tmp)
+ lower := filepath.Join(tmp, "l")
+ upper := filepath.Join(tmp, "u")
+ work := filepath.Join(tmp, "w")
+ merged := filepath.Join(tmp, "m")
+ for _, s := range []string{lower, upper, work, merged} {
+ if err := os.MkdirAll(s, 0700); err != nil {
+ return errors.Wrapf(err, "failed to mkdir %s", s)
+ }
+ }
+ dummyBase := "d"
+ lowerDummy := filepath.Join(lower, dummyBase)
+ if err := ioutil.WriteFile(lowerDummy, []byte{}, 0600); err != nil {
+ return errors.Wrapf(err, "failed to create a dummy lower file %s", lowerDummy)
+ }
+ mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
+ // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
+ if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
+ return errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
+ }
+ mergedDummy := filepath.Join(merged, dummyBase)
+ if err := os.Remove(mergedDummy); err != nil {
+ syscall.Unmount(merged, 0)
+ return errors.Wrapf(err, "failed to unlink %s", mergedDummy)
+ }
+ if err := syscall.Unmount(merged, 0); err != nil {
+ return errors.Wrapf(err, "failed to unmount %s", merged)
+ }
+ upperDummy := filepath.Join(upper, dummyBase)
+ if err := isChar0(upperDummy); err != nil {
+ return err
+ }
+ if err := os.Rename(upperDummy, cleansedOriginalPath); err != nil {
+ return errors.Wrapf(err, "failed to rename %s to %s", upperDummy, cleansedOriginalPath)
+ }
+ return nil
+}
+
+func isChar0(path string) error {
+ osStat, err := os.Stat(path)
+ if err != nil {
+ return errors.Wrapf(err, "failed to stat %s", path)
+ }
+ st, ok := osStat.Sys().(*syscall.Stat_t)
+ if !ok {
+ return errors.Errorf("got unsupported stat for %s", path)
+ }
+ if os.FileMode(st.Mode)&syscall.S_IFMT != syscall.S_IFCHR {
+ return errors.Errorf("%s is not a character device, got mode=%d", path, st.Mode)
+ }
+ if st.Rdev != 0 {
+ return errors.Errorf("%s is not a 0,0 character device, got Rdev=%d", path, st.Rdev)
+ }
+ return nil
+}
+
+// replaceDirWithOverlayOpaque replaces path with a new directory with trusted.overlay.opaque
+// xattr. The contents of the directory are preserved.
+func replaceDirWithOverlayOpaque(path string) error {
+ if path == "/" {
+ return errors.New("replaceDirWithOverlayOpaque: path must not be \"/\"")
+ }
+ dir := filepath.Dir(path)
+ tmp, err := ioutil.TempDir(dir, "rdwoo")
+ if err != nil {
+ return errors.Wrapf(err, "failed to create a tmp directory under %s", dir)
+ }
+ defer os.RemoveAll(tmp)
+ // newPath is a new empty directory crafted with trusted.overlay.opaque xattr.
+ // we copy the content of path into newPath, remove path, and rename newPath to path.
+ newPath, err := createDirWithOverlayOpaque(tmp)
+ if err != nil {
+ return errors.Wrapf(err, "createDirWithOverlayOpaque(%q) failed", tmp)
+ }
+ if err := fs.CopyDir(newPath, path); err != nil {
+ return errors.Wrapf(err, "CopyDir(%q, %q) failed", newPath, path)
+ }
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ return os.Rename(newPath, path)
+}
+
+// createDirWithOverlayOpaque creates a directory with trusted.overlay.opaque xattr,
+// without calling setxattr, so as to allow creating opaque dir in userns on Ubuntu.
+func createDirWithOverlayOpaque(tmp string) (string, error) {
+ lower := filepath.Join(tmp, "l")
+ upper := filepath.Join(tmp, "u")
+ work := filepath.Join(tmp, "w")
+ merged := filepath.Join(tmp, "m")
+ for _, s := range []string{lower, upper, work, merged} {
+ if err := os.MkdirAll(s, 0700); err != nil {
+ return "", errors.Wrapf(err, "failed to mkdir %s", s)
+ }
+ }
+ dummyBase := "d"
+ lowerDummy := filepath.Join(lower, dummyBase)
+ if err := os.MkdirAll(lowerDummy, 0700); err != nil {
+ return "", errors.Wrapf(err, "failed to create a dummy lower directory %s", lowerDummy)
+ }
+ mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
+ // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
+ if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
+ return "", errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
+ }
+ mergedDummy := filepath.Join(merged, dummyBase)
+ if err := os.Remove(mergedDummy); err != nil {
+ syscall.Unmount(merged, 0)
+ return "", errors.Wrapf(err, "failed to rmdir %s", mergedDummy)
+ }
+ // upperDummy becomes a 0,0-char device file here
+ if err := os.Mkdir(mergedDummy, 0700); err != nil {
+ syscall.Unmount(merged, 0)
+ return "", errors.Wrapf(err, "failed to mkdir %s", mergedDummy)
+ }
+ // upperDummy becomes a directory with trusted.overlay.opaque xattr
+ // (but can't be verified in userns)
+ if err := syscall.Unmount(merged, 0); err != nil {
+ return "", errors.Wrapf(err, "failed to unmount %s", merged)
+ }
+ upperDummy := filepath.Join(upper, dummyBase)
+ return upperDummy, nil
+}
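The overlay whiteout converter above is selected through TarOptions.WhiteoutFormat; the userns fallbacks (mknodChar0Overlay, replaceDirWithOverlayOpaque) are only exercised on unpack when TarOptions.InUserNS is set. A sketch of packing an overlayfs diff directory so its whiteouts come out in the standard AUFS form; the ./layer-diff and layer.tar paths are placeholders:

package main

import (
	"io"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Character-device whiteouts and trusted.overlay.opaque markers in the
	// source directory are rewritten to the AUFS .wh. convention on pack.
	rc, err := archive.TarWithOptions("./layer-diff", &archive.TarOptions{
		Compression:    archive.Uncompressed,
		WhiteoutFormat: archive.OverlayWhiteoutFormat,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	out, err := os.Create("layer.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, rc); err != nil {
		panic(err)
	}
}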
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
new file mode 100644
index 000000000..65a73354c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter {
+ return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
index bb6bf7145..d62633603 100644
--- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -1,29 +1,21 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
// +build !windows
-package archive
+package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"errors"
"os"
"path/filepath"
+ "strings"
"syscall"
"github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/system"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
"golang.org/x/sys/unix"
)
-// CanonicalTarNameForPath returns platform-specific filepath
-// to canonical posix-style path for tar archival. p is relative
-// path.
-func CanonicalTarNameForPath(p string) (string, error) {
- return p, nil // already unix-style
-}
-
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
@@ -35,7 +27,36 @@ func fixVolumePathPrefix(srcPath string) string {
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
- return srcPath + string(filepath.Separator) + include
+ return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include
+}
+
+// CanonicalTarNameForPath converts the platform-specific relative path p to
+// the canonical posix-style path used for tar archival.
+func CanonicalTarNameForPath(p string) string {
+ return p // already unix-style
+}
+
+// chmodTarEntry is used to adjust the file permissions used in the tar header
+// based on the platform on which the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if ok {
+ // Currently go does not fill in the major/minors
+ if s.Mode&unix.S_IFBLK != 0 ||
+ s.Mode&unix.S_IFCHR != 0 {
+ hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
+ hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
+ }
+ }
+
+ return
}
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
@@ -48,7 +69,7 @@ func getInodeFromStat(stat interface{}) (inode uint64, err error) {
return
}
-func getFileIdentity(stat interface{}) (idtools.Identity, error) {
+func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
@@ -57,21 +78,38 @@ func getFileIdentity(stat interface{}) (idtools.Identity, error) {
return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil
}
-func chmodTarEntry(perm os.FileMode) os.FileMode {
- return perm // noop for unix as golang APIs provide perm bits correctly
-}
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ if rsystem.RunningInUserNS() {
+ // cannot create a device if running in user namespace
+ return nil
+ }
-func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
- s, ok := stat.(*syscall.Stat_t)
+ mode := uint32(hdr.Mode & 07777)
+ switch hdr.Typeflag {
+ case tar.TypeBlock:
+ mode |= unix.S_IFBLK
+ case tar.TypeChar:
+ mode |= unix.S_IFCHR
+ case tar.TypeFifo:
+ mode |= unix.S_IFIFO
+ }
- if ok {
- // Currently go does not fill in the major/minors
- if s.Mode&unix.S_IFBLK != 0 ||
- s.Mode&unix.S_IFCHR != 0 {
- hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
- hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
+ return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
}
}
-
- return
+ return nil
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
index 33c1dff03..ae6b89fd7 100644
--- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -1,35 +1,14 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
-package archive
+package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
- "fmt"
"os"
"path/filepath"
- "strings"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/longpath"
)
-// CanonicalTarNameForPath returns platform-specific filepath
-// to canonical posix-style path for tar archival. p is relative
-// path.
-func CanonicalTarNameForPath(p string) (string, error) {
- // windows: convert windows style relative path with backslashes
- // into forward slashes. Since windows does not allow '/' or '\'
- // in file names, it is mostly safe to replace however we must
- // check just in case
- if strings.Contains(p, "/") {
- //lint:ignore ST1005 Windows should be capitalized :)
- return "", fmt.Errorf("Windows path contains forward slash: %s", p)
- }
- return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
-}
-
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
@@ -42,20 +21,17 @@ func getWalkRoot(srcPath string, include string) string {
return filepath.Join(srcPath, include)
}
-func getInodeFromStat(stat interface{}) (inode uint64, err error) {
- // do nothing. no notion of Inode in stat on Windows
- return
-}
-
-func getFileIdentity(stat interface{}) (idtools.Identity, error) {
- // no notion of file ownership mapping yet on Windows
- return idtools.Identity{}, nil
+// CanonicalTarNameForPath converts the platform-specific relative path p to
+// the canonical posix-style path used for tar archival.
+func CanonicalTarNameForPath(p string) string {
+ return filepath.ToSlash(p)
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
- // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+ //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
permPart := perm & os.ModePerm
noPermPart := perm &^ os.ModePerm
// Add the x bit: make everything +x from windows
@@ -69,3 +45,23 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (
// do nothing. no notion of Rdev, Nlink in stat on Windows
return
}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+ // do nothing. no notion of Inode in stat on Windows
+ return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ return nil
+}
+
+func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
+ // no notion of file ownership mapping yet on Windows
+ return idtools.Identity{UID: 0, GID: 0}, nil
+}
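
As a quick check of the Windows chmodTarEntry above, this self-contained sketch reimplements the helper (for illustration only) and prints the normalized modes:

package main

import (
	"fmt"
	"os"
)

// chmodTarEntry (reimplemented here) forces the permission part into an
// "everything executable, capped at 0755" shape and keeps the non-permission
// mode bits (directory, symlink markers, ...) untouched.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	permPart := perm & os.ModePerm
	noPermPart := perm &^ os.ModePerm
	permPart |= 0111 // add the x bit
	permPart &= 0755 // cap at 0755
	return noPermPart | permPart
}

func main() {
	fmt.Println(chmodTarEntry(0644))              // -rwxr-xr-x
	fmt.Println(chmodTarEntry(os.ModeDir | 0600)) // drwx--x--x
}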
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 000000000..aedb91b03
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,445 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+ // ChangeModify represents the modify operation.
+ ChangeModify = iota
+ // ChangeAdd represents the add operation.
+ ChangeAdd
+ // ChangeDelete represents the delete operation.
+ ChangeDelete
+)
+
+func (c ChangeType) String() string {
+ switch c {
+ case ChangeModify:
+ return "C"
+ case ChangeAdd:
+ return "A"
+ case ChangeDelete:
+ return "D"
+ }
+ return ""
+}
+
+// Change represents a change; it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change could be modify, add, or delete.
+// This is used for layer diff.
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int { return len(c) }
+func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
+
+// Gnu tar doesn't have sub-second mtime precision. The go tar
+// writer (1.10+) does when using PAX format, but we round times to seconds
+// to ensure archives have the same hashes for backwards compatibility.
+// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
+//
+// The lack of sub-second precision is problematic when we apply changes via
+// tar files. We handle this by comparing for exact equality, *or* the same
+// second count with either a or b having exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+ return a.Equal(b) ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
+
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+ return a.Sec == b.Sec &&
+ (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
+}
+
+func aufsMetadataSkip(path string) (skip bool, err error) {
+ skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+ if err != nil {
+ skip = true
+ }
+ return
+}
+
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ f := filepath.Base(path)
+
+ // If there is a whiteout, then the file was removed
+ if strings.HasPrefix(f, WhiteoutPrefix) {
+ originalFile := f[len(WhiteoutPrefix):]
+ return filepath.Join(filepath.Dir(path), originalFile), nil
+ }
+
+ return "", nil
+}
+
+type skipChange func(string) (bool, error)
+type deleteChange func(string, string, os.FileInfo) (string, error)
+
+func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
+ var (
+ changes []Change
+ changedDirs = make(map[string]struct{})
+ )
+
+ err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(rw, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ if sc != nil {
+ if skip, err := sc(path); skip {
+ return err
+ }
+ }
+
+ change := Change{
+ Path: path,
+ }
+
+ deletedFile, err := dc(rw, path, f)
+ if err != nil {
+ return err
+ }
+
+ // Find out what kind of modification happened
+ if deletedFile != "" {
+ change.Path = deletedFile
+ change.Kind = ChangeDelete
+ } else {
+ // Otherwise, the file was added
+ change.Kind = ChangeAdd
+
+ // ...Unless it already existed in a top layer, in which case, it's a modification
+ for _, layer := range layers {
+ stat, err := os.Stat(filepath.Join(layer, path))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ // The file existed in the top layer, so that's a modification
+
+ // However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+ if stat.IsDir() && f.IsDir() {
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+ // Both directories are the same, don't record the change
+ return nil
+ }
+ }
+ change.Kind = ChangeModify
+ break
+ }
+ }
+ }
+
+ // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+ // This block is here to ensure the change is recorded even if the
+ // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+ // Check https://github.com/docker/docker/pull/13590 for details.
+ if f.IsDir() {
+ changedDirs[path] = struct{}{}
+ }
+ if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
+ parent := filepath.Dir(path)
+ if _, ok := changedDirs[parent]; !ok && parent != "/" {
+ changes = append(changes, Change{Path: parent, Kind: ChangeModify})
+ changedDirs[parent] = struct{}{}
+ }
+ }
+
+ // Record change
+ changes = append(changes, change)
+ return nil
+ })
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return changes, nil
+}
+
+// FileInfo describes the information of a file.
+type FileInfo struct {
+ parent *FileInfo
+ name string
+ stat *system.StatT
+ children map[string]*FileInfo
+ capability []byte
+ added bool
+}
+
+// LookUp looks up the file information of a file.
+func (info *FileInfo) LookUp(path string) *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ parent := info
+ if path == string(os.PathSeparator) {
+ return info
+ }
+
+ pathElements := strings.Split(path, string(os.PathSeparator))
+ for _, elem := range pathElements {
+ if elem != "" {
+ child := parent.children[elem]
+ if child == nil {
+ return nil
+ }
+ parent = child
+ }
+ }
+ return parent
+}
+
+func (info *FileInfo) path() string {
+ if info.parent == nil {
+ // As this runs on the daemon side, file paths are OS specific.
+ return string(os.PathSeparator)
+ }
+ return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+
+ sizeAtEntry := len(*changes)
+
+ if oldInfo == nil {
+ // add
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeAdd,
+ }
+ *changes = append(*changes, change)
+ info.added = true
+ }
+
+ // We make a copy so we can modify it to detect additions
+ // also, we only recurse on the old dir if the new info is a directory
+ // otherwise any previous delete/change is considered recursive
+ oldChildren := make(map[string]*FileInfo)
+ if oldInfo != nil && info.isDir() {
+ for k, v := range oldInfo.children {
+ oldChildren[k] = v
+ }
+ }
+
+ for name, newChild := range info.children {
+ oldChild := oldChildren[name]
+ if oldChild != nil {
+ // change?
+ oldStat := oldChild.stat
+ newStat := newChild.stat
+ // Note: We can't compare inode or ctime or blocksize here, because these change
+ // when copying a file into a container. However, that is not generally a problem
+ // because any content change will change mtime, and any status change should
+ // be visible when actually comparing the stat fields. The only time this
+ // breaks down is if some code intentionally hides a change by setting
+ // back mtime
+ if statDifferent(oldStat, newStat) ||
+ !bytes.Equal(oldChild.capability, newChild.capability) {
+ change := Change{
+ Path: newChild.path(),
+ Kind: ChangeModify,
+ }
+ *changes = append(*changes, change)
+ newChild.added = true
+ }
+
+ // Remove from copy so we can detect deletions
+ delete(oldChildren, name)
+ }
+
+ newChild.addChanges(oldChild, changes)
+ }
+ for _, oldChild := range oldChildren {
+ // delete
+ change := Change{
+ Path: oldChild.path(),
+ Kind: ChangeDelete,
+ }
+ *changes = append(*changes, change)
+ }
+
+ // If there were changes inside this directory, we need to add it, even if the directory
+ // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+ // As this runs on the daemon side, file paths are OS specific.
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeModify,
+ }
+ // Let's insert the directory entry before the recently added entries located inside this dir
+ *changes = append(*changes, change) // just to resize the slice, will be overwritten
+ copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+ (*changes)[sizeAtEntry] = change
+ }
+
+}
+
+// Changes computes the changes between info and oldInfo and returns them.
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+ var changes []Change
+
+ info.addChanges(oldInfo, &changes)
+
+ return changes
+}
+
+func newRootFileInfo() *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ root := &FileInfo{
+ name: string(os.PathSeparator),
+ children: make(map[string]*FileInfo),
+ }
+ return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ )
+ if oldDir == "" {
+ emptyDir, err := ioutil.TempDir("", "empty")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(emptyDir)
+ oldDir = emptyDir
+ }
+ oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+ if err != nil {
+ return nil, err
+ }
+
+ return newRoot.Changes(oldRoot), nil
+}
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+ var (
+ size int64
+ sf = make(map[uint64]struct{})
+ )
+ for _, change := range changes {
+ if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+ file := filepath.Join(newDir, change.Path)
+ fileInfo, err := os.Lstat(file)
+ if err != nil {
+ logrus.Errorf("Can not stat %q: %s", file, err)
+ continue
+ }
+
+ if fileInfo != nil && !fileInfo.IsDir() {
+ if hasHardlinks(fileInfo) {
+ inode := getIno(fileInfo)
+ if _, ok := sf[inode]; !ok {
+ size += fileInfo.Size()
+ sf[inode] = struct{}{}
+ }
+ } else {
+ size += fileInfo.Size()
+ }
+ }
+ }
+ }
+ return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
+ reader, writer := io.Pipe()
+ go func() {
+ ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ sort.Sort(changesByPath(changes))
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+ for _, change := range changes {
+ if change.Kind == ChangeDelete {
+ whiteOutDir := filepath.Dir(change.Path)
+ whiteOutBase := filepath.Base(change.Path)
+ whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+ timestamp := time.Now()
+ hdr := &tar.Header{
+ Name: whiteOut[1:],
+ Size: 0,
+ ModTime: timestamp,
+ AccessTime: timestamp,
+ ChangeTime: timestamp,
+ }
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ logrus.Debugf("Can't write whiteout header: %s", err)
+ }
+ } else {
+ path := filepath.Join(dir, change.Path)
+ if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+ logrus.Debugf("Can't add file %s to tar: %s", path, err)
+ }
+ }
+ }
+
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Debugf("Can't close layer: %s", err)
+ }
+ if err := writer.Close(); err != nil {
+ logrus.Debugf("failed close Changes writer: %s", err)
+ }
+ }()
+ return reader, nil
+}
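
The second-granularity comparison described in the changes.go comments above is easiest to see in isolation; sameFsTime is unexported, so this standalone sketch reimplements it purely for illustration:

package main

import (
	"fmt"
	"time"
)

// sameFsTime (reimplemented for illustration) treats two timestamps as equal
// when they match exactly, or when they share the same second and one side
// carries no sub-second part (as happens after tar's second rounding).
func sameFsTime(a, b time.Time) bool {
	return a.Equal(b) ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
	exact := time.Unix(1700000000, 123456789)
	rounded := time.Unix(1700000000, 0) // what a second-granularity tar header stores

	fmt.Println(sameFsTime(exact, rounded))                    // true: same second, one side lost its nanoseconds
	fmt.Println(sameFsTime(exact, exact.Add(time.Nanosecond))) // false: both carry differing sub-second parts
}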
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 000000000..f8792b3d4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,286 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "syscall"
+ "unsafe"
+
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save up to seconds on large
+// images.
+type walker struct {
+ dir1 string
+ dir2 string
+ root1 *FileInfo
+ root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// to generate a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+ w := &walker{
+ dir1: dir1,
+ dir2: dir2,
+ root1: newRootFileInfo(),
+ root2: newRootFileInfo(),
+ }
+
+ i1, err := os.Lstat(w.dir1)
+ if err != nil {
+ return nil, nil, err
+ }
+ i2, err := os.Lstat(w.dir2)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := w.walk("/", i1, i2); err != nil {
+ return nil, nil, err
+ }
+
+ return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+ if fi == nil {
+ return nil
+ }
+ parent := root.LookUp(filepath.Dir(path))
+ if parent == nil {
+ return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
+ }
+ info := &FileInfo{
+ name: filepath.Base(path),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+ cpath := filepath.Join(dir, path)
+ stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
+ if err != nil {
+ return err
+ }
+ info.stat = stat
+ info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ parent.children[info.name] = info
+ return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+ // Register these nodes with the return trees, unless we're still at the
+ // (already-created) roots:
+ if path != "/" {
+ if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+ return err
+ }
+ if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+ return err
+ }
+ }
+
+ is1Dir := i1 != nil && i1.IsDir()
+ is2Dir := i2 != nil && i2.IsDir()
+
+ sameDevice := false
+ if i1 != nil && i2 != nil {
+ si1 := i1.Sys().(*syscall.Stat_t)
+ si2 := i2.Sys().(*syscall.Stat_t)
+ if si1.Dev == si2.Dev {
+ sameDevice = true
+ }
+ }
+
+ // If these files are both non-existent, or leaves (non-dirs), we are done.
+ if !is1Dir && !is2Dir {
+ return nil
+ }
+
+ // Fetch the names of all the files contained in both directories being walked:
+ var names1, names2 []nameIno
+ if is1Dir {
+ names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+ if is2Dir {
+ names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+
+ // We have lists of the files contained in both parallel directories, sorted
+ // in the same order. Walk them in parallel, generating a unique merged list
+ // of all items present in either or both directories.
+ var names []string
+ ix1 := 0
+ ix2 := 0
+
+ for {
+ if ix1 >= len(names1) {
+ break
+ }
+ if ix2 >= len(names2) {
+ break
+ }
+
+ ni1 := names1[ix1]
+ ni2 := names2[ix2]
+
+ switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
+ case -1: // ni1 < ni2 -- advance ni1
+ // we will not encounter ni1 in names2
+ names = append(names, ni1.name)
+ ix1++
+ case 0: // ni1 == ni2
+ if ni1.ino != ni2.ino || !sameDevice {
+ names = append(names, ni1.name)
+ }
+ ix1++
+ ix2++
+ case 1: // ni1 > ni2 -- advance ni2
+ // we will not encounter ni2 in names1
+ names = append(names, ni2.name)
+ ix2++
+ }
+ }
+ for ix1 < len(names1) {
+ names = append(names, names1[ix1].name)
+ ix1++
+ }
+ for ix2 < len(names2) {
+ names = append(names, names2[ix2].name)
+ ix2++
+ }
+
+ // For each of the names present in either or both of the directories being
+ // iterated, stat the name under each root, and recurse the pair of them:
+ for _, name := range names {
+ fname := filepath.Join(path, name)
+ var cInfo1, cInfo2 os.FileInfo
+ if is1Dir {
+ cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if is2Dir {
+ cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if err = w.walk(fname, cInfo1, cInfo2); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// {name,inode} pairs used to support the early-pruning logic of the walker type
+type nameIno struct {
+ name string
+ ino uint64
+}
+
+type nameInoSlice []nameIno
+
+func (s nameInoSlice) Len() int { return len(s) }
+func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
+
+// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
+// numbers further up the stack when reading directory contents. Unlike
+// os.Readdirnames, which returns a list of filenames, this function returns a
+// list of {filename,inode} pairs.
+func readdirnames(dirname string) (names []nameIno, err error) {
+ var (
+ size = 100
+ buf = make([]byte, 4096)
+ nbuf int
+ bufp int
+ nb int
+ )
+
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ names = make([]nameIno, 0, size) // Empty with room to grow.
+ for {
+ // Refill the buffer if necessary
+ if bufp >= nbuf {
+ bufp = 0
+ nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
+ if nbuf < 0 {
+ nbuf = 0
+ }
+ if err != nil {
+ return nil, os.NewSyscallError("readdirent", err)
+ }
+ if nbuf <= 0 {
+ break // EOF
+ }
+ }
+
+ // Drain the buffer
+ nb, names = parseDirent(buf[bufp:nbuf], names)
+ bufp += nb
+ }
+
+ sl := nameInoSlice(names)
+ sort.Sort(sl)
+ return sl, nil
+}
+
+// parseDirent is a minor modification of unix.ParseDirent (linux version)
+// which returns {name,inode} pairs instead of just names.
+func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
+ origlen := len(buf)
+ for len(buf) > 0 {
+ dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ names = append(names, nameIno{name, dirent.Ino})
+ }
+ return origlen - len(buf), names
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
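
A minimal standalone sketch (illustrative types and values only) of the sorted-merge pruning that walker.walk performs above: entries present in both trees with the same name and inode on the same device are dropped, so they never need an lstat(2).

package main

import "fmt"

type nameIno struct {
	name string
	ino  uint64
}

// mergeChanged walks two name-sorted listings in parallel and returns the
// names that appear in only one listing, or whose inode differs, or all
// common names when the trees are not on the same device.
func mergeChanged(a, b []nameIno, sameDevice bool) []string {
	var names []string
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i].name < b[j].name:
			names = append(names, a[i].name)
			i++
		case a[i].name > b[j].name:
			names = append(names, b[j].name)
			j++
		default: // same name in both trees
			if a[i].ino != b[j].ino || !sameDevice {
				names = append(names, a[i].name)
			}
			i++
			j++
		}
	}
	for ; i < len(a); i++ {
		names = append(names, a[i].name)
	}
	for ; j < len(b); j++ {
		names = append(names, b[j].name)
	}
	return names
}

func main() {
	old := []nameIno{{"a", 1}, {"b", 2}, {"d", 4}}
	cur := []nameIno{{"a", 1}, {"b", 7}, {"c", 3}}
	fmt.Println(mergeChanged(old, cur, true)) // [b c d]: "a" is pruned, it matched by inode
}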
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
new file mode 100644
index 000000000..ba744741c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
@@ -0,0 +1,97 @@
+// +build !linux
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ err1, err2 error
+ errs = make(chan error, 2)
+ )
+ go func() {
+ oldRoot, err1 = collectFileInfo(oldDir)
+ errs <- err1
+ }()
+ go func() {
+ newRoot, err2 = collectFileInfo(newDir)
+ errs <- err2
+ }()
+
+ // block until both routines have returned
+ for i := 0; i < 2; i++ {
+ if err := <-errs; err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return oldRoot, newRoot, nil
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+ root := newRootFileInfo()
+
+ err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+ // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+ // Temporary workaround. If the returned path starts with two backslashes,
+ // trim it down to a single backslash. Only relevant on Windows.
+ if runtime.GOOS == "windows" {
+ if strings.HasPrefix(relPath, `\\`) {
+ relPath = relPath[1:]
+ }
+ }
+
+ if relPath == string(os.PathSeparator) {
+ return nil
+ }
+
+ parent := root.LookUp(filepath.Dir(relPath))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+ }
+
+ info := &FileInfo{
+ name: filepath.Base(relPath),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+
+ s, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ info.stat = s
+
+ info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+ parent.children[info.name] = info
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return root, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 000000000..06217b716
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,43 @@
+// +build !windows
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+ // Don't look at size for dirs, it's not a good measure of change
+ if oldStat.Mode() != newStat.Mode() ||
+ oldStat.UID() != newStat.UID() ||
+ oldStat.GID() != newStat.GID() ||
+ oldStat.Rdev() != newStat.Rdev() ||
+ // Don't look at size or modification time for dirs, it's not a good
+ // measure of change. See https://github.com/moby/moby/issues/9874
+ // for a description of the issue with modification time, and
+ // https://github.com/moby/moby/pull/11422 for the change.
+ // (Note that in the Windows implementation of this function,
+ // modification time IS taken as a change). See
+ // https://github.com/moby/moby/pull/37982 for more information.
+ (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
+ (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+ return fi.Sys().(*syscall.Stat_t).Ino
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
new file mode 100644
index 000000000..9906685e4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,34 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+ // Note there is slight difference between the Linux and Windows
+ // implementations here. Due to https://github.com/moby/moby/issues/9874,
+ // and the fix at https://github.com/moby/moby/pull/11422, Linux does not
+ // consider a change to the directory time as a change. Windows on NTFS
+ // does. See https://github.com/moby/moby/pull/37982 for more information.
+
+ if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) ||
+ oldStat.Mode() != newStat.Mode() ||
+ oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+ return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go
new file mode 100644
index 000000000..57fddac07
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy.go
@@ -0,0 +1,480 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// Errors used or returned by this file.
+var (
+ ErrNotDirectory = errors.New("not a directory")
+ ErrDirNotExists = errors.New("no such directory")
+ ErrCannotCopyDir = errors.New("cannot copy directory")
+ ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// clean path already ends in the separator, then another is not added.
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
+ // Ensure paths are in platform semantics
+ cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
+ originalPath = strings.Replace(originalPath, "/", string(sep), -1)
+
+ if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
+ if !hasTrailingPathSeparator(cleanedPath, sep) {
+ // Add a separator if it doesn't already end with one (a cleaned
+ // path would only end in a separator if it is the root).
+ cleanedPath += string(sep)
+ }
+ cleanedPath += "."
+ }
+
+ if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
+ cleanedPath += string(sep)
+ }
+
+ return cleanedPath
+}
+
+// assertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func assertsDirectory(path string, sep byte) bool {
+ return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
+}
+
+// hasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func hasTrailingPathSeparator(path string, sep byte) bool {
+ return len(path) > 0 && path[len(path)-1] == sep
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+ return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename by first cleaning the path but preserves a trailing "." if the
+// original path specified the current directory.
+func SplitPathDirEntry(path string) (dir, base string) {
+ cleanedPath := filepath.Clean(filepath.FromSlash(path))
+
+ if specifiesCurrentDir(path) {
+ cleanedPath += string(os.PathSeparator) + "."
+ }
+
+ return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
+ return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
+ sourcePath = normalizePath(sourcePath)
+ if _, err = os.Lstat(sourcePath); err != nil {
+ // Catches the case where the source does not exist or is not a
+ // directory if asserted to be a directory, as this also causes an
+ // error.
+ return
+ }
+
+ // Separate the source path between its directory and
+ // the entry in that directory which we are archiving.
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+ opts := TarResourceRebaseOpts(sourceBase, rebaseName)
+
+ logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+ return TarWithOptions(sourceDir, opts)
+}
+
+// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
+// parameters to be sent to TarWithOptions (the TarOptions struct)
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+ filter := []string{sourceBase}
+ return &TarOptions{
+ Compression: Uncompressed,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ RebaseNames: map[string]string{
+ sourceBase: rebaseName,
+ },
+ }
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+ Path string
+ Exists bool
+ IsDir bool
+ RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+ // Normalize the file path and then evaluate the symbolic link;
+ // we will use the link target instead of the symlink itself if
+ // followLink is set.
+ path = normalizePath(path)
+
+ resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ stat, err := os.Lstat(resolvedPath)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ return CopyInfo{
+ Path: resolvedPath,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ RebaseName: rebaseName,
+ }, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+ maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+ path = normalizePath(path)
+ originalPath := path
+
+ stat, err := os.Lstat(path)
+
+ if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+ // The path exists and is not a symlink.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+ }
+
+ // While the path is a symlink.
+ for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+ if n > maxSymlinkIter {
+ // Don't follow symlinks more than this arbitrary number of times.
+ return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+ }
+
+ // The path is a symbolic link. We need to evaluate it so that the
+ // destination of the copy operation is the link target and not the
+ // link itself. This is notably different than CopyInfoSourcePath which
+ // only evaluates symlinks before the last appearing path separator.
+ // Also note that it is okay if the last path element is a broken
+ // symlink as the copy operation should create the target.
+ var linkTarget string
+
+ linkTarget, err = os.Readlink(path)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ if !system.IsAbs(linkTarget) {
+ // Join with the parent directory.
+ dstParent, _ := SplitPathDirEntry(path)
+ linkTarget = filepath.Join(dstParent, linkTarget)
+ }
+
+ path = linkTarget
+ stat, err = os.Lstat(path)
+ }
+
+ if err != nil {
+ // It's okay if the destination path doesn't exist. We can still
+ // continue the copy operation if the parent directory exists.
+ if !os.IsNotExist(err) {
+ return CopyInfo{}, err
+ }
+
+ // Ensure destination parent dir exists.
+ dstParent, _ := SplitPathDirEntry(path)
+
+ parentDirStat, err := os.Stat(dstParent)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+ if !parentDirStat.IsDir() {
+ return CopyInfo{}, ErrNotDirectory
+ }
+
+ return CopyInfo{Path: path}, nil
+ }
+
+ // The path exists after resolving symlinks.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
+ // Ensure in platform semantics
+ srcInfo.Path = normalizePath(srcInfo.Path)
+ dstInfo.Path = normalizePath(dstInfo.Path)
+
+ // Separate the destination path between its directory and base
+ // components in case the source archive contents need to be rebased.
+ dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+ _, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+ switch {
+ case dstInfo.Exists && dstInfo.IsDir:
+ // The destination exists as a directory. No alteration
+ // to srcContent is needed as its contents can be
+ // simply extracted to the destination directory.
+ return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ case dstInfo.Exists && srcInfo.IsDir:
+ // The destination exists as some type of file and the source
+ // content is a directory. This is an error condition since
+ // you cannot copy a directory to an existing file location.
+ return "", nil, ErrCannotCopyDir
+ case dstInfo.Exists:
+ // The destination exists as some type of file and the source content
+ // is also a file. The source content entry will have to be renamed to
+ // have a basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case srcInfo.IsDir:
+ // The destination does not exist and the source content is an archive
+ // of a directory. The archive should be extracted to the parent of
+ // the destination path instead, and when it is, the directory that is
+ // created as a result should take the name of the destination path.
+ // The source content entries will have to be renamed to have a
+ // basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case assertsDirectory(dstInfo.Path, os.PathSeparator):
+ // The destination does not exist and is asserted to be created as a
+ // directory, but the source content is not a directory. This is an
+ // error condition since you cannot create a directory from a file
+ // source.
+ return "", nil, ErrDirNotExists
+ default:
+ // The last remaining case is when the destination does not exist, is
+ // not asserted to be a directory, and the source content is not an
+ // archive of a directory. In this case, the destination file will need
+ // to be created when the archive is extracted and the source content
+ // entry will have to be renamed to have a basename which matches the
+ // destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ }
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+ if oldBase == string(os.PathSeparator) {
+ // If oldBase specifies the root directory, use an empty string as
+ // oldBase instead so that newBase doesn't replace the path separator
+ // that all paths will start with.
+ oldBase = ""
+ }
+
+ rebased, w := io.Pipe()
+
+ go func() {
+ srcTar := tar.NewReader(srcContent)
+ rebasedTar := tar.NewWriter(w)
+
+ for {
+ hdr, err := srcTar.Next()
+ if err == io.EOF {
+ // Signals end of archive.
+ rebasedTar.Close()
+ w.Close()
+ return
+ }
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ // srcContent tar stream, as served by TarWithOptions(), is
+ // definitely in PAX format, but tar.Next() mistakenly guesses it
+ // as USTAR, which creates a problem: if the newBase is >100
+ // characters long, WriteHeader() returns an error like
+ // "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
+ //
+ // To fix, set the format to PAX here. See docker/for-linux issue #484.
+ hdr.Format = tar.FormatPAX
+ hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+ if hdr.Typeflag == tar.TypeLink {
+ hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+ }
+
+ if err = rebasedTar.WriteHeader(hdr); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return rebased
+}
+
+// TODO @gupta-ak. These might have to be changed in the future to be
+// continuity driver aware as well to support LCOW.
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+ var (
+ srcInfo CopyInfo
+ err error
+ )
+
+ // Ensure in platform semantics
+ srcPath = normalizePath(srcPath)
+ dstPath = normalizePath(dstPath)
+
+ // Clean the source and destination paths.
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
+ dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
+
+ if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+ return err
+ }
+
+ content, err := TarResource(srcInfo)
+ if err != nil {
+ return err
+ }
+ defer content.Close()
+
+ return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+ // The destination path need not exist, but CopyInfoDestinationPath will
+ // ensure that at least the parent directory exists.
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+ if err != nil {
+ return err
+ }
+
+ dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer copyArchive.Close()
+
+ options := &TarOptions{
+ NoLchown: true,
+ NoOverwriteDirNonDir: true,
+ }
+
+ return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides which real path needs to be copied, depending on
+// whether symbolic links should be followed. If followLink is true, resolvedPath
+// is the link target of any symbolic link file; otherwise only the symlink of the
+// parent directory is resolved, and a symlink file itself is returned unresolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+ if followLink {
+ resolvedPath, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return
+ }
+
+ resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+ } else {
+ dirPath, basePath := filepath.Split(path)
+
+ // If not following the symlink, resolve only the symlink of the parent dir.
+ var resolvedDirPath string
+ resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+ if err != nil {
+ return
+ }
+ // resolvedDirPath will have been cleaned (no trailing path separators) so
+ // we can manually join it with the base path element.
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+ if hasTrailingPathSeparator(path, os.PathSeparator) &&
+ filepath.Base(path) != filepath.Base(resolvedPath) {
+ rebaseName = filepath.Base(path)
+ }
+ }
+ return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, and
+// returns the completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+ // resolvedPath will have been cleaned (no trailing path separator or "."),
+ // so we may need to manually re-append them below.
+ var rebaseName string
+ if specifiesCurrentDir(path) &&
+ !specifiesCurrentDir(resolvedPath) {
+ resolvedPath += string(filepath.Separator) + "."
+ }
+
+ if hasTrailingPathSeparator(path, os.PathSeparator) &&
+ !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
+ resolvedPath += string(filepath.Separator)
+ }
+
+ if filepath.Base(path) != filepath.Base(resolvedPath) {
+ // In the case where the path had a trailing separator and a symlink
+ // evaluation has changed the last path component, we will need to
+ // rebase the name in the archive that is being copied to match the
+ // originally requested name.
+ rebaseName = filepath.Base(path)
+ }
+ return resolvedPath, rebaseName
+}
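
A hedged usage sketch for the copy helpers added above; the source and destination paths are made up, and error handling is kept minimal. A trailing separator or `/.` on the original paths is preserved by PreserveTrailingDotOrSeparator inside CopyResource and affects how the destination is interpreted.

package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Copy /tmp/src to /tmp/dst, following symlinks on the source side.
	if err := archive.CopyResource("/tmp/src", "/tmp/dst", true); err != nil {
		log.Fatal(err)
	}
}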
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
new file mode 100644
index 000000000..3958364f5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.ToSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
new file mode 100644
index 000000000..a878d1bac
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.FromSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go
new file mode 100644
index 000000000..27897e6ab
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/diff.go
@@ -0,0 +1,260 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
+ tr := tar.NewReader(layer)
+ trBuf := pools.BufioReader32KPool.Get(tr)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ unpackedPaths := make(map[string]struct{})
+
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+ idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+
+ aufsTempdir := ""
+ aufsHardlinks := make(map[string]*tar.Header)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ size += hdr.Size
+
+ // Normalize name, for safety and for a simple is-root check
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ // Windows does not support filenames with colons in them. Ignore
+ // these files. This is not a problem though (although it might
+ // appear that it is). Let's suppose a client is running docker pull.
+ // The daemon it points to is Windows. Would it make sense for the
+ // client to be doing a docker pull Ubuntu for example (which has files
+ // with colons in the name under /usr/share/man/man3)? No, absolutely
+ // not as it would really only make sense that they were pulling a
+ // Windows image. However, for development, it is necessary to be able
+ // to pull Linux images which are in the repository.
+ //
+ // TODO Windows. Once the registry is aware of what images are Windows-
+ // specific or Linux-specific, this warning should be changed to an error
+ // to cater for the situation where someone does manage to upload a Linux
+ // image but have it tagged as Windows inadvertently.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(hdr.Name, ":") {
+ logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+ continue
+ }
+ }
+
+ // Since these operations are platform specific, the separator must be too.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists.
+ // This happened in some tests where an image had a tarfile without any
+ // parent directories.
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = system.MkdirAll(parentPath, 0600)
+ if err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ // Skip AUFS metadata dirs
+ if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+ // Regular files inside /.wh..wh.plnk can be used as hardlink targets
+ // We don't want this directory, but we need the files in it so that
+ // such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+ return 0, err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
+ return 0, err
+ }
+ }
+
+ if hdr.Name != WhiteoutOpaqueDir {
+ continue
+ }
+ }
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return 0, err
+ }
+
+ // Since these operations are platform specific, the separator must be too.
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
+
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ dir := filepath.Dir(path)
+ if base == WhiteoutOpaqueDir {
+ _, err := os.Lstat(dir)
+ if err != nil {
+ return 0, err
+ }
+ err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = nil // parent was deleted
+ }
+ return err
+ }
+ if path == dir {
+ return nil
+ }
+ if _, exists := unpackedPaths[path]; !exists {
+ err := os.RemoveAll(path)
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return 0, err
+ }
+ }
+ } else {
+ // If the path exists, we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ trBuf.Reset(tr)
+ srcData := io.Reader(trBuf)
+ srcHdr := hdr
+
+ // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+ // we manually retarget these into the temporary files we extracted them into
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+ linkBasename := filepath.Base(hdr.Linkname)
+ srcHdr = aufsHardlinks[linkBasename]
+ if srcHdr == nil {
+ return 0, fmt.Errorf("Invalid aufs hardlink")
+ }
+ tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+ if err != nil {
+ return 0, err
+ }
+ defer tmpFile.Close()
+ srcData = tmpFile
+ }
+
+ if err := remapIDs(idMapping, srcHdr); err != nil {
+ return 0, err
+ }
+
+ if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil {
+ return 0, err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ unpackedPaths[path] = struct{}{}
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return 0, err
+ }
+ }
+
+ return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer io.Reader) (int64, error) {
+ return applyLayerHandler(dest, layer, &TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
+ return applyLayerHandler(dest, layer, options, false)
+}
+
+// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
+func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
+ dest = filepath.Clean(dest)
+
+ // We need to be able to set any perms
+ if runtime.GOOS != "windows" {
+ oldmask, err := system.Umask(0)
+ if err != nil {
+ return 0, err
+ }
+ defer system.Umask(oldmask)
+ }
+
+ if decompress {
+ decompLayer, err := DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ defer decompLayer.Close()
+ layer = decompLayer
+ }
+ return UnpackLayer(dest, layer, options)
+}
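
A short, hedged usage sketch of the exported ApplyLayer above; the layer tarball and rootfs directory are hypothetical.

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	layer, err := os.Open("/tmp/layer.tar.gz") // may be compressed; ApplyLayer decompresses it
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	size, err := archive.ApplyLayer("/tmp/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied %d bytes", size)
}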
diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
new file mode 100644
index 000000000..495db809e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/docker/docker/pkg/archive"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ flDebug = flag.Bool("D", false, "debugging output")
+ flNewDir = flag.String("newdir", "", "")
+ flOldDir = flag.String("olddir", "", "")
+ log = logrus.New()
+)
+
+func main() {
+ flag.Usage = func() {
+ fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+ fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+ flag.PrintDefaults()
+ }
+ flag.Parse()
+ log.Out = os.Stderr
+ if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+ var newDir, oldDir string
+
+ if len(*flNewDir) == 0 {
+ var err error
+ newDir, err = ioutil.TempDir("", "docker-test-newDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(newDir)
+ if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ newDir = *flNewDir
+ }
+
+ if len(*flOldDir) == 0 {
+ oldDir, err := ioutil.TempDir("", "docker-test-oldDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(oldDir)
+ } else {
+ oldDir = *flOldDir
+ }
+
+ changes, err := archive.ChangesDirs(newDir, oldDir)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ a, err := archive.ExportChanges(newDir, changes)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer a.Close()
+
+ i, err := io.Copy(os.Stdout, a)
+ if err != nil && err != io.EOF {
+ log.Fatal(err)
+ }
+ fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeLinks {
+ if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
new file mode 100644
index 000000000..797143ee8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ if time.IsZero() {
+ // Return UTIME_OMIT special value
+ ts.Sec = 0
+ ts.Nsec = (1 << 30) - 2
+ return
+ }
+ return syscall.NsecToTimespec(time.UnixNano())
+}
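
The zero-time branch above encodes the UTIME_OMIT sentinel ((1<<30)-2 nanoseconds), which tells utimensat(2) to leave that timestamp untouched. A tiny sketch, assuming golang.org/x/sys/unix on Linux:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// The same sentinel value that timeToTimespec returns for a zero time.
	ts := unix.Timespec{Sec: 0, Nsec: (1 << 30) - 2}
	fmt.Println(ts.Nsec == unix.UTIME_OMIT) // true
}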
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
new file mode 100644
index 000000000..f58bf227f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ nsec := int64(0)
+ if !time.IsZero() {
+ nsec = time.UnixNano()
+ }
+ return syscall.NsecToTimespec(nsec)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
index a61c22a08..4c072a87e 100644
--- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go
+++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
@@ -1,8 +1,4 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
-package archive
+package archive // import "github.com/docker/docker/pkg/archive"
// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go
new file mode 100644
index 000000000..85435694c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "bytes"
+ "io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+ files := parseStringPairs(input...)
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, file := range files {
+ name, content := file[0], file[1]
+ hdr := &tar.Header{
+ Name: name,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write([]byte(content)); err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+ output = make([][2]string, 0, len(input)/2+1)
+ for i := 0; i < len(input); i += 2 {
+ var pair [2]string
+ pair[0] = input[i]
+ if i+1 < len(input) {
+ pair[1] = input[i+1]
+ }
+ output = append(output, pair)
+ }
+ return
+}
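
A short usage sketch for Generate as documented above; piping the output into `tar tv` would show the two entries:

package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Two entries: foo.txt with content, emptyfile with empty content.
	r, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	// Stream the in-memory tar to stdout.
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
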
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
index 47ecd0c09..5e6310fdc 100644
--- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
@@ -5,24 +5,8 @@ import (
"os"
"path/filepath"
"strings"
-
- "github.com/docker/docker/pkg/idtools"
)
-// GetStatic returns the home directory for the current user without calling
-// os/user.Current(). This is useful for static-linked binary on glibc-based
-// system, because a call to os/user.Current() in a static binary leads to
-// segfault due to a glibc issue that won't be fixed in a short term.
-// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
-func GetStatic() (string, error) {
- uid := os.Getuid()
- usr, err := idtools.LookupUID(uid)
- if err != nil {
- return "", err
- }
- return usr.Home, nil
-}
-
// GetRuntimeDir returns XDG_RUNTIME_DIR.
// XDG_RUNTIME_DIR is typically configured via pam_systemd.
// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
index f0a363ded..67ab9e9b3 100644
--- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
@@ -6,12 +6,6 @@ import (
"errors"
)
-// GetStatic is not needed for non-linux systems.
-// (Precisely, it is needed only for glibc-based linux systems.)
-func GetStatic() (string, error) {
- return "", errors.New("homedir.GetStatic() is not supported on this system")
-}
-
// GetRuntimeDir is unsupported on non-linux system.
func GetRuntimeDir() (string, error) {
return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
index d85e12448..284e8be7c 100644
--- a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
@@ -4,8 +4,7 @@ package homedir // import "github.com/docker/docker/pkg/homedir"
import (
"os"
-
- "github.com/opencontainers/runc/libcontainer/user"
+ "os/user"
)
// Key returns the env var name for the user's home dir based on
@@ -17,11 +16,13 @@ func Key() string {
// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
+// If compiling statically, ensure the osusergo build tag is used.
+// If needing to do nss lookups, do not compile statically.
func Get() string {
home := os.Getenv(Key())
if home == "" {
- if u, err := user.CurrentUser(); err == nil {
- return u.Home
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
}
}
return home
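
A minimal usage sketch of the fallback described in the comment above; the -tags osusergo note mirrors the static-linking advice and is not part of this package:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/homedir"
)

func main() {
	// Uses $HOME (or the platform equivalent from homedir.Key()) and falls back
	// to os/user.Current(). For static binaries, build with: go build -tags osusergo
	fmt.Println(homedir.Get())
}
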
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
index fb239743a..3981ff64d 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
@@ -59,7 +59,7 @@ func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting
paths = append(paths, dirPath)
}
}
- if err := system.MkdirAll(path, mode, ""); err != nil {
+ if err := system.MkdirAll(path, mode); err != nil {
return err
}
} else {
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
index 4ae38a1b1..35ede0fff 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
@@ -11,7 +11,7 @@ import (
// Ownership is handled elsewhere, but in the future could be support here
// too.
func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
- if err := system.MkdirAll(path, mode, ""); err != nil {
+ if err := system.MkdirAll(path, mode); err != nil {
return err
}
return nil
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
index 99a32ae05..6d6640898 100644
--- a/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -1,20 +1,15 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
-package jsonmessage
+package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage"
import (
"encoding/json"
"fmt"
"io"
- "os"
"strings"
"time"
+ "github.com/docker/docker/pkg/term"
units "github.com/docker/go-units"
- "github.com/fsouza/go-dockerclient/internal/term"
- gotty "github.com/ijc/Gotty"
+ "github.com/morikuni/aec"
)
// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
@@ -144,100 +139,34 @@ type JSONMessage struct {
Stream string `json:"stream,omitempty"`
Status string `json:"status,omitempty"`
Progress *JSONProgress `json:"progressDetail,omitempty"`
- ProgressMessage string `json:"progress,omitempty"` // deprecated
+ ProgressMessage string `json:"progress,omitempty"` //deprecated
ID string `json:"id,omitempty"`
From string `json:"from,omitempty"`
Time int64 `json:"time,omitempty"`
TimeNano int64 `json:"timeNano,omitempty"`
Error *JSONError `json:"errorDetail,omitempty"`
- ErrorMessage string `json:"error,omitempty"` // deprecated
+ ErrorMessage string `json:"error,omitempty"` //deprecated
// Aux contains out-of-band data, such as digests for push signing and image id after building.
Aux *json.RawMessage `json:"aux,omitempty"`
}
-/* Satisfied by gotty.TermInfo as well as noTermInfo from below */
-type termInfo interface {
- Parse(attr string, params ...interface{}) (string, error)
-}
-
-type noTermInfo struct{} // canary used when no terminfo.
-
-func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) {
- return "", fmt.Errorf("noTermInfo")
+func clearLine(out io.Writer) {
+ eraseMode := aec.EraseModes.All
+ cl := aec.EraseLine(eraseMode)
+ fmt.Fprint(out, cl)
}
-func clearLine(out io.Writer, ti termInfo) error {
- // el2 (clear whole line) is not exposed by terminfo.
-
- // First clear line from beginning to cursor
- if attr, err := ti.Parse("el1"); err == nil {
- _, err = fmt.Fprintf(out, "%s", attr)
- if err != nil {
- return err
- }
- } else {
- _, err := fmt.Fprintf(out, "\x1b[1K")
- if err != nil {
- return err
- }
- }
- // Then clear line from cursor to end
- if attr, err := ti.Parse("el"); err == nil {
- _, err = fmt.Fprintf(out, "%s", attr)
- if err != nil {
- return err
- }
- } else {
- _, err := fmt.Fprintf(out, "\x1b[K")
- if err != nil {
- return err
- }
- }
-
- return nil
+func cursorUp(out io.Writer, l uint) {
+ fmt.Fprint(out, aec.Up(l))
}
-func cursorUp(out io.Writer, ti termInfo, l int) error {
- if l == 0 { // Should never be the case, but be tolerant
- return nil
- }
- if attr, err := ti.Parse("cuu", l); err == nil {
- _, err = fmt.Fprintf(out, "%s", attr)
- if err != nil {
- return err
- }
- } else {
- _, err := fmt.Fprintf(out, "\x1b[%dA", l)
- if err != nil {
- return err
- }
- }
- return nil
+func cursorDown(out io.Writer, l uint) {
+ fmt.Fprint(out, aec.Down(l))
}
-func cursorDown(out io.Writer, ti termInfo, l int) error {
- if l == 0 { // Should never be the case, but be tolerant
- return nil
- }
- if attr, err := ti.Parse("cud", l); err == nil {
- _, err = fmt.Fprintf(out, "%s", attr)
- if err != nil {
- return err
- }
- } else {
- _, err := fmt.Fprintf(out, "\x1b[%dB", l)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out`
-// is a terminal. If this is the case, it will erase the entire current line
-// when displaying the progressbar.
-func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
+// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the
+// entire current line when displaying the progressbar.
+func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
if jm.Error != nil {
if jm.Error.Code == 401 {
return fmt.Errorf("authentication is required")
@@ -245,59 +174,32 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
return jm.Error
}
var endl string
- if termInfo != nil && jm.Stream == "" && jm.Progress != nil {
- clearLine(out, termInfo)
+ if isTerminal && jm.Stream == "" && jm.Progress != nil {
+ clearLine(out)
endl = "\r"
- _, err := fmt.Fprint(out, endl)
- if err != nil {
- return err
- }
- } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal
+ fmt.Fprintf(out, endl)
+ } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
return nil
}
if jm.TimeNano != 0 {
- _, err := fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
- if err != nil {
- return err
- }
+ fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
} else if jm.Time != 0 {
- _, err := fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
- if err != nil {
- return err
- }
+ fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
}
if jm.ID != "" {
- _, err := fmt.Fprintf(out, "%s: ", jm.ID)
- if err != nil {
- return err
- }
+ fmt.Fprintf(out, "%s: ", jm.ID)
}
if jm.From != "" {
- _, err := fmt.Fprintf(out, "(from %s) ", jm.From)
- if err != nil {
- return err
- }
+ fmt.Fprintf(out, "(from %s) ", jm.From)
}
- if jm.Progress != nil && termInfo != nil {
- _, err := fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
- if err != nil {
- return err
- }
- } else if jm.ProgressMessage != "" { // deprecated
- _, err := fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
- if err != nil {
- return err
- }
+ if jm.Progress != nil && isTerminal {
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
+ } else if jm.ProgressMessage != "" { //deprecated
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
} else if jm.Stream != "" {
- _, err := fmt.Fprintf(out, "%s%s", jm.Stream, endl)
- if err != nil {
- return err
- }
+ fmt.Fprintf(out, "%s%s", jm.Stream, endl)
} else {
- _, err := fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
- if err != nil {
- return err
- }
+ fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
}
return nil
}
@@ -308,25 +210,11 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error {
var (
dec = json.NewDecoder(in)
- ids = make(map[string]int)
+ ids = make(map[string]uint)
)
- var termInfo termInfo
-
- if isTerminal {
- term := os.Getenv("TERM")
- if term == "" {
- term = "vt102"
- }
-
- var err error
- if termInfo, err = gotty.OpenTermInfo(term); err != nil {
- termInfo = &noTermInfo{}
- }
- }
-
for {
- diff := 0
+ var diff uint
var jm JSONMessage
if err := dec.Decode(&jm); err != nil {
if err == io.EOF {
@@ -354,20 +242,15 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr,
// when we output something that's not
// accounted for in the map, such as a line
// with no ID.
- line = len(ids)
+ line = uint(len(ids))
ids[jm.ID] = line
- if termInfo != nil {
- _, err := fmt.Fprintf(out, "\n")
- if err != nil {
- return err
- }
+ if isTerminal {
+ fmt.Fprintf(out, "\n")
}
}
- diff = len(ids) - line
- if termInfo != nil {
- if err := cursorUp(out, termInfo, diff); err != nil {
- return err
- }
+ diff = uint(len(ids)) - line
+ if isTerminal {
+ cursorUp(out, diff)
}
} else {
// When outputting something that isn't progress
@@ -375,13 +258,11 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr,
// don't want progress entries from some previous
// operation to be updated (for example, pull -a
// with multiple tags).
- ids = make(map[string]int)
+ ids = make(map[string]uint)
}
- err := jm.Display(out, termInfo)
- if jm.ID != "" && termInfo != nil {
- if err := cursorDown(out, termInfo, diff); err != nil {
- return err
- }
+ err := jm.Display(out, isTerminal)
+ if jm.ID != "" && isTerminal {
+ cursorDown(out, diff)
}
if err != nil {
return err
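
For illustration, a standalone sketch of the aec calls the rewritten helpers rely on (assumes a terminal on stdout):

package main

import (
	"fmt"
	"os"

	"github.com/morikuni/aec"
)

func main() {
	fmt.Fprintln(os.Stdout, "line one")
	fmt.Fprintln(os.Stdout, "line two")
	// Move up one line, erase it, and rewrite it in place, as cursorUp/clearLine do.
	fmt.Fprint(os.Stdout, aec.Up(1))
	fmt.Fprint(os.Stdout, aec.EraseLine(aec.EraseModes.All))
	fmt.Fprintln(os.Stdout, "line two (rewritten)")
}
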
diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go
index 3d68800eb..f3f46055e 100644
--- a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go
+++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go
@@ -587,9 +587,6 @@ var (
// Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf
"mestorf",
- // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky
- "minsky",
-
// Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani
"mirzakhani",
@@ -737,9 +734,6 @@ var (
// Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence
"spence",
- // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman
- "stallman",
-
// Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker
"stonebraker",
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go
index 6e599eebc..6a302dcee 100644
--- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go
@@ -9,7 +9,7 @@ import (
"os/exec"
"strings"
- "github.com/mattn/go-shellwords"
+ shellwords "github.com/mattn/go-shellwords"
)
// GetKernelVersion gets the current kernel version.
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go
index b7b15a1fd..a04763872 100644
--- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go
@@ -36,7 +36,7 @@ func GetKernelVersion() (*VersionInfo, error) {
}
KVI.kvi = blex
- // Important - docker.exe MUST be manifested for this API to return
+ // Important - dockerd.exe MUST be manifested for this API to return
// the correct information.
dwVersion, err := windows.GetVersion()
if err != nil {
@@ -44,7 +44,7 @@ func GetKernelVersion() (*VersionInfo, error) {
}
KVI.major = int(dwVersion & 0xFF)
- KVI.minor = int((dwVersion & 0XFF00) >> 8)
+ KVI.minor = int((dwVersion & 0xFF00) >> 8)
KVI.build = int((dwVersion & 0xFFFF0000) >> 16)
return KVI, nil
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go
deleted file mode 100644
index b2139b60e..000000000
--- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package kernel // import "github.com/docker/docker/pkg/parsers/kernel"
-
-import (
- "golang.org/x/sys/unix"
-)
-
-func uname() (*unix.Utsname, error) {
- uts := &unix.Utsname{}
-
- if err := unix.Uname(uts); err != nil {
- return nil, err
- }
- return uts, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
index 46339c282..3b978fd3b 100644
--- a/vendor/github.com/docker/docker/pkg/pools/pools.go
+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go
@@ -72,6 +72,7 @@ func (bp *bufferPool) Get() []byte {
}
func (bp *bufferPool) Put(b []byte) {
+ //nolint:staticcheck // TODO changing this to a pointer makes tests fail. Investigate if we should change or not (otherwise remove this TODO)
bp.pool.Put(b)
}
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go
index adeb16305..dcee3e9f9 100644
--- a/vendor/github.com/docker/docker/pkg/system/filesys.go
+++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go
@@ -8,14 +8,14 @@ import (
"path/filepath"
)
-// MkdirAllWithACL is a wrapper for MkdirAll on unix systems.
+// MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems.
func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
- return MkdirAll(path, perm, sddl)
+ return os.MkdirAll(path, perm)
}
// MkdirAll creates a directory named path along with any necessary parents,
// with permission specified by attribute perm for all dir created.
-func MkdirAll(path string, perm os.FileMode, sddl string) error {
+func MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
index 3049ff38a..7cebd6efc 100644
--- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -26,9 +26,10 @@ func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
return mkdirall(path, true, sddl)
}
-// MkdirAll implementation that is volume path aware for Windows.
-func MkdirAll(path string, _ os.FileMode, sddl string) error {
- return mkdirall(path, false, sddl)
+// MkdirAll implementation that is volume path aware for Windows. It can be used
+// as a drop-in replacement for os.MkdirAll()
+func MkdirAll(path string, _ os.FileMode) error {
+ return mkdirall(path, false, "")
}
// mkdirall is a custom version of os.MkdirAll modified for use on Windows
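
A tiny sketch of the simplified signature: system.MkdirAll is now shaped like os.MkdirAll on every platform, with the Windows build staying volume-path aware (the path below is a placeholder):

package main

import (
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// Drop-in replacement for os.MkdirAll(path, perm); no SDDL argument anymore.
	if err := system.MkdirAll("/tmp/example/nested/dir", 0755); err != nil {
		log.Fatal(err)
	}
}
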
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
index d79e8b076..cd060eff2 100644
--- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -7,7 +7,7 @@ import (
"strconv"
"strings"
- "github.com/docker/go-units"
+ units "github.com/docker/go-units"
)
// ReadMemInfo retrieves memory statistics of the host system and returns a
@@ -27,6 +27,7 @@ func ReadMemInfo() (*MemInfo, error) {
func parseMemInfo(reader io.Reader) (*MemInfo, error) {
meminfo := &MemInfo{}
scanner := bufio.NewScanner(reader)
+ memAvailable := int64(-1)
for scanner.Scan() {
// Expected format: ["MemTotal:", "1234", "kB"]
parts := strings.Fields(scanner.Text())
@@ -48,6 +49,8 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) {
meminfo.MemTotal = bytes
case "MemFree:":
meminfo.MemFree = bytes
+ case "MemAvailable:":
+ memAvailable = bytes
case "SwapTotal:":
meminfo.SwapTotal = bytes
case "SwapFree:":
@@ -55,6 +58,9 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) {
}
}
+ if memAvailable != -1 {
+ meminfo.MemFree = memAvailable
+ }
// Handle errors that may have occurred during the reading of the file.
if err := scanner.Err(); err != nil {
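
A small usage sketch of the behavior added above: on kernels that expose MemAvailable, ReadMemInfo now reports it as MemFree:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	info, err := system.ReadMemInfo()
	if err != nil {
		log.Fatal(err)
	}
	// With this patch, MemFree reflects MemAvailable when /proc/meminfo provides it.
	fmt.Printf("total=%d free=%d swap_total=%d swap_free=%d\n",
		info.MemTotal, info.MemFree, info.SwapTotal, info.SwapFree)
}
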
diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go
index a3d957afa..64e892289 100644
--- a/vendor/github.com/docker/docker/pkg/system/path.go
+++ b/vendor/github.com/docker/docker/pkg/system/path.go
@@ -5,8 +5,6 @@ import (
"path/filepath"
"runtime"
"strings"
-
- "github.com/containerd/continuity/pathdriver"
)
const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
@@ -27,6 +25,12 @@ func DefaultPathEnv(os string) string {
}
+// PathVerifier defines the subset of a PathDriver that CheckSystemDriveAndRemoveDriveLetter
+// actually uses in order to avoid system depending on containerd/continuity.
+type PathVerifier interface {
+ IsAbs(string) bool
+}
+
// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
// is the system drive.
// On Linux: this is a no-op.
@@ -42,7 +46,7 @@ func DefaultPathEnv(os string) string {
// a --> a
// /a --> \a
// d:\ --> Fail
-func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) {
+func CheckSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) {
if runtime.GOOS != "windows" || LCOWSupported() {
return path, nil
}
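
The new PathVerifier interface only needs IsAbs, so callers no longer have to pull in containerd/continuity. A hedged sketch with an illustrative adapter type (not part of the package):

package main

import (
	"fmt"
	"log"
	"path/filepath"

	"github.com/docker/docker/pkg/system"
)

// stdVerifier is a hypothetical adapter; any value with IsAbs(string) bool satisfies PathVerifier.
type stdVerifier struct{}

func (stdVerifier) IsAbs(p string) bool { return filepath.IsAbs(p) }

func main() {
	p, err := system.CheckSystemDriveAndRemoveDriveLetter(`C:\foo`, stdVerifier{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p) // "\foo" on Windows; returned unchanged on Linux
}
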
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
index 98c9eb18d..17d5d131a 100644
--- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
@@ -8,7 +8,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
mode: s.Mode,
uid: s.Uid,
gid: s.Gid,
- rdev: s.Rdev,
+ // the type is 32bit on mips
+ rdev: uint64(s.Rdev), // nolint: unconvert
mtim: s.Mtim}, nil
}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
deleted file mode 100644
index 756b92d1e..000000000
--- a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package system // import "github.com/docker/docker/pkg/system"
-
-import "syscall"
-
-// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
-func fromStatT(s *syscall.Stat_t) (*StatT, error) {
- return &StatT{size: s.Size,
- mode: uint32(s.Mode),
- uid: s.Uid,
- gid: s.Gid,
- rdev: uint64(s.Rdev),
- mtim: s.Mtim}, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
index 4ae92fa6c..eb19f9c85 100644
--- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
@@ -55,7 +55,6 @@ var (
ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procGetVersionExW = modkernel32.NewProc("GetVersionExW")
- procGetProductInfo = modkernel32.NewProc("GetProductInfo")
procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW")
procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl")
)
@@ -85,7 +84,7 @@ type osVersionInfoEx struct {
}
// GetOSVersion gets the operating system version on Windows. Note that
-// docker.exe must be manifested to get the correct version information.
+// dockerd.exe must be manifested to get the correct version information.
func GetOSVersion() OSVersion {
var err error
osv := OSVersion{}
@@ -118,22 +117,6 @@ func IsWindowsClient() bool {
return osviex.ProductType == verNTWorkstation
}
-// IsIoTCore returns true if the currently running image is based off of
-// Windows 10 IoT Core.
-// @engine maintainers - this function should not be removed or modified as it
-// is used to enforce licensing restrictions on Windows.
-func IsIoTCore() bool {
- var returnedProductType uint32
- r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType)))
- if r1 == 0 {
- logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err)
- return false
- }
- const productIoTUAP = 0x0000007B
- const productIoTUAPCommercial = 0x00000083
- return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial
-}
-
// Unmount is a platform-specific helper function to call
// the unmount syscall. Not supported on Windows
func Unmount(dest string) error {
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
deleted file mode 100644
index 0afe85458..000000000
--- a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package system // import "github.com/docker/docker/pkg/system"
-
-import (
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// LUtimesNano is used to change access and modification time of the specified path.
-// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm.
-func LUtimesNano(path string, ts []syscall.Timespec) error {
- atFdCwd := unix.AT_FDCWD
-
- var _path *byte
- _path, err := unix.BytePtrFromString(path)
- if err != nil {
- return err
- }
- if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go
index ed1b9fad5..61ba8c474 100644
--- a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go
@@ -1,8 +1,9 @@
+// +build linux freebsd
+
package system // import "github.com/docker/docker/pkg/system"
import (
"syscall"
- "unsafe"
"golang.org/x/sys/unix"
)
@@ -10,13 +11,12 @@ import (
// LUtimesNano is used to change access and modification time of the specified path.
// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm.
func LUtimesNano(path string, ts []syscall.Timespec) error {
- var _path *byte
- _path, err := unix.BytePtrFromString(path)
- if err != nil {
- return err
+ uts := []unix.Timespec{
+ unix.NsecToTimespec(syscall.TimespecToNsec(ts[0])),
+ unix.NsecToTimespec(syscall.TimespecToNsec(ts[1])),
}
-
- if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
+ err := unix.UtimesNanoAt(unix.AT_FDCWD, path, uts, unix.AT_SYMLINK_NOFOLLOW)
+ if err != nil && err != unix.ENOSYS {
return err
}
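
A hedged sketch of the call the rewrite switches to, updating a symlink's own timestamps without following it (the path is a placeholder):

package main

import (
	"log"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	now := unix.NsecToTimespec(time.Now().UnixNano())
	ts := []unix.Timespec{now, now} // atime, then mtime
	// AT_SYMLINK_NOFOLLOW touches the link itself rather than its target.
	if err := unix.UtimesNanoAt(unix.AT_FDCWD, "/tmp/some-symlink", ts, unix.AT_SYMLINK_NOFOLLOW); err != nil {
		log.Fatal(err)
	}
}
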
diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go
index a3c3db131..6e83b59e9 100644
--- a/vendor/github.com/docker/docker/pkg/term/term_windows.go
+++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go
@@ -7,7 +7,7 @@ import (
"syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE
"github.com/Azure/go-ansiterm/winterm"
- "github.com/docker/docker/pkg/term/windows"
+ windowsconsole "github.com/docker/docker/pkg/term/windows"
)
// State holds the console mode for the terminal.
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
index 3e5593ca6..7e8f265d4 100644
--- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go
+++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
@@ -1,3 +1,4 @@
+// +build windows
// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
@@ -9,7 +10,7 @@ import (
"os"
"sync"
- "github.com/Azure/go-ansiterm"
+ ansiterm "github.com/Azure/go-ansiterm"
"github.com/sirupsen/logrus"
)
diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json
index 250a03e13..71ac412df 100644
--- a/vendor/github.com/docker/docker/profiles/seccomp/default.json
+++ b/vendor/github.com/docker/docker/profiles/seccomp/default.json
@@ -167,6 +167,9 @@
"ioprio_set",
"io_setup",
"io_submit",
+ "io_uring_enter",
+ "io_uring_register",
+ "io_uring_setup",
"ipc",
"kill",
"lchown",
@@ -314,6 +317,7 @@
"sigaltstack",
"signalfd",
"signalfd4",
+ "sigprocmask",
"sigreturn",
"socket",
"socketcall",
diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go
index 9f222a6ee..12721a120 100644
--- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go
+++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go
@@ -143,20 +143,18 @@ Loop:
}
if call.Name != "" {
- newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Name, call.Action, call.Args))
- }
-
- for _, n := range call.Names {
- newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(n, call.Action, call.Args))
+ newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args))
+ } else {
+ newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args))
}
}
return newConfig, nil
}
-func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.LinuxSyscall {
+func createSpecsSyscall(names []string, action types.Action, args []*types.Arg) specs.LinuxSyscall {
newCall := specs.LinuxSyscall{
- Names: []string{name},
+ Names: names,
Action: specs.LinuxSeccompAction(action),
}
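
For illustration, the grouped rule shape produced by the change above (one LinuxSyscall entry covering several names), using the runtime-spec types this package already imports:

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// A single seccomp rule that allows a whole group of syscalls at once.
	rule := specs.LinuxSyscall{
		Names:  []string{"io_uring_enter", "io_uring_register", "io_uring_setup"},
		Action: specs.ActAllow,
	}
	fmt.Printf("%+v\n", rule)
}
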
diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go
index 53333f43e..16148b408 100644
--- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go
+++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go
@@ -160,6 +160,9 @@ func DefaultProfile() *types.Seccomp {
"ioprio_set",
"io_setup",
"io_submit",
+ "io_uring_enter",
+ "io_uring_register",
+ "io_uring_setup",
"ipc",
"kill",
"lchown",
@@ -307,6 +310,7 @@ func DefaultProfile() *types.Seccomp {
"sigaltstack",
"signalfd",
"signalfd4",
+ "sigprocmask",
"sigreturn",
"socket",
"socketcall",
diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitattributes b/vendor/github.com/fsouza/go-dockerclient/.gitattributes
new file mode 100644
index 000000000..6313b56c5
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/.gitattributes
@@ -0,0 +1 @@
+* text=auto eol=lf
diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitignore b/vendor/github.com/fsouza/go-dockerclient/.gitignore
index ef22245ea..5f6b48eae 100644
--- a/vendor/github.com/fsouza/go-dockerclient/.gitignore
+++ b/vendor/github.com/fsouza/go-dockerclient/.gitignore
@@ -1,4 +1,2 @@
# temporary symlink for testing
testing/data/symlink
-Gopkg.lock
-vendor/
diff --git a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml
new file mode 100644
index 000000000..aa3ab39e5
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml
@@ -0,0 +1,29 @@
+run:
+ deadline: 5m
+
+ skip-dirs:
+ - internal
+
+issues:
+ exclude-rules:
+ - path: _test\.go
+ linters:
+ - bodyclose
+ - goconst
+ - gosec
+ - scopelint
+ - path: testing[/\\].+\.go
+ linters:
+ - gosec
+
+linters:
+ enable-all: true
+ disable:
+ - dupl
+ - errcheck
+ - funlen
+ - gochecknoglobals
+ - gocognit
+ - goconst
+ - lll
+ - wsl
diff --git a/vendor/github.com/fsouza/go-dockerclient/.travis.yml b/vendor/github.com/fsouza/go-dockerclient/.travis.yml
index a02ed3f24..060f9e6a8 100644
--- a/vendor/github.com/fsouza/go-dockerclient/.travis.yml
+++ b/vendor/github.com/fsouza/go-dockerclient/.travis.yml
@@ -1,9 +1,8 @@
dist: xenial
language: go
go:
- - 1.11.x
- 1.12.x
- - 1.13rc1
+ - 1.13.x
os:
- linux
- osx
@@ -17,7 +16,6 @@ env:
- GO111MODULE=on
install:
- travis-scripts/win-setup.bash
- - make testdeps
script:
- travis_wait 25 travis-scripts/run-tests.bash
services:
diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
index a8ae99976..663410f95 100644
--- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS
+++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
@@ -119,6 +119,7 @@ Kevin Xu
Kim, Hirokuni
Kostas Lekkas
Kyle Allan
+Kyle Quest
Yunhee Lee
Liron Levin
Lior Yankovich
diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile
index 858adec1b..e0880ff67 100644
--- a/vendor/github.com/fsouza/go-dockerclient/Makefile
+++ b/vendor/github.com/fsouza/go-dockerclient/Makefile
@@ -1,34 +1,27 @@
.PHONY: \
all \
- staticcheck \
+ lint \
fmt \
- fmtcheck \
pretest \
test \
integration
all: test
-staticcheck:
- GO111MODULE=off go get honnef.co/go/tools/cmd/staticcheck
- staticcheck ./...
-
-fmtcheck:
- if [ -z "$${SKIP_FMT_CHECK}" ]; then [ -z "$$(gofumpt -s -d . | tee /dev/stderr)" ]; fi
+lint:
+ cd /tmp && GO111MODULE=on go get github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+ golangci-lint run
fmt:
GO111MODULE=off go get mvdan.cc/gofumpt
gofumpt -s -w .
-testdeps:
- go mod download
-
-pretest: staticcheck fmtcheck
+pretest: lint
gotest:
go test -race -vet all ./...
-test: testdeps pretest gotest
+test: pretest gotest
integration:
go test -tags docker_integration -run TestIntegration -v
diff --git a/vendor/github.com/fsouza/go-dockerclient/README.md b/vendor/github.com/fsouza/go-dockerclient/README.md
index f310ccc92..b7af7d0b9 100644
--- a/vendor/github.com/fsouza/go-dockerclient/README.md
+++ b/vendor/github.com/fsouza/go-dockerclient/README.md
@@ -118,7 +118,7 @@ All development commands can be seen in the [Makefile](Makefile).
Committed code must pass:
-* [staticcheck](https://staticcheck.io/)
+* [golangci-lint](https://github.com/golangci/golangci-lint)
* [gofumpt](https://github.com/mvdan/gofumpt)
* [go test](https://golang.org/cmd/go/#hdr-Test_packages)
diff --git a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml
index 793d88b7a..d9f374f50 100644
--- a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml
+++ b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml
@@ -6,11 +6,9 @@ environment:
GOPATH: c:\gopath
GOPROXY: https://proxy.golang.org
GO111MODULE: on
- SKIP_FMT_CHECK: 1
matrix:
- - GOVERSION: "1.11.13"
- - GOVERSION: "1.12.9"
- - GOVERSION: "1.13rc1"
+ - GOVERSION: "1.12.10"
+ - GOVERSION: "1.13.1"
install:
- choco install make
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
@@ -18,8 +16,8 @@ install:
- appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.zip
- 7z x go%GOVERSION%.windows-amd64.zip -y -oC:\ > NUL
build_script:
- - make testdeps
+ - make pretest
test_script:
- - make pretest gotest
+ - make gotest
matrix:
fast_finish: true
diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go
index 0062e5c5c..eb1a31716 100644
--- a/vendor/github.com/fsouza/go-dockerclient/auth.go
+++ b/vendor/github.com/fsouza/go-dockerclient/auth.go
@@ -12,6 +12,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "net/http"
"os"
"path"
"strings"
@@ -219,7 +220,7 @@ func (c *Client) AuthCheck(conf *AuthConfiguration) (AuthStatus, error) {
if conf == nil {
return authStatus, errors.New("conf is nil")
}
- resp, err := c.do("POST", "/auth", doOptions{data: conf})
+ resp, err := c.do(http.MethodPost, "/auth", doOptions{data: conf})
if err != nil {
return authStatus, err
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go
index 6f394bfc1..825ba38ab 100644
--- a/vendor/github.com/fsouza/go-dockerclient/client.go
+++ b/vendor/github.com/fsouza/go-dockerclient/client.go
@@ -32,8 +32,8 @@ import (
"time"
"github.com/docker/docker/pkg/homedir"
+ "github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/stdcopy"
- "github.com/fsouza/go-dockerclient/internal/jsonmessage"
)
const (
@@ -54,7 +54,9 @@ var (
ErrInactivityTimeout = errors.New("inactivity time exceeded timeout")
apiVersion112, _ = NewAPIVersion("1.12")
+ apiVersion118, _ = NewAPIVersion("1.18")
apiVersion119, _ = NewAPIVersion("1.19")
+ apiVersion121, _ = NewAPIVersion("1.21")
apiVersion124, _ = NewAPIVersion("1.24")
apiVersion125, _ = NewAPIVersion("1.25")
apiVersion135, _ = NewAPIVersion("1.35")
@@ -269,11 +271,12 @@ func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString stri
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
// See https://github.com/moby/moby/blob/28d7dba41d0c0d9c7f0dafcc79d3c59f2b3f5dc3/client/options.go#L51
func NewClientFromEnv() (*Client, error) {
- client, err := NewVersionedClientFromEnv(os.Getenv("DOCKER_API_VERSION"))
+ apiVersionString := os.Getenv("DOCKER_API_VERSION")
+ client, err := NewVersionedClientFromEnv(apiVersionString)
if err != nil {
return nil, err
}
- client.SkipServerVersionCheck = true
+ client.SkipServerVersionCheck = apiVersionString == ""
return client, nil
}
@@ -397,7 +400,7 @@ func (c *Client) Ping() error {
// See https://goo.gl/wYfgY1 for more details.
func (c *Client) PingWithContext(ctx context.Context) error {
path := "/_ping"
- resp, err := c.do("GET", path, doOptions{context: ctx})
+ resp, err := c.do(http.MethodGet, path, doOptions{context: ctx})
if err != nil {
return err
}
@@ -409,7 +412,7 @@ func (c *Client) PingWithContext(ctx context.Context) error {
}
func (c *Client) getServerAPIVersionString() (version string, err error) {
- resp, err := c.do("GET", "/version", doOptions{})
+ resp, err := c.do(http.MethodGet, "/version", doOptions{})
if err != nil {
return "", err
}
@@ -465,7 +468,7 @@ func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, e
req.Header.Set("User-Agent", userAgent)
if doOptions.data != nil {
req.Header.Set("Content-Type", "application/json")
- } else if method == "POST" {
+ } else if method == http.MethodPost {
req.Header.Set("Content-Type", "plain/text")
}
@@ -520,7 +523,7 @@ func chooseError(ctx context.Context, err error) error {
}
func (c *Client) stream(method, path string, streamOptions streamOptions) error {
- if (method == "POST" || method == "PUT") && streamOptions.in == nil {
+ if (method == http.MethodPost || method == http.MethodPut) && streamOptions.in == nil {
streamOptions.in = bytes.NewReader(nil)
}
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
@@ -529,11 +532,11 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error
return err
}
}
- return c.streamUrl(method, c.getURL(path), streamOptions)
+ return c.streamURL(method, c.getURL(path), streamOptions)
}
-func (c *Client) streamUrl(method, url string, streamOptions streamOptions) error {
- if (method == "POST" || method == "PUT") && streamOptions.in == nil {
+func (c *Client) streamURL(method, url string, streamOptions streamOptions) error {
+ if (method == http.MethodPost || method == http.MethodPut) && streamOptions.in == nil {
streamOptions.in = bytes.NewReader(nil)
}
if !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
@@ -547,7 +550,7 @@ func (c *Client) streamUrl(method, url string, streamOptions streamOptions) erro
return err
}
req.Header.Set("User-Agent", userAgent)
- if method == "POST" {
+ if method == http.MethodPost {
req.Header.Set("Content-Type", "plain/text")
}
for key, val := range streamOptions.headers {
@@ -606,6 +609,7 @@ func (c *Client) streamUrl(method, url string, streamOptions streamOptions) erro
return chooseError(subCtx, err)
}
+ defer resp.Body.Close()
} else {
if resp, err = c.HTTPClient.Do(req.WithContext(subCtx)); err != nil {
if strings.Contains(err.Error(), "connection refused") {
@@ -613,11 +617,11 @@ func (c *Client) streamUrl(method, url string, streamOptions streamOptions) erro
}
return chooseError(subCtx, err)
}
+ defer resp.Body.Close()
if streamOptions.reqSent != nil {
close(streamOptions.reqSent)
}
}
- defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return newError(resp)
}
@@ -776,9 +780,10 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close
errs := make(chan error, 1)
quit := make(chan struct{})
go func() {
- //lint:ignore SA1019 this is needed here
+ //nolint:staticcheck
clientconn := httputil.NewClientConn(dial, nil)
defer clientconn.Close()
+ //nolint:bodyclose
clientconn.Do(req)
if hijackOptions.success != nil {
hijackOptions.success <- struct{}{}
@@ -874,25 +879,26 @@ func (c *Client) getURL(path string) string {
}
func (c *Client) getPath(basepath string, opts interface{}) (string, error) {
+ queryStr, requiredAPIVersion := queryStringVersion(opts)
+ return c.pathVersionCheck(basepath, queryStr, requiredAPIVersion)
+}
+
+func (c *Client) pathVersionCheck(basepath, queryStr string, requiredAPIVersion APIVersion) (string, error) {
urlStr := strings.TrimRight(c.endpointURL.String(), "/")
if c.endpointURL.Scheme == unixProtocol || c.endpointURL.Scheme == namedPipeProtocol {
urlStr = ""
}
- queryStr, requiredAPIVersion := queryStringVersion(opts)
-
if c.requestedAPIVersion != nil {
if c.requestedAPIVersion.GreaterThanOrEqualTo(requiredAPIVersion) {
return fmt.Sprintf("%s/v%s%s?%s", urlStr, c.requestedAPIVersion, basepath, queryStr), nil
- } else {
- return "", fmt.Errorf("API %s requires version %s, requested version %s is insufficient",
- basepath, requiredAPIVersion, c.requestedAPIVersion)
}
+ return "", fmt.Errorf("API %s requires version %s, requested version %s is insufficient",
+ basepath, requiredAPIVersion, c.requestedAPIVersion)
}
if requiredAPIVersion != nil {
return fmt.Sprintf("%s/v%s%s?%s", urlStr, requiredAPIVersion, basepath, queryStr), nil
- } else {
- return fmt.Sprintf("%s%s?%s", urlStr, basepath, queryStr), nil
}
+ return fmt.Sprintf("%s%s?%s", urlStr, basepath, queryStr), nil
}
// getFakeNativeURL returns the URL needed to make an HTTP request over a UNIX
@@ -922,7 +928,7 @@ func queryStringVersion(opts interface{}) (string, APIVersion) {
if value.Kind() != reflect.Struct {
return "", nil
}
- var apiVersion APIVersion = nil
+ var apiVersion APIVersion
items := url.Values(map[string][]string{})
for i := 0; i < value.NumField(); i++ {
field := value.Type().Field(i)
@@ -1002,7 +1008,7 @@ func addQueryStringValue(items url.Values, key string, v reflect.Value) bool {
if vLen > 0 {
for i := 0; i < vLen; i++ {
if addQueryStringValue(items, key, v.Index(i)) {
- valuesAdded += 1
+ valuesAdded++
}
}
}
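
A usage sketch of the adjusted behavior: NewClientFromEnv now skips the server version check only when DOCKER_API_VERSION is unset:

package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// Honors DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH and DOCKER_API_VERSION.
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	if err := client.Ping(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("daemon is reachable")
}
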
diff --git a/vendor/github.com/fsouza/go-dockerclient/client_windows.go b/vendor/github.com/fsouza/go-dockerclient/client_windows.go
index 63d97ec65..731d5c962 100644
--- a/vendor/github.com/fsouza/go-dockerclient/client_windows.go
+++ b/vendor/github.com/fsouza/go-dockerclient/client_windows.go
@@ -32,7 +32,8 @@ func (c *Client) initializeNativeClient(trFunc func() *http.Transport) {
return
}
namedPipePath := c.endpointURL.Path
- dialFunc := func(network, addr string) (net.Conn, error) {
+ //nolint:unparam
+ dialFunc := func(_, addr string) (net.Conn, error) {
timeout := namedPipeConnectTimeout
return winio.DialPipe(namedPipePath, &timeout)
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go
index 898646fbf..0a8ab361c 100644
--- a/vendor/github.com/fsouza/go-dockerclient/container.go
+++ b/vendor/github.com/fsouza/go-dockerclient/container.go
@@ -85,7 +85,7 @@ type NetworkList struct {
// See https://goo.gl/kaOHGw for more details.
func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) {
path := "/containers/json?" + queryString(opts)
- resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
@@ -474,6 +474,12 @@ type Container struct {
RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty" toml:"RestartCount,omitempty"`
AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty" toml:"AppArmorProfile,omitempty"`
+
+ MountLabel string `json:"MountLabel,omitempty" yaml:"MountLabel,omitempty" toml:"MountLabel,omitempty"`
+ ProcessLabel string `json:"ProcessLabel,omitempty" yaml:"ProcessLabel,omitempty" toml:"ProcessLabel,omitempty"`
+ Platform string `json:"Platform,omitempty" yaml:"Platform,omitempty" toml:"Platform,omitempty"`
+ SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty" toml:"SizeRw,omitempty"`
+ SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty" toml:"SizeRootFs,omitempty"`
}
// UpdateContainerOptions specify parameters to the UpdateContainer function.
@@ -500,7 +506,7 @@ type UpdateContainerOptions struct {
//
// See https://goo.gl/Y6fXUy for more details.
func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error {
- resp, err := c.do("POST", fmt.Sprintf("/containers/"+id+"/update"), doOptions{
+ resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/"+id+"/update"), doOptions{
data: opts,
forceJSON: true,
context: opts.Context,
@@ -528,7 +534,7 @@ type RenameContainerOptions struct {
//
// See https://goo.gl/46inai for more details.
func (c *Client) RenameContainer(opts RenameContainerOptions) error {
- resp, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{
+ resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{
context: opts.Context,
})
if err != nil {
@@ -549,13 +555,14 @@ func (c *Client) InspectContainer(id string) (*Container, error) {
// The context object can be used to cancel the inspect request.
//
// See https://goo.gl/FaI5JT for more details.
+//nolint:golint
func (c *Client) InspectContainerWithContext(id string, ctx context.Context) (*Container, error) {
return c.inspectContainer(id, doOptions{context: ctx})
}
func (c *Client) inspectContainer(id string, opts doOptions) (*Container, error) {
path := "/containers/" + id + "/json"
- resp, err := c.do("GET", path, opts)
+ resp, err := c.do(http.MethodGet, path, opts)
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchContainer{ID: id}
@@ -575,7 +582,7 @@ func (c *Client) inspectContainer(id string, opts doOptions) (*Container, error)
// See https://goo.gl/15KKzh for more details.
func (c *Client) ContainerChanges(id string) ([]Change, error) {
path := "/containers/" + id + "/changes"
- resp, err := c.do("GET", path, doOptions{})
+ resp, err := c.do(http.MethodGet, path, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchContainer{ID: id}
@@ -611,7 +618,7 @@ type CreateContainerOptions struct {
func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) {
path := "/containers/create?" + queryString(opts)
resp, err := c.do(
- "POST",
+ http.MethodPost,
path,
doOptions{
data: struct {
@@ -729,6 +736,7 @@ type HostConfig struct {
Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty" toml:"Binds,omitempty"`
CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty" toml:"CapAdd,omitempty"`
CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty" toml:"CapDrop,omitempty"`
+ Capabilities []string `json:"Capabilities,omitempty" yaml:"Capabilities,omitempty" toml:"Capabilities,omitempty"` // Mutually exclusive w.r.t. CapAdd and CapDrop API v1.40
GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty" toml:"GroupAdd,omitempty"`
ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty" toml:"ContainerIDFile,omitempty"`
LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty" toml:"LxcConf,omitempty"`
@@ -742,6 +750,8 @@ type HostConfig struct {
UsernsMode string `json:"UsernsMode,omitempty" yaml:"UsernsMode,omitempty" toml:"UsernsMode,omitempty"`
NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty" toml:"NetworkMode,omitempty"`
IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty" toml:"IpcMode,omitempty"`
+ Isolation string `json:"Isolation,omitempty" yaml:"Isolation,omitempty" toml:"Isolation,omitempty"` // Windows only
+ ConsoleSize [2]int `json:"ConsoleSize,omitempty" yaml:"ConsoleSize,omitempty" toml:"ConsoleSize,omitempty"` // Windows only height x width
PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty" toml:"PidMode,omitempty"`
UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty" toml:"UTSMode,omitempty"`
RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty" toml:"RestartPolicy,omitempty"`
@@ -749,6 +759,7 @@ type HostConfig struct {
DeviceCgroupRules []string `json:"DeviceCgroupRules,omitempty" yaml:"DeviceCgroupRules,omitempty" toml:"DeviceCgroupRules,omitempty"`
LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty" toml:"LogConfig,omitempty"`
SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty" toml:"SecurityOpt,omitempty"`
+ CgroupnsMode string `json:"CgroupnsMode,omitempty" yaml:"CgroupnsMode,omitempty" toml:"CgroupnsMode,omitempty"` // v1.40+
Cgroup string `json:"Cgroup,omitempty" yaml:"Cgroup,omitempty" toml:"Cgroup,omitempty"`
CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty" toml:"CgroupParent,omitempty"`
Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"`
@@ -784,6 +795,8 @@ type HostConfig struct {
IOMaximumBandwidth int64 `json:"IOMaximumBandwidth,omitempty" yaml:"IOMaximumBandwidth,omitempty"`
IOMaximumIOps int64 `json:"IOMaximumIOps,omitempty" yaml:"IOMaximumIOps,omitempty"`
Mounts []HostMount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"`
+ MaskedPaths []string `json:"MaskedPaths,omitempty" yaml:"MaskedPaths,omitempty" toml:"MaskedPaths,omitempty"`
+ ReadonlyPaths []string `json:"ReadonlyPaths,omitempty" yaml:"ReadonlyPaths,omitempty" toml:"ReadonlyPaths,omitempty"`
Runtime string `json:"Runtime,omitempty" yaml:"Runtime,omitempty" toml:"Runtime,omitempty"`
Init bool `json:",omitempty" yaml:",omitempty"`
Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty" toml:"Privileged,omitempty"`
@@ -820,6 +833,7 @@ func (c *Client) StartContainer(id string, hostConfig *HostConfig) error {
// API 1.24 or greater.
//
// See https://goo.gl/fbOSZy for more details.
+//nolint:golint
func (c *Client) StartContainerWithContext(id string, hostConfig *HostConfig, ctx context.Context) error {
return c.startContainer(id, hostConfig, doOptions{context: ctx})
}
@@ -833,7 +847,7 @@ func (c *Client) startContainer(id string, hostConfig *HostConfig, opts doOption
opts.data = hostConfig
opts.forceJSON = true
}
- resp, err := c.do("POST", path, opts)
+ resp, err := c.do(http.MethodPost, path, opts)
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchContainer{ID: id, Err: err}
@@ -860,13 +874,14 @@ func (c *Client) StopContainer(id string, timeout uint) error {
// container request.
//
// See https://goo.gl/R9dZcV for more details.
+//nolint:golint
func (c *Client) StopContainerWithContext(id string, timeout uint, ctx context.Context) error {
return c.stopContainer(id, timeout, doOptions{context: ctx})
}
func (c *Client) stopContainer(id string, timeout uint, opts doOptions) error {
path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout)
- resp, err := c.do("POST", path, opts)
+ resp, err := c.do(http.MethodPost, path, opts)
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchContainer{ID: id}
@@ -886,7 +901,7 @@ func (c *Client) stopContainer(id string, timeout uint, opts doOptions) error {
// See https://goo.gl/MrAKQ5 for more details.
func (c *Client) RestartContainer(id string, timeout uint) error {
path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout)
- resp, err := c.do("POST", path, doOptions{})
+ resp, err := c.do(http.MethodPost, path, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchContainer{ID: id}
@@ -902,7 +917,7 @@ func (c *Client) RestartContainer(id string, timeout uint) error {
// See https://goo.gl/D1Yaii for more details.
func (c *Client) PauseContainer(id string) error {
path := fmt.Sprintf("/containers/%s/pause", id)
- resp, err := c.do("POST", path, doOptions{})
+ resp, err := c.do(http.MethodPost, path, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchContainer{ID: id}
@@ -918,7 +933,7 @@ func (c *Client) PauseContainer(id string) error {
// See https://goo.gl/sZ2faO for more details.
func (c *Client) UnpauseContainer(id string) error {
path := fmt.Sprintf("/containers/%s/unpause", id)
- resp, err := c.do("POST", path, doOptions{})
+ resp, err := c.do(http.MethodPost, path, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchContainer{ID: id}
@@ -948,7 +963,7 @@ func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
args = fmt.Sprintf("?ps_args=%s", psArgs)
}
path := fmt.Sprintf("/containers/%s/top%s", id, args)
- resp, err := c.do("GET", path, doOptions{})
+ resp, err := c.do(http.MethodGet, path, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return result, &NoSuchContainer{ID: id}
@@ -1116,7 +1131,7 @@ func (c *Client) Stats(opts StatsOptions) (retErr error) {
reqSent := make(chan struct{})
go func() {
defer close(errC)
- err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{
+ err := c.stream(http.MethodGet, fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{
rawJSONStream: true,
useJSONDecoder: true,
stdout: writeCloser,
@@ -1184,7 +1199,7 @@ type KillContainerOptions struct {
// See https://goo.gl/JnTxXZ for more details.
func (c *Client) KillContainer(opts KillContainerOptions) error {
path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
if err != nil {
e, ok := err.(*Error)
if !ok {
@@ -1225,7 +1240,7 @@ type RemoveContainerOptions struct {
// See https://goo.gl/hL5IPC for more details.
func (c *Client) RemoveContainer(opts RemoveContainerOptions) error {
path := "/containers/" + opts.ID + "?" + queryString(opts)
- resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchContainer{ID: opts.ID}
@@ -1254,7 +1269,7 @@ type UploadToContainerOptions struct {
func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error {
url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
- return c.stream("PUT", url, streamOptions{
+ return c.stream(http.MethodPut, url, streamOptions{
in: opts.InputStream,
context: opts.Context,
})
@@ -1277,7 +1292,7 @@ type DownloadFromContainerOptions struct {
func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error {
url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
- return c.stream("GET", url, streamOptions{
+ return c.stream(http.MethodGet, url, streamOptions{
setRawTerminal: true,
stdout: opts.OutputStream,
inactivityTimeout: opts.InactivityTimeout,
@@ -1310,7 +1325,7 @@ func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
return errors.New("go-dockerclient: CopyFromContainer is no longer available in Docker >= 1.12, use DownloadFromContainer instead")
}
url := fmt.Sprintf("/containers/%s/copy", opts.Container)
- resp, err := c.do("POST", url, doOptions{
+ resp, err := c.do(http.MethodPost, url, doOptions{
data: opts,
context: opts.Context,
})
@@ -1338,12 +1353,13 @@ func (c *Client) WaitContainer(id string) (int, error) {
// inspect request.
//
// See https://goo.gl/4AGweZ for more details.
+//nolint:golint
func (c *Client) WaitContainerWithContext(id string, ctx context.Context) (int, error) {
return c.waitContainer(id, doOptions{context: ctx})
}
func (c *Client) waitContainer(id string, opts doOptions) (int, error) {
- resp, err := c.do("POST", "/containers/"+id+"/wait", opts)
+ resp, err := c.do(http.MethodPost, "/containers/"+id+"/wait", opts)
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return 0, &NoSuchContainer{ID: id}
@@ -1377,7 +1393,7 @@ type CommitContainerOptions struct {
// See https://goo.gl/CzIguf for more details.
func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) {
path := "/commit?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{
+ resp, err := c.do(http.MethodPost, path, doOptions{
data: opts.Run,
context: opts.Context,
})
@@ -1412,6 +1428,9 @@ type AttachToContainerOptions struct {
// to unexpected behavior.
Success chan struct{}
+ // Override the key sequence for detaching a container.
+ DetachKeys string
+
// Use raw terminal? Usually true when the container contains a TTY.
RawTerminal bool `qs:"-"`
@@ -1451,7 +1470,7 @@ func (c *Client) AttachToContainerNonBlocking(opts AttachToContainerOptions) (Cl
return nil, &NoSuchContainer{ID: opts.Container}
}
path := "/containers/" + opts.Container + "/attach?" + queryString(opts)
- return c.hijack("POST", path, hijackOptions{
+ return c.hijack(http.MethodPost, path, hijackOptions{
success: opts.Success,
setRawTerminal: opts.RawTerminal,
in: opts.InputStream,
@@ -1501,7 +1520,7 @@ func (c *Client) Logs(opts LogsOptions) error {
opts.Tail = "all"
}
path := "/containers/" + opts.Container + "/logs?" + queryString(opts)
- return c.stream("GET", path, streamOptions{
+ return c.stream(http.MethodGet, path, streamOptions{
setRawTerminal: opts.RawTerminal,
stdout: opts.OutputStream,
stderr: opts.ErrorStream,
@@ -1517,7 +1536,7 @@ func (c *Client) ResizeContainerTTY(id string, height, width int) error {
params := make(url.Values)
params.Set("h", strconv.Itoa(height))
params.Set("w", strconv.Itoa(width))
- resp, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{})
+ resp, err := c.do(http.MethodPost, "/containers/"+id+"/resize?"+params.Encode(), doOptions{})
if err != nil {
return err
}
@@ -1545,7 +1564,7 @@ func (c *Client) ExportContainer(opts ExportContainerOptions) error {
return &NoSuchContainer{ID: opts.ID}
}
url := fmt.Sprintf("/containers/%s/export", opts.ID)
- return c.stream("GET", url, streamOptions{
+ return c.stream(http.MethodGet, url, streamOptions{
setRawTerminal: true,
stdout: opts.OutputStream,
inactivityTimeout: opts.InactivityTimeout,
@@ -1574,7 +1593,7 @@ type PruneContainersResults struct {
// See https://goo.gl/wnkgDT for more details.
func (c *Client) PruneContainers(opts PruneContainersOptions) (*PruneContainersResults, error) {
path := "/containers/prune?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
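
For orientation, a minimal sketch (in the library's own Go) of how a caller might exercise the container.go API touched above, namely the new DetachKeys field on AttachToContainerOptions and the context-aware stop call; the container ID, detach sequence, and timeout are assumed example values.

package example

import (
	"context"
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

// attachAndStop streams a container's output using the new DetachKeys field,
// then stops it through the context-aware call shown in the hunks above.
func attachAndStop(ctx context.Context, client *docker.Client, id string) error {
	cw, err := client.AttachToContainerNonBlocking(docker.AttachToContainerOptions{
		Container:    id,
		OutputStream: os.Stdout,
		ErrorStream:  os.Stderr,
		Stream:       true,
		Stdout:       true,
		Stderr:       true,
		DetachKeys:   "ctrl-p,ctrl-q", // assumed sequence; any valid detach keys work
	})
	if err != nil {
		return err
	}
	defer cw.Close()

	// Ten-second in-daemon stop timeout; ctx bounds the HTTP request itself.
	return client.StopContainerWithContext(id, 10, ctx)
}
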
diff --git a/vendor/github.com/fsouza/go-dockerclient/distribution.go b/vendor/github.com/fsouza/go-dockerclient/distribution.go
index d0f8ce74c..6e5e12f7d 100644
--- a/vendor/github.com/fsouza/go-dockerclient/distribution.go
+++ b/vendor/github.com/fsouza/go-dockerclient/distribution.go
@@ -6,6 +6,7 @@ package docker
import (
"encoding/json"
+ "net/http"
"github.com/docker/docker/api/types/registry"
)
@@ -13,7 +14,7 @@ import (
// InspectDistribution returns image digest and platform information by contacting the registry
func (c *Client) InspectDistribution(name string) (*registry.DistributionInspect, error) {
path := "/distribution/" + name + "/json"
- resp, err := c.do("GET", path, doOptions{})
+ resp, err := c.do(http.MethodGet, path, doOptions{})
if err != nil {
return nil, err
}
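
A short sketch of calling the distribution endpoint above; the image reference would be caller-supplied.

package example

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

// printDistribution queries GET /distribution/<name>/json, which the client
// now issues with http.MethodGet rather than the bare "GET" literal.
func printDistribution(client *docker.Client, ref string) error {
	dist, err := client.InspectDistribution(ref)
	if err != nil {
		return err
	}
	fmt.Println(dist.Descriptor.Digest, dist.Platforms)
	return nil
}
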
diff --git a/vendor/github.com/fsouza/go-dockerclient/event.go b/vendor/github.com/fsouza/go-dockerclient/event.go
index 3a3364d9d..6de7c5535 100644
--- a/vendor/github.com/fsouza/go-dockerclient/event.go
+++ b/vendor/github.com/fsouza/go-dockerclient/event.go
@@ -178,7 +178,7 @@ func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
return nil
}
-func (eventState *eventMonitoringState) disableEventMonitoring() error {
+func (eventState *eventMonitoringState) disableEventMonitoring() {
eventState.Lock()
defer eventState.Unlock()
@@ -191,7 +191,6 @@ func (eventState *eventMonitoringState) disableEventMonitoring() error {
close(eventState.C)
close(eventState.errC)
}
- return nil
}
func (eventState *eventMonitoringState) monitorEvents(c *Client) {
@@ -330,17 +329,18 @@ func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan
if err != nil {
return err
}
- //lint:ignore SA1019 this is needed here
+ //nolint:staticcheck
conn := httputil.NewClientConn(dial, nil)
- req, err := http.NewRequest("GET", uri, nil)
+ req, err := http.NewRequest(http.MethodGet, uri, nil)
if err != nil {
return err
}
+ //nolint:bodyclose
res, err := conn.Do(req)
if err != nil {
return err
}
- //lint:ignore SA1019 ClientConn is needed here
+ //nolint:staticcheck
go func(res *http.Response, conn *httputil.ClientConn) {
defer conn.Close()
defer res.Body.Close()
diff --git a/vendor/github.com/fsouza/go-dockerclient/exec.go b/vendor/github.com/fsouza/go-dockerclient/exec.go
index d804b10b8..48d1ad349 100644
--- a/vendor/github.com/fsouza/go-dockerclient/exec.go
+++ b/vendor/github.com/fsouza/go-dockerclient/exec.go
@@ -30,6 +30,7 @@ type CreateExecOptions struct {
Container string `json:"Container,omitempty" yaml:"Container,omitempty" toml:"Container,omitempty"`
User string `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty" toml:"WorkingDir,omitempty"`
+ DetachKeys string `json:"DetachKeys,omitempty" yaml:"DetachKeys,omitempty" toml:"DetachKeys,omitempty"`
Context context.Context `json:"-"`
AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" toml:"AttachStdin,omitempty"`
AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"`
@@ -50,7 +51,7 @@ func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) {
return nil, errors.New("exec configuration WorkingDir is only supported in API#1.35 and above")
}
path := fmt.Sprintf("/containers/%s/exec", opts.Container)
- resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context})
+ resp, err := c.do(http.MethodPost, path, doOptions{data: opts, context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchContainer{ID: opts.Container}
@@ -119,7 +120,7 @@ func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWa
path := fmt.Sprintf("/exec/%s/start", id)
if opts.Detach {
- resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context})
+ resp, err := c.do(http.MethodPost, path, doOptions{data: opts, context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchExec{ID: id}
@@ -130,7 +131,7 @@ func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWa
return nil, nil
}
- return c.hijack("POST", path, hijackOptions{
+ return c.hijack(http.MethodPost, path, hijackOptions{
success: opts.Success,
setRawTerminal: opts.RawTerminal,
in: opts.InputStream,
@@ -151,7 +152,7 @@ func (c *Client) ResizeExecTTY(id string, height, width int) error {
params.Set("w", strconv.Itoa(width))
path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
- resp, err := c.do("POST", path, doOptions{})
+ resp, err := c.do(http.MethodPost, path, doOptions{})
if err != nil {
return err
}
@@ -192,7 +193,7 @@ type ExecInspect struct {
// See https://goo.gl/ctMUiW for more details
func (c *Client) InspectExec(id string) (*ExecInspect, error) {
path := fmt.Sprintf("/exec/%s/json", id)
- resp, err := c.do("GET", path, doOptions{})
+ resp, err := c.do(http.MethodGet, path, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchExec{ID: id}
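
A hedged sketch of the new DetachKeys field on CreateExecOptions in practice, pairing CreateExec with StartExec; the command and key sequence are assumptions.

package example

import (
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

// runShell starts an interactive shell in a running container, setting the
// DetachKeys field added to CreateExecOptions in the hunk above.
func runShell(client *docker.Client, containerID string) error {
	exec, err := client.CreateExec(docker.CreateExecOptions{
		Container:    containerID,
		Cmd:          []string{"sh"},
		AttachStdin:  true,
		AttachStdout: true,
		AttachStderr: true,
		Tty:          true,
		DetachKeys:   "ctrl-p,ctrl-q", // assumed sequence
	})
	if err != nil {
		return err
	}
	return client.StartExec(exec.ID, docker.StartExecOptions{
		InputStream:  os.Stdin,
		OutputStream: os.Stdout,
		ErrorStream:  os.Stderr,
		RawTerminal:  true,
	})
}
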
diff --git a/vendor/github.com/fsouza/go-dockerclient/go.mod b/vendor/github.com/fsouza/go-dockerclient/go.mod
index 90183e9d2..cb0ef690c 100644
--- a/vendor/github.com/fsouza/go-dockerclient/go.mod
+++ b/vendor/github.com/fsouza/go-dockerclient/go.mod
@@ -1,26 +1,26 @@
module github.com/fsouza/go-dockerclient
-go 1.11
+go 1.12
require (
- github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
+ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/Microsoft/go-winio v0.4.14
github.com/Microsoft/hcsshim v0.8.6 // indirect
+ github.com/containerd/containerd v1.3.0 // indirect
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
- github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b
+ github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0
github.com/gogo/protobuf v1.2.1 // indirect
github.com/golang/protobuf v1.3.0 // indirect
github.com/google/go-cmp v0.3.1
github.com/gorilla/mux v1.7.3
- github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd
+ github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/runc v0.1.1 // indirect
- golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
- golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542
+ golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad
google.golang.org/grpc v1.22.0 // indirect
gotest.tools v2.2.0+incompatible // indirect
)
diff --git a/vendor/github.com/fsouza/go-dockerclient/go.sum b/vendor/github.com/fsouza/go-dockerclient/go.sum
index 00c823418..c4df2e956 100644
--- a/vendor/github.com/fsouza/go-dockerclient/go.sum
+++ b/vendor/github.com/fsouza/go-dockerclient/go.sum
@@ -7,14 +7,16 @@ github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jB
github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M=
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b h1:+Ga+YpCDpcY1fln6GI0fiiirpqHGcob5/Vk3oKNuGdU=
-github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce h1:H3csZuxZESJeeEiOxq4YXPNmLFbjl7u2qVBrAAGX/sA=
+github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
@@ -31,12 +33,12 @@ github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd h1:anPrsicrIi2ColgWTVPk+TrN42hJIWlfPHSBP9S0ZkM=
-github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd/go.mod h1:3LVOLeyx9XVvwPgrt2be44XgSqndprz1G18rSk8KD84=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
+github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
@@ -54,8 +56,8 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ=
+golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -67,9 +69,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542 h1:6ZQFf1D2YYDDI7eSwW8adlkkavTB9sw5I24FVtEvNUQ=
-golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
diff --git a/vendor/github.com/fsouza/go-dockerclient/image.go b/vendor/github.com/fsouza/go-dockerclient/image.go
index 31b6c53f4..5f72d6645 100644
--- a/vendor/github.com/fsouza/go-dockerclient/image.go
+++ b/vendor/github.com/fsouza/go-dockerclient/image.go
@@ -109,7 +109,7 @@ type ListImagesOptions struct {
// See https://goo.gl/BVzauZ for more details.
func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) {
path := "/images/json?" + queryString(opts)
- resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
@@ -129,13 +129,14 @@ type ImageHistory struct {
Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Tags,omitempty"`
CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty" toml:"CreatedBy,omitempty"`
Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"`
+ Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty" toml:"Comment,omitempty"`
}
// ImageHistory returns the history of the image by its name or ID.
//
// See https://goo.gl/fYtxQa for more details.
func (c *Client) ImageHistory(name string) ([]ImageHistory, error) {
- resp, err := c.do("GET", "/images/"+name+"/history", doOptions{})
+ resp, err := c.do(http.MethodGet, "/images/"+name+"/history", doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, ErrNoSuchImage
@@ -154,7 +155,7 @@ func (c *Client) ImageHistory(name string) ([]ImageHistory, error) {
//
// See https://goo.gl/Vd2Pck for more details.
func (c *Client) RemoveImage(name string) error {
- resp, err := c.do("DELETE", "/images/"+name, doOptions{})
+ resp, err := c.do(http.MethodDelete, "/images/"+name, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return ErrNoSuchImage
@@ -181,7 +182,7 @@ type RemoveImageOptions struct {
// See https://goo.gl/Vd2Pck for more details.
func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error {
uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts))
- resp, err := c.do("DELETE", uri, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodDelete, uri, doOptions{context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return ErrNoSuchImage
@@ -196,7 +197,7 @@ func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error
//
// See https://goo.gl/ncLTG8 for more details.
func (c *Client) InspectImage(name string) (*Image, error) {
- resp, err := c.do("GET", "/images/"+name+"/json", doOptions{})
+ resp, err := c.do(http.MethodGet, "/images/"+name+"/json", doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, ErrNoSuchImage
@@ -271,7 +272,7 @@ func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error
name := opts.Name
opts.Name = ""
path := "/images/" + name + "/push?" + queryString(&opts)
- return c.stream("POST", path, streamOptions{
+ return c.stream(http.MethodPost, path, streamOptions{
setRawTerminal: true,
rawJSONStream: opts.RawJSONStream,
headers: headers,
@@ -322,12 +323,13 @@ func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error
return c.createImage(&opts, headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context)
}
+//nolint:golint
func (c *Client) createImage(opts interface{}, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration, context context.Context) error {
url, err := c.getPath("/images/create", opts)
if err != nil {
return err
}
- return c.streamUrl("POST", url, streamOptions{
+ return c.streamURL(http.MethodPost, url, streamOptions{
setRawTerminal: true,
headers: headers,
in: in,
@@ -351,7 +353,7 @@ type LoadImageOptions struct {
//
// See https://goo.gl/rEsBV3 for more details.
func (c *Client) LoadImage(opts LoadImageOptions) error {
- return c.stream("POST", "/images/load", streamOptions{
+ return c.stream(http.MethodPost, "/images/load", streamOptions{
setRawTerminal: true,
in: opts.InputStream,
stdout: opts.OutputStream,
@@ -373,7 +375,7 @@ type ExportImageOptions struct {
//
// See https://goo.gl/AuySaA for more details.
func (c *Client) ExportImage(opts ExportImageOptions) error {
- return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{
+ return c.stream(http.MethodGet, fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{
setRawTerminal: true,
stdout: opts.OutputStream,
inactivityTimeout: opts.InactivityTimeout,
@@ -403,7 +405,7 @@ func (c *Client) ExportImages(opts ExportImagesOptions) error {
var err error
var exporturl string
if c.requestedAPIVersion.GreaterThanOrEqualTo(apiVersion125) {
- var str string = opts.Names[0]
+ str := opts.Names[0]
for _, val := range opts.Names[1:] {
str += "," + val
}
@@ -412,7 +414,6 @@ func (c *Client) ExportImages(opts ExportImagesOptions) error {
OutputStream: opts.OutputStream,
InactivityTimeout: opts.InactivityTimeout,
Context: opts.Context,
-
})
} else {
exporturl, err = c.getPath("/images/get", &opts)
@@ -420,7 +421,7 @@ func (c *Client) ExportImages(opts ExportImagesOptions) error {
if err != nil {
return err
}
- return c.streamUrl("GET", exporturl, streamOptions{
+ return c.streamURL(http.MethodGet, exporturl, streamOptions{
setRawTerminal: true,
stdout: opts.OutputStream,
inactivityTimeout: opts.InactivityTimeout,
@@ -471,35 +472,39 @@ func (c *Client) ImportImage(opts ImportImageOptions) error {
// https://goo.gl/4nYHwV.
type BuildImageOptions struct {
Context context.Context
- Name string `qs:"t"`
- Dockerfile string `qs:"dockerfile"`
- CacheFrom []string `qs:"-"`
- Memory int64 `qs:"memory"`
- Memswap int64 `qs:"memswap"`
- CPUShares int64 `qs:"cpushares"`
- CPUQuota int64 `qs:"cpuquota"`
- CPUPeriod int64 `qs:"cpuperiod"`
- CPUSetCPUs string `qs:"cpusetcpus"`
- Labels map[string]string `qs:"labels"`
- InputStream io.Reader `qs:"-"`
- OutputStream io.Writer `qs:"-"`
- Remote string `qs:"remote"`
+ Name string `qs:"t"`
+ Dockerfile string `ver:"1.25"`
+ ExtraHosts string `ver:"1.28"`
+ CacheFrom []string `qs:"-" ver:"1.25"`
+ Memory int64
+ Memswap int64
+ ShmSize int64
+ CPUShares int64
+ CPUQuota int64 `ver:"1.21"`
+ CPUPeriod int64 `ver:"1.21"`
+ CPUSetCPUs string
+ Labels map[string]string
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ Remote string
Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header
AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header
ContextDir string `qs:"-"`
- Ulimits []ULimit `qs:"-"`
- BuildArgs []BuildArg `qs:"-"`
- NetworkMode string `qs:"networkmode"`
+ Ulimits []ULimit `qs:"-" ver:"1.18"`
+ BuildArgs []BuildArg `qs:"-" ver:"1.21"`
+ NetworkMode string `ver:"1.25"`
+ Platform string `ver:"1.32"`
InactivityTimeout time.Duration `qs:"-"`
- CgroupParent string `qs:"cgroupparent"`
- SecurityOpt []string `qs:"securityopt"`
- Target string `gs:"target"`
- NoCache bool `qs:"nocache"`
- SuppressOutput bool `qs:"q"`
- Pull bool `qs:"pull"`
- RmTmpContainer bool `qs:"rm"`
- ForceRmTmpContainer bool `qs:"forcerm"`
- RawJSONStream bool `qs:"-"`
+ CgroupParent string
+ SecurityOpt []string
+ Target string
+ Outputs string `ver:"1.40"`
+ NoCache bool
+ SuppressOutput bool `qs:"q"`
+ Pull bool `ver:"1.16"`
+ RmTmpContainer bool `qs:"rm"`
+ ForceRmTmpContainer bool `qs:"forcerm" ver:"1.12"`
+ RawJSONStream bool `qs:"-"`
}
// BuildArg represents arguments that can be passed to the image when building
@@ -542,13 +547,16 @@ func (c *Client) BuildImage(opts BuildImageOptions) error {
return err
}
}
- qs := queryString(&opts)
+ qs, ver := queryStringVersion(&opts)
- if c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion125) && len(opts.CacheFrom) > 0 {
+ if len(opts.CacheFrom) > 0 {
if b, err := json.Marshal(opts.CacheFrom); err == nil {
item := url.Values(map[string][]string{})
item.Add("cachefrom", string(b))
qs = fmt.Sprintf("%s&%s", qs, item.Encode())
+ if ver == nil || apiVersion125.GreaterThan(ver) {
+ ver = apiVersion125
+ }
}
}
@@ -557,6 +565,9 @@ func (c *Client) BuildImage(opts BuildImageOptions) error {
item := url.Values(map[string][]string{})
item.Add("ulimits", string(b))
qs = fmt.Sprintf("%s&%s", qs, item.Encode())
+ if ver == nil || apiVersion118.GreaterThan(ver) {
+ ver = apiVersion118
+ }
}
}
@@ -569,10 +580,18 @@ func (c *Client) BuildImage(opts BuildImageOptions) error {
item := url.Values(map[string][]string{})
item.Add("buildargs", string(b))
qs = fmt.Sprintf("%s&%s", qs, item.Encode())
+ if ver == nil || apiVersion121.GreaterThan(ver) {
+ ver = apiVersion121
+ }
}
}
- return c.stream("POST", fmt.Sprintf("/build?%s", qs), streamOptions{
+ buildURL, err := c.pathVersionCheck("/build", qs, ver)
+ if err != nil {
+ return err
+ }
+
+ return c.streamURL(http.MethodPost, buildURL, streamOptions{
setRawTerminal: true,
rawJSONStream: opts.RawJSONStream,
headers: headers,
@@ -610,7 +629,7 @@ func (c *Client) TagImage(name string, opts TagImageOptions) error {
if name == "" {
return ErrNoSuchImage
}
- resp, err := c.do("POST", "/images/"+name+"/tag?"+queryString(&opts), doOptions{
+ resp, err := c.do(http.MethodPost, "/images/"+name+"/tag?"+queryString(&opts), doOptions{
context: opts.Context,
})
if err != nil {
@@ -666,7 +685,7 @@ type APIImageSearch struct {
//
// See https://goo.gl/KLO9IZ for more details.
func (c *Client) SearchImages(term string) ([]APIImageSearch, error) {
- resp, err := c.do("GET", "/images/search?term="+term, doOptions{})
+ resp, err := c.do(http.MethodGet, "/images/search?term="+term, doOptions{})
if err != nil {
return nil, err
}
@@ -687,7 +706,7 @@ func (c *Client) SearchImagesEx(term string, auth AuthConfiguration) ([]APIImage
return nil, err
}
- resp, err := c.do("GET", "/images/search?term="+term, doOptions{
+ resp, err := c.do(http.MethodGet, "/images/search?term="+term, doOptions{
headers: headers,
})
if err != nil {
@@ -725,7 +744,7 @@ type PruneImagesResults struct {
// See https://goo.gl/qfZlbZ for more details.
func (c *Client) PruneImages(opts PruneImagesOptions) (*PruneImagesResults, error) {
path := "/images/prune?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
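
A sketch of a build call that exercises one of the newly version-tagged BuildImageOptions fields; the image name and platform string are assumed values.

package example

import (
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

// buildImage builds from a local context directory. Platform carries a
// ver:"1.32" tag in the hunk above, and BuildImage now derives a minimum API
// version from the options actually set (queryStringVersion / pathVersionCheck)
// before issuing the request.
func buildImage(client *docker.Client) error {
	return client.BuildImage(docker.BuildImageOptions{
		Name:         "example/app:latest", // assumed tag
		ContextDir:   ".",
		Dockerfile:   "Dockerfile",
		Platform:     "linux/amd64", // assumed platform
		OutputStream: os.Stdout,
	})
}
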
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go
deleted file mode 100644
index f11ee0ee3..000000000
--- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go
+++ /dev/null
@@ -1,509 +0,0 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
-package archive
-
-import (
- "archive/tar"
- "bufio"
- "compress/gzip"
- "fmt"
- "io"
- "log"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/docker/docker/pkg/fileutils"
- "github.com/docker/docker/pkg/idtools"
- "github.com/docker/docker/pkg/pools"
- "github.com/docker/docker/pkg/system"
-)
-
-const (
- // Uncompressed represents the uncompressed.
- Uncompressed Compression = iota
- // Bzip2 is bzip2 compression algorithm.
- Bzip2
- // Gzip is gzip compression algorithm.
- Gzip
- // Xz is xz compression algorithm.
- Xz
-)
-
-const (
- modeISDIR = 040000 // Directory
- modeISFIFO = 010000 // FIFO
- modeISREG = 0100000 // Regular file
- modeISLNK = 0120000 // Symbolic link
- modeISBLK = 060000 // Block special file
- modeISCHR = 020000 // Character special file
- modeISSOCK = 0140000 // Socket
-)
-
-// Compression is the state represents if compressed or not.
-type Compression int
-
-// Extension returns the extension of a file that uses the specified compression algorithm.
-func (compression *Compression) Extension() string {
- switch *compression {
- case Uncompressed:
- return "tar"
- case Bzip2:
- return "tar.bz2"
- case Gzip:
- return "tar.gz"
- case Xz:
- return "tar.xz"
- }
- return ""
-}
-
-// WhiteoutFormat is the format of whiteouts unpacked
-type WhiteoutFormat int
-
-// TarOptions wraps the tar options.
-type TarOptions struct {
- IncludeFiles []string
- ExcludePatterns []string
- Compression Compression
- UIDMaps []idtools.IDMap
- GIDMaps []idtools.IDMap
- ChownOpts *idtools.Identity
- // WhiteoutFormat is the expected on disk format for whiteout files.
- // This format will be converted to the standard format on pack
- // and from the standard format on unpack.
- WhiteoutFormat WhiteoutFormat
- // When unpacking, specifies whether overwriting a directory with a
- // non-directory is allowed and vice versa.
- // For each include when creating an archive, the included name will be
- // replaced with the matching name from this map.
- RebaseNames map[string]string
- NoLchown bool
- InUserNS bool
- IncludeSourceDir bool
- NoOverwriteDirNonDir bool
-}
-
-// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
-// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
-func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
- // Fix the source path to work with long path names. This is a no-op
- // on platforms other than Windows.
- srcPath = fixVolumePathPrefix(srcPath)
-
- pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
- if err != nil {
- return nil, err
- }
-
- pipeReader, pipeWriter := io.Pipe()
-
- compressWriter, err := CompressStream(pipeWriter, options.Compression)
- if err != nil {
- return nil, err
- }
-
- go func() {
- ta := newTarAppender(
- idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
- compressWriter,
- options.ChownOpts,
- )
- ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat)
-
- defer func() {
- // Make sure to check the error on Close.
- if err := ta.TarWriter.Close(); err != nil {
- log.Printf("Can't close tar writer: %s", err)
- }
- if err := compressWriter.Close(); err != nil {
- log.Printf("Can't close compress writer: %s", err)
- }
- if err := pipeWriter.Close(); err != nil {
- log.Printf("Can't close pipe writer: %s", err)
- }
- }()
-
- // this buffer is needed for the duration of this piped stream
- defer pools.BufioWriter32KPool.Put(ta.Buffer)
-
- // In general we log errors here but ignore them because
- // during e.g. a diff operation the container can continue
- // mutating the filesystem and we can see transient errors
- // from this
-
- stat, err := os.Lstat(srcPath)
- if err != nil {
- return
- }
-
- if !stat.IsDir() {
- // We can't later join a non-dir with any includes because the
- // 'walk' will error if "file/." is stat-ed and "file" is not a
- // directory. So, we must split the source path and use the
- // basename as the include.
- if len(options.IncludeFiles) > 0 {
- log.Print("Tar: Can't archive a file with includes")
- }
-
- dir, base := SplitPathDirEntry(srcPath)
- srcPath = dir
- options.IncludeFiles = []string{base}
- }
-
- if len(options.IncludeFiles) == 0 {
- options.IncludeFiles = []string{"."}
- }
-
- seen := make(map[string]bool)
-
- for _, include := range options.IncludeFiles {
- include := include
- rebaseName := options.RebaseNames[include]
-
- walkRoot := getWalkRoot(srcPath, include)
- filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
- if err != nil {
- log.Printf("Tar: Can't stat file %s to tar: %s", srcPath, err)
- return nil
- }
-
- relFilePath, err := filepath.Rel(srcPath, filePath)
- if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
- // Error getting relative path OR we are looking
- // at the source directory path. Skip in both situations.
- return nil
- }
-
- if options.IncludeSourceDir && include == "." && relFilePath != "." {
- relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
- }
-
- skip := false
-
- // If "include" is an exact match for the current file
- // then even if there's an "excludePatterns" pattern that
- // matches it, don't skip it. IOW, assume an explicit 'include'
- // is asking for that file no matter what - which is true
- // for some files, like .dockerignore and Dockerfile (sometimes)
- if include != relFilePath {
- skip, err = pm.Matches(relFilePath)
- if err != nil {
- log.Printf("Error matching %s: %v", relFilePath, err)
- return err
- }
- }
-
- if skip {
- // If we want to skip this file and its a directory
- // then we should first check to see if there's an
- // excludes pattern (e.g. !dir/file) that starts with this
- // dir. If so then we can't skip this dir.
-
- // Its not a dir then so we can just return/skip.
- if !f.IsDir() {
- return nil
- }
-
- // No exceptions (!...) in patterns so just skip dir
- if !pm.Exclusions() {
- return filepath.SkipDir
- }
-
- dirSlash := relFilePath + string(filepath.Separator)
-
- for _, pat := range pm.Patterns() {
- if !pat.Exclusion() {
- continue
- }
- if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
- // found a match - so can't skip this dir
- return nil
- }
- }
-
- // No matching exclusion dir so just skip dir
- return filepath.SkipDir
- }
-
- if seen[relFilePath] {
- return nil
- }
- seen[relFilePath] = true
-
- // Rename the base resource.
- if rebaseName != "" {
- var replacement string
- if rebaseName != string(filepath.Separator) {
- // Special case the root directory to replace with an
- // empty string instead so that we don't end up with
- // double slashes in the paths.
- replacement = rebaseName
- }
-
- relFilePath = strings.Replace(relFilePath, include, replacement, 1)
- }
-
- if err := ta.addTarFile(filePath, relFilePath); err != nil {
- log.Printf("Can't add file %s to tar: %s", filePath, err)
- // if pipe is broken, stop writing tar stream to it
- if err == io.ErrClosedPipe {
- return err
- }
- }
- return nil
- })
- }
- }()
-
- return pipeReader, nil
-}
-
-// CompressStream compresses the dest with specified compression algorithm.
-func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
- p := pools.BufioWriter32KPool
- buf := p.Get(dest)
- switch compression {
- case Uncompressed:
- writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
- return writeBufWrapper, nil
- case Gzip:
- gzWriter := gzip.NewWriter(dest)
- writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
- return writeBufWrapper, nil
- case Bzip2, Xz:
- // archive/bzip2 does not support writing, and there is no xz support at all
- // However, this is not a problem as docker only currently generates gzipped tars
- //lint:ignore ST1005 this is vendored/copied code
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
- default:
- //lint:ignore ST1005 this is vendored/copied code
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
- }
-}
-
-type tarWhiteoutConverter interface {
- ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
- ConvertRead(*tar.Header, string) (bool, error)
-}
-
-type tarAppender struct {
- TarWriter *tar.Writer
- Buffer *bufio.Writer
-
- // for hardlink mapping
- SeenFiles map[uint64]string
- IdentityMapping *idtools.IdentityMapping
- ChownOpts *idtools.Identity
-
- // For packing and unpacking whiteout files in the
- // non standard format. The whiteout files defined
- // by the AUFS standard are used as the tar whiteout
- // standard.
- WhiteoutConverter tarWhiteoutConverter
-}
-
-func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
- return &tarAppender{
- SeenFiles: make(map[uint64]string),
- TarWriter: tar.NewWriter(writer),
- Buffer: pools.BufioWriter32KPool.Get(nil),
- IdentityMapping: idMapping,
- ChownOpts: chownOpts,
- }
-}
-
-// addTarFile adds to the tar archive a file from `path` as `name`
-func (ta *tarAppender) addTarFile(path, name string) error {
- fi, err := os.Lstat(path)
- if err != nil {
- return err
- }
-
- var link string
- if fi.Mode()&os.ModeSymlink != 0 {
- var err error
- link, err = os.Readlink(path)
- if err != nil {
- return err
- }
- }
-
- hdr, err := FileInfoHeader(name, fi, link)
- if err != nil {
- return err
- }
- if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
- return err
- }
-
- // if it's not a directory and has more than 1 link,
- // it's hard linked, so set the type flag accordingly
- if !fi.IsDir() && hasHardlinks(fi) {
- inode, err := getInodeFromStat(fi.Sys())
- if err != nil {
- return err
- }
- // a link should have a name that it links too
- // and that linked name should be first in the tar archive
- if oldpath, ok := ta.SeenFiles[inode]; ok {
- hdr.Typeflag = tar.TypeLink
- hdr.Linkname = oldpath
- hdr.Size = 0 // This Must be here for the writer math to add up!
- } else {
- ta.SeenFiles[inode] = name
- }
- }
-
- // check whether the file is overlayfs whiteout
- // if yes, skip re-mapping container ID mappings.
- isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0
-
- // handle re-mapping container ID mappings back to host ID mappings before
- // writing tar headers/files. We skip whiteout files because they were written
- // by the kernel and already have proper ownership relative to the host
- if !isOverlayWhiteout &&
- !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) &&
- !ta.IdentityMapping.Empty() {
- fileIdentity, err := getFileIdentity(fi.Sys())
- if err != nil {
- return err
- }
- hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIdentity)
- if err != nil {
- return err
- }
- }
-
- // explicitly override with ChownOpts
- if ta.ChownOpts != nil {
- hdr.Uid = ta.ChownOpts.UID
- hdr.Gid = ta.ChownOpts.GID
- }
-
- if ta.WhiteoutConverter != nil {
- wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
- if err != nil {
- return err
- }
-
- // If a new whiteout file exists, write original hdr, then
- // replace hdr with wo to be written after. Whiteouts should
- // always be written after the original. Note the original
- // hdr may have been updated to be a whiteout with returning
- // a whiteout header
- if wo != nil {
- if err := ta.TarWriter.WriteHeader(hdr); err != nil {
- return err
- }
- if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
- return fmt.Errorf("tar: cannot use whiteout for non-empty file")
- }
- hdr = wo
- }
- }
-
- if err := ta.TarWriter.WriteHeader(hdr); err != nil {
- return err
- }
-
- if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
- // We use system.OpenSequential to ensure we use sequential file
- // access on Windows to avoid depleting the standby list.
- // On Linux, this equates to a regular os.Open.
- file, err := system.OpenSequential(path)
- if err != nil {
- return err
- }
-
- ta.Buffer.Reset(ta.TarWriter)
- defer ta.Buffer.Reset(nil)
- _, err = io.Copy(ta.Buffer, file)
- file.Close()
- if err != nil {
- return err
- }
- err = ta.Buffer.Flush()
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
-// to a tar header
-func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
- capability, _ := system.Lgetxattr(path, "security.capability")
- if capability != nil {
- //lint:ignore SA1019 this is vendored/copied code
- hdr.Xattrs = make(map[string]string)
- //lint:ignore SA1019 this is vendored/copied code
- hdr.Xattrs["security.capability"] = string(capability)
- }
- return nil
-}
-
-// FileInfoHeader creates a populated Header from fi.
-// Compared to archive pkg this function fills in more information.
-// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
-// which have been deleted since Go 1.9 archive/tar.
-func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
- hdr, err := tar.FileInfoHeader(fi, link)
- if err != nil {
- return nil, err
- }
- hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
- name, err = canonicalTarName(name, fi.IsDir())
- if err != nil {
- return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
- }
- hdr.Name = name
- if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
- return nil, err
- }
- return hdr, nil
-}
-
-// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar
-// https://github.com/golang/go/commit/66b5a2f
-func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
- fm := fi.Mode()
- switch {
- case fm.IsRegular():
- mode |= modeISREG
- case fi.IsDir():
- mode |= modeISDIR
- case fm&os.ModeSymlink != 0:
- mode |= modeISLNK
- case fm&os.ModeDevice != 0:
- if fm&os.ModeCharDevice != 0 {
- mode |= modeISCHR
- } else {
- mode |= modeISBLK
- }
- case fm&os.ModeNamedPipe != 0:
- mode |= modeISFIFO
- case fm&os.ModeSocket != 0:
- mode |= modeISSOCK
- }
- return mode
-}
-
-// canonicalTarName provides a platform-independent and consistent posix-style
-// path for files and directories to be archived regardless of the platform.
-func canonicalTarName(name string, isDir bool) (string, error) {
- name, err := CanonicalTarNameForPath(name)
- if err != nil {
- return "", err
- }
-
- // suffix with '/' for directories
- if isDir && !strings.HasSuffix(name, "/") {
- name += "/"
- }
- return name, nil
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go
deleted file mode 100644
index e2059e489..000000000
--- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
-package archive
-
-import (
- "archive/tar"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/docker/docker/pkg/system"
- "golang.org/x/sys/unix"
-)
-
-const (
- // AUFSWhiteoutFormat is the default format for whiteouts
- AUFSWhiteoutFormat WhiteoutFormat = iota
- // OverlayWhiteoutFormat formats whiteout according to the overlay
- // standard.
- OverlayWhiteoutFormat
-)
-
-func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
- if format == OverlayWhiteoutFormat {
- return overlayWhiteoutConverter{}
- }
- return nil
-}
-
-type overlayWhiteoutConverter struct{}
-
-func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
- // convert whiteouts to AUFS format
- if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
- // we just rename the file and make it normal
- dir, filename := filepath.Split(hdr.Name)
- hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
- hdr.Mode = 0600
- hdr.Typeflag = tar.TypeReg
- hdr.Size = 0
- }
-
- if fi.Mode()&os.ModeDir != 0 {
- // convert opaque dirs to AUFS format by writing an empty file with the prefix
- opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
- if err != nil {
- return nil, err
- }
- if len(opaque) == 1 && opaque[0] == 'y' {
- //lint:ignore SA1019 this is vendored/copied code
- if hdr.Xattrs != nil {
- //lint:ignore SA1019 this is vendored/copied code
- delete(hdr.Xattrs, "trusted.overlay.opaque")
- }
-
- // create a header for the whiteout file
- // it should inherit some properties from the parent, but be a regular file
- wo = &tar.Header{
- Typeflag: tar.TypeReg,
- Mode: hdr.Mode & int64(os.ModePerm),
- Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
- Size: 0,
- Uid: hdr.Uid,
- Uname: hdr.Uname,
- Gid: hdr.Gid,
- Gname: hdr.Gname,
- AccessTime: hdr.AccessTime,
- ChangeTime: hdr.ChangeTime,
- }
- }
- }
-
- return
-}
-
-func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
- base := filepath.Base(path)
- dir := filepath.Dir(path)
-
- // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
- if base == WhiteoutOpaqueDir {
- err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
- // don't write the file itself
- return false, err
- }
-
- // if a file was deleted and we are using overlay, we need to create a character device
- if strings.HasPrefix(base, WhiteoutPrefix) {
- originalBase := base[len(WhiteoutPrefix):]
- originalPath := filepath.Join(dir, originalBase)
-
- if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
- return false, err
- }
- if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
- return false, err
- }
-
- // don't write the file itself
- return false, nil
- }
-
- return true, nil
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go
deleted file mode 100644
index 72822c857..000000000
--- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
-// +build !linux
-
-package archive
-
-func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
- return nil
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go
deleted file mode 100644
index 39ea287bf..000000000
--- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
-// +build !windows
-
-package archive
-
-import (
- "os"
- "syscall"
-)
-
-func hasHardlinks(fi os.FileInfo) bool {
- return fi.Sys().(*syscall.Stat_t).Nlink > 1
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go
deleted file mode 100644
index a93130474..000000000
--- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
-package archive
-
-import "os"
-
-func hasHardlinks(fi os.FileInfo) bool {
- return false
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go
deleted file mode 100644
index 45d45f20e..000000000
--- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
-package archive
-
-import (
- "os"
- "path/filepath"
-)
-
-// SplitPathDirEntry splits the given path between its directory name and its
-// basename by first cleaning the path but preserves a trailing "." if the
-// original path specified the current directory.
-func SplitPathDirEntry(path string) (dir, base string) {
- cleanedPath := filepath.Clean(filepath.FromSlash(path))
-
- if specifiesCurrentDir(path) {
- cleanedPath += string(os.PathSeparator) + "."
- }
-
- return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
-}
-
-// specifiesCurrentDir returns whether the given path specifies
-// a "current directory", i.e., the last path segment is `.`.
-func specifiesCurrentDir(path string) bool {
- return filepath.Base(path) == "."
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go
deleted file mode 100644
index 7d3c11358..000000000
--- a/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
-package term
-
-// Winsize represents the size of the terminal window.
-type Winsize struct {
- Height uint16
- Width uint16
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go
deleted file mode 100644
index 92a80a308..000000000
--- a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
-// +build !windows
-
-package term
-
-import "golang.org/x/sys/unix"
-
-// GetWinsize returns the window size based on the specified file descriptor.
-func GetWinsize(fd uintptr) (*Winsize, error) {
- uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
- ws := &Winsize{Height: uws.Row, Width: uws.Col}
- return ws, err
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go
deleted file mode 100644
index 4a07a5d19..000000000
--- a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
-package term
-
-import "github.com/Azure/go-ansiterm/winterm"
-
-// GetWinsize returns the window size based on the specified file descriptor.
-func GetWinsize(fd uintptr) (*Winsize, error) {
- info, err := winterm.GetConsoleScreenBufferInfo(fd)
- if err != nil {
- return nil, err
- }
-
- winsize := &Winsize{
- Width: uint16(info.Window.Right - info.Window.Left + 1),
- Height: uint16(info.Window.Bottom - info.Window.Top + 1),
- }
-
- return winsize, nil
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/misc.go b/vendor/github.com/fsouza/go-dockerclient/misc.go
index 01fd1f687..d42a66df6 100644
--- a/vendor/github.com/fsouza/go-dockerclient/misc.go
+++ b/vendor/github.com/fsouza/go-dockerclient/misc.go
@@ -8,6 +8,7 @@ import (
"context"
"encoding/json"
"net"
+ "net/http"
"strings"
"github.com/docker/docker/api/types/swarm"
@@ -22,7 +23,7 @@ func (c *Client) Version() (*Env, error) {
// VersionWithContext returns version information about the docker server.
func (c *Client) VersionWithContext(ctx context.Context) (*Env, error) {
- resp, err := c.do("GET", "/version", doOptions{context: ctx})
+ resp, err := c.do(http.MethodGet, "/version", doOptions{context: ctx})
if err != nil {
return nil, err
}
@@ -37,6 +38,7 @@ func (c *Client) VersionWithContext(ctx context.Context) (*Env, error) {
// DockerInfo contains information about the Docker server
//
// See https://goo.gl/bHUoz9 for more details.
+//nolint:golint
type DockerInfo struct {
ID string
Containers int
@@ -162,7 +164,7 @@ type IndexInfo struct {
//
// See https://goo.gl/ElTHi2 for more details.
func (c *Client) Info() (*DockerInfo, error) {
- resp, err := c.do("GET", "/info", doOptions{})
+ resp, err := c.do(http.MethodGet, "/info", doOptions{})
if err != nil {
return nil, err
}
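
A trivial sketch of the Info call shown above.

package example

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

// printInfo calls GET /info, now issued with http.MethodGet.
func printInfo(client *docker.Client) error {
	info, err := client.Info()
	if err != nil {
		return err
	}
	fmt.Printf("%s: %d containers, %d images\n", info.Name, info.Containers, info.Images)
	return nil
}
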
diff --git a/vendor/github.com/fsouza/go-dockerclient/network.go b/vendor/github.com/fsouza/go-dockerclient/network.go
index 2331e08bf..3a06a52d5 100644
--- a/vendor/github.com/fsouza/go-dockerclient/network.go
+++ b/vendor/github.com/fsouza/go-dockerclient/network.go
@@ -48,7 +48,7 @@ type Endpoint struct {
//
// See https://goo.gl/6GugX3 for more details.
func (c *Client) ListNetworks() ([]Network, error) {
- resp, err := c.do("GET", "/networks", doOptions{})
+ resp, err := c.do(http.MethodGet, "/networks", doOptions{})
if err != nil {
return nil, err
}
@@ -75,7 +75,7 @@ func (c *Client) FilteredListNetworks(opts NetworkFilterOpts) ([]Network, error)
qs := make(url.Values)
qs.Add("filters", string(params))
path := "/networks?" + qs.Encode()
- resp, err := c.do("GET", path, doOptions{})
+ resp, err := c.do(http.MethodGet, path, doOptions{})
if err != nil {
return nil, err
}
@@ -92,7 +92,7 @@ func (c *Client) FilteredListNetworks(opts NetworkFilterOpts) ([]Network, error)
// See https://goo.gl/6GugX3 for more details.
func (c *Client) NetworkInfo(id string) (*Network, error) {
path := "/networks/" + id
- resp, err := c.do("GET", path, doOptions{})
+ resp, err := c.do(http.MethodGet, path, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchNetwork{ID: id}
@@ -159,7 +159,7 @@ type IPAMConfig struct {
// See https://goo.gl/6GugX3 for more details.
func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) {
resp, err := c.do(
- "POST",
+ http.MethodPost,
"/networks/create",
doOptions{
data: opts,
@@ -193,7 +193,7 @@ func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) {
//
// See https://goo.gl/6GugX3 for more details.
func (c *Client) RemoveNetwork(id string) error {
- resp, err := c.do("DELETE", "/networks/"+id, doOptions{})
+ resp, err := c.do(http.MethodDelete, "/networks/"+id, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchNetwork{ID: id}
@@ -253,7 +253,7 @@ type EndpointIPAMConfig struct {
//
// See https://goo.gl/6GugX3 for more details.
func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error {
- resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{
+ resp, err := c.do(http.MethodPost, "/networks/"+id+"/connect", doOptions{
data: opts,
context: opts.Context,
})
@@ -272,7 +272,7 @@ func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error
//
// See https://goo.gl/6GugX3 for more details.
func (c *Client) DisconnectNetwork(id string, opts NetworkConnectionOptions) error {
- resp, err := c.do("POST", "/networks/"+id+"/disconnect", doOptions{data: opts})
+ resp, err := c.do(http.MethodPost, "/networks/"+id+"/disconnect", doOptions{data: opts})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container}
@@ -303,7 +303,7 @@ type PruneNetworksResults struct {
// See https://goo.gl/kX0S9h for more details.
func (c *Client) PruneNetworks(opts PruneNetworksOptions) (*PruneNetworksResults, error) {
path := "/networks/prune?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/plugin.go b/vendor/github.com/fsouza/go-dockerclient/plugin.go
index 088790313..9cec41512 100644
--- a/vendor/github.com/fsouza/go-dockerclient/plugin.go
+++ b/vendor/github.com/fsouza/go-dockerclient/plugin.go
@@ -35,15 +35,26 @@ type InstallPluginOptions struct {
//
// See https://goo.gl/C4t7Tz for more details.
func (c *Client) InstallPlugins(opts InstallPluginOptions) error {
+ headers, err := headersWithAuth(opts.Auth)
+ if err != nil {
+ return err
+ }
+
path := "/plugins/pull?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{
+ resp, err := c.do(http.MethodPost, path, doOptions{
data: opts.Plugins,
context: opts.Context,
+ headers: headers,
})
if err != nil {
return err
}
- resp.Body.Close()
+ defer resp.Body.Close()
+ // PullPlugin streams back the progress of the pull, we must consume the whole body
+ // otherwise the pull will be canceled on the engine.
+ if _, err := ioutil.ReadAll(resp.Body); err != nil {
+ return err
+ }
return nil
}
@@ -152,7 +163,7 @@ type PluginDetail struct {
//
// See https://goo.gl/C4t7Tz for more details.
func (c *Client) ListPlugins(ctx context.Context) ([]PluginDetail, error) {
- resp, err := c.do("GET", "/plugins", doOptions{
+ resp, err := c.do(http.MethodGet, "/plugins", doOptions{
context: ctx,
})
if err != nil {
@@ -179,7 +190,7 @@ type ListFilteredPluginsOptions struct {
// See https://goo.gl/rmdmWg for more details.
func (c *Client) ListFilteredPlugins(opts ListFilteredPluginsOptions) ([]PluginDetail, error) {
path := "/plugins/json?" + queryString(opts)
- resp, err := c.do("GET", path, doOptions{
+ resp, err := c.do(http.MethodGet, path, doOptions{
context: opts.Context,
})
if err != nil {
@@ -193,12 +204,41 @@ func (c *Client) ListFilteredPlugins(opts ListFilteredPluginsOptions) ([]PluginD
return pluginDetails, nil
}
-// GetPluginPrivileges returns pulginPrivileges or an error.
+// GetPluginPrivileges returns pluginPrivileges or an error.
//
// See https://goo.gl/C4t7Tz for more details.
-func (c *Client) GetPluginPrivileges(name string, ctx context.Context) ([]PluginPrivilege, error) {
- resp, err := c.do("GET", "/plugins/privileges?remote="+name, doOptions{
- context: ctx,
+//nolint:golint
+func (c *Client) GetPluginPrivileges(remote string, ctx context.Context) ([]PluginPrivilege, error) {
+ return c.GetPluginPrivilegesWithOptions(
+ GetPluginPrivilegesOptions{
+ Remote: remote,
+ Context: ctx,
+ })
+}
+
+// GetPluginPrivilegesOptions specify parameters to the GetPluginPrivilegesWithOptions function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type GetPluginPrivilegesOptions struct {
+ Remote string
+ Auth AuthConfiguration
+ Context context.Context
+}
+
+// GetPluginPrivilegesWithOptions returns pluginPrivileges or an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+//nolint:golint
+func (c *Client) GetPluginPrivilegesWithOptions(opts GetPluginPrivilegesOptions) ([]PluginPrivilege, error) {
+ headers, err := headersWithAuth(opts.Auth)
+ if err != nil {
+ return nil, err
+ }
+
+ path := "/plugins/privileges?" + queryString(opts)
+ resp, err := c.do(http.MethodGet, path, doOptions{
+ context: opts.Context,
+ headers: headers,
})
if err != nil {
return nil, err
@@ -214,21 +254,18 @@ func (c *Client) GetPluginPrivileges(name string, ctx context.Context) ([]Plugin
// InspectPlugins returns a pluginDetail or an error.
//
// See https://goo.gl/C4t7Tz for more details.
+//nolint:golint
func (c *Client) InspectPlugins(name string, ctx context.Context) (*PluginDetail, error) {
- resp, err := c.do("GET", "/plugins/"+name+"/json", doOptions{
+ resp, err := c.do(http.MethodGet, "/plugins/"+name+"/json", doOptions{
context: ctx,
})
if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchPlugin{ID: name}
}
return nil, err
}
- resp.Body.Close()
+ defer resp.Body.Close()
var pluginDetail PluginDetail
if err := json.NewDecoder(resp.Body).Decode(&pluginDetail); err != nil {
return nil, err
@@ -252,20 +289,26 @@ type RemovePluginOptions struct {
// See https://goo.gl/C4t7Tz for more details.
func (c *Client) RemovePlugin(opts RemovePluginOptions) (*PluginDetail, error) {
path := "/plugins/" + opts.Name + "?" + queryString(opts)
- resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context})
if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchPlugin{ID: opts.Name}
+ }
return nil, err
}
defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
if err != nil {
- if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
- return nil, &NoSuchPlugin{ID: opts.Name}
- }
return nil, err
}
- resp.Body.Close()
+
+ if len(body) == 0 {
+ // Seems like newer docker versions won't return the plugindetail after removal
+ return nil, nil
+ }
+
var pluginDetail PluginDetail
- if err := json.NewDecoder(resp.Body).Decode(&pluginDetail); err != nil {
+ if err := json.Unmarshal(body, &pluginDetail); err != nil {
return nil, err
}
return &pluginDetail, nil
@@ -287,7 +330,7 @@ type EnablePluginOptions struct {
// See https://goo.gl/C4t7Tz for more details.
func (c *Client) EnablePlugin(opts EnablePluginOptions) error {
path := "/plugins/" + opts.Name + "/enable?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
if err != nil {
return err
}
@@ -310,7 +353,7 @@ type DisablePluginOptions struct {
// See https://goo.gl/C4t7Tz for more details.
func (c *Client) DisablePlugin(opts DisablePluginOptions) error {
path := "/plugins/" + opts.Name + "/disable"
- resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
if err != nil {
return err
}
@@ -335,7 +378,7 @@ type CreatePluginOptions struct {
// See https://goo.gl/C4t7Tz for more details.
func (c *Client) CreatePlugin(opts CreatePluginOptions) (string, error) {
path := "/plugins/create?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{
+ resp, err := c.do(http.MethodPost, path, doOptions{
data: opts.Path,
context: opts.Context,
})
@@ -365,7 +408,7 @@ type PushPluginOptions struct {
// See https://goo.gl/C4t7Tz for more details.
func (c *Client) PushPlugin(opts PushPluginOptions) error {
path := "/plugins/" + opts.Name + "/push"
- resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
if err != nil {
return err
}
@@ -389,7 +432,7 @@ type ConfigurePluginOptions struct {
// See https://goo.gl/C4t7Tz for more details.
func (c *Client) ConfigurePlugin(opts ConfigurePluginOptions) error {
path := "/plugins/" + opts.Name + "/set"
- resp, err := c.do("POST", path, doOptions{
+ resp, err := c.do(http.MethodPost, path, doOptions{
data: opts.Envs,
context: opts.Context,
})
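
The plugin.go hunks above thread registry auth headers through plugin pulls and add an options-based GetPluginPrivilegesWithOptions next to the old signature. A hedged caller sketch, assuming a local Docker socket and a placeholder plugin reference (neither appears in the patch):

```go
package main

import (
	"context"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// Connect to the local engine; the endpoint is an assumption for the sketch.
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}

	// Query the privileges a plugin would request, passing (possibly empty)
	// registry credentials via the new options struct. "vieux/sshfs:latest"
	// is a placeholder plugin reference, not something the patch mentions.
	privileges, err := client.GetPluginPrivilegesWithOptions(docker.GetPluginPrivilegesOptions{
		Remote:  "vieux/sshfs:latest",
		Auth:    docker.AuthConfiguration{},
		Context: context.Background(),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("plugin requests %d privileges", len(privileges))
}
```

As the hunk shows, the older GetPluginPrivileges(remote, ctx) now simply delegates to this options variant, so existing callers keep working.
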
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm.go b/vendor/github.com/fsouza/go-dockerclient/swarm.go
index a257758fc..c1bbce763 100644
--- a/vendor/github.com/fsouza/go-dockerclient/swarm.go
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm.go
@@ -36,7 +36,7 @@ type InitSwarmOptions struct {
// See https://goo.gl/ZWyG1M for more details.
func (c *Client) InitSwarm(opts InitSwarmOptions) (string, error) {
path := "/swarm/init"
- resp, err := c.do("POST", path, doOptions{
+ resp, err := c.do(http.MethodPost, path, doOptions{
data: opts.InitRequest,
forceJSON: true,
context: opts.Context,
@@ -66,7 +66,7 @@ type JoinSwarmOptions struct {
// See https://goo.gl/N59IP1 for more details.
func (c *Client) JoinSwarm(opts JoinSwarmOptions) error {
path := "/swarm/join"
- resp, err := c.do("POST", path, doOptions{
+ resp, err := c.do(http.MethodPost, path, doOptions{
data: opts.JoinRequest,
forceJSON: true,
context: opts.Context,
@@ -93,7 +93,7 @@ func (c *Client) LeaveSwarm(opts LeaveSwarmOptions) error {
params := make(url.Values)
params.Set("force", strconv.FormatBool(opts.Force))
path := "/swarm/leave?" + params.Encode()
- resp, err := c.do("POST", path, doOptions{
+ resp, err := c.do(http.MethodPost, path, doOptions{
context: opts.Context,
})
if err != nil {
@@ -123,7 +123,7 @@ func (c *Client) UpdateSwarm(opts UpdateSwarmOptions) error {
params.Set("rotateWorkerToken", strconv.FormatBool(opts.RotateWorkerToken))
params.Set("rotateManagerToken", strconv.FormatBool(opts.RotateManagerToken))
path := "/swarm/update?" + params.Encode()
- resp, err := c.do("POST", path, doOptions{
+ resp, err := c.do(http.MethodPost, path, doOptions{
data: opts.Swarm,
forceJSON: true,
context: opts.Context,
@@ -141,7 +141,7 @@ func (c *Client) UpdateSwarm(opts UpdateSwarmOptions) error {
// See https://goo.gl/MFwgX9 for more details.
func (c *Client) InspectSwarm(ctx context.Context) (swarm.Swarm, error) {
response := swarm.Swarm{}
- resp, err := c.do("GET", "/swarm", doOptions{
+ resp, err := c.do(http.MethodGet, "/swarm", doOptions{
context: ctx,
})
if err != nil {
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go b/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go
index fb73ab2ef..399aa1dce 100644
--- a/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go
@@ -46,7 +46,7 @@ func (c *Client) CreateConfig(opts CreateConfigOptions) (*swarm.Config, error) {
return nil, err
}
path := "/configs/create?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{
+ resp, err := c.do(http.MethodPost, path, doOptions{
headers: headers,
data: opts.ConfigSpec,
forceJSON: true,
@@ -76,7 +76,7 @@ type RemoveConfigOptions struct {
// See https://goo.gl/Tqrtya for more details.
func (c *Client) RemoveConfig(opts RemoveConfigOptions) error {
path := "/configs/" + opts.ID
- resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchConfig{ID: opts.ID}
@@ -109,7 +109,7 @@ func (c *Client) UpdateConfig(id string, opts UpdateConfigOptions) error {
}
params := make(url.Values)
params.Set("version", strconv.FormatUint(opts.Version, 10))
- resp, err := c.do("POST", "/configs/"+id+"/update?"+params.Encode(), doOptions{
+ resp, err := c.do(http.MethodPost, "/configs/"+id+"/update?"+params.Encode(), doOptions{
headers: headers,
data: opts.ConfigSpec,
forceJSON: true,
@@ -130,7 +130,7 @@ func (c *Client) UpdateConfig(id string, opts UpdateConfigOptions) error {
// See https://goo.gl/dHmr75 for more details.
func (c *Client) InspectConfig(id string) (*swarm.Config, error) {
path := "/configs/" + id
- resp, err := c.do("GET", path, doOptions{})
+ resp, err := c.do(http.MethodGet, path, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchConfig{ID: id}
@@ -158,7 +158,7 @@ type ListConfigsOptions struct {
// See https://goo.gl/DwvNMd for more details.
func (c *Client) ListConfigs(opts ListConfigsOptions) ([]swarm.Config, error) {
path := "/configs?" + queryString(opts)
- resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_node.go b/vendor/github.com/fsouza/go-dockerclient/swarm_node.go
index 095653cd9..c149db287 100644
--- a/vendor/github.com/fsouza/go-dockerclient/swarm_node.go
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm_node.go
@@ -40,7 +40,7 @@ type ListNodesOptions struct {
// See http://goo.gl/3K4GwU for more details.
func (c *Client) ListNodes(opts ListNodesOptions) ([]swarm.Node, error) {
path := "/nodes?" + queryString(opts)
- resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
@@ -56,7 +56,7 @@ func (c *Client) ListNodes(opts ListNodesOptions) ([]swarm.Node, error) {
//
// See http://goo.gl/WjkTOk for more details.
func (c *Client) InspectNode(id string) (*swarm.Node, error) {
- resp, err := c.do("GET", "/nodes/"+id, doOptions{})
+ resp, err := c.do(http.MethodGet, "/nodes/"+id, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchNode{ID: id}
@@ -87,7 +87,7 @@ func (c *Client) UpdateNode(id string, opts UpdateNodeOptions) error {
params := make(url.Values)
params.Set("version", strconv.FormatUint(opts.Version, 10))
path := "/nodes/" + id + "/update?" + params.Encode()
- resp, err := c.do("POST", path, doOptions{
+ resp, err := c.do(http.MethodPost, path, doOptions{
context: opts.Context,
forceJSON: true,
data: opts.NodeSpec,
@@ -118,7 +118,7 @@ func (c *Client) RemoveNode(opts RemoveNodeOptions) error {
params := make(url.Values)
params.Set("force", strconv.FormatBool(opts.Force))
path := "/nodes/" + opts.ID + "?" + params.Encode()
- resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchNode{ID: opts.ID}
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go b/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go
index 5a3b82ca5..058c4a4af 100644
--- a/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go
@@ -46,7 +46,7 @@ func (c *Client) CreateSecret(opts CreateSecretOptions) (*swarm.Secret, error) {
return nil, err
}
path := "/secrets/create?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{
+ resp, err := c.do(http.MethodPost, path, doOptions{
headers: headers,
data: opts.SecretSpec,
forceJSON: true,
@@ -76,7 +76,7 @@ type RemoveSecretOptions struct {
// See https://goo.gl/Tqrtya for more details.
func (c *Client) RemoveSecret(opts RemoveSecretOptions) error {
path := "/secrets/" + opts.ID
- resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchSecret{ID: opts.ID}
@@ -109,7 +109,7 @@ func (c *Client) UpdateSecret(id string, opts UpdateSecretOptions) error {
}
params := make(url.Values)
params.Set("version", strconv.FormatUint(opts.Version, 10))
- resp, err := c.do("POST", "/secrets/"+id+"/update?"+params.Encode(), doOptions{
+ resp, err := c.do(http.MethodPost, "/secrets/"+id+"/update?"+params.Encode(), doOptions{
headers: headers,
data: opts.SecretSpec,
forceJSON: true,
@@ -130,7 +130,7 @@ func (c *Client) UpdateSecret(id string, opts UpdateSecretOptions) error {
// See https://goo.gl/dHmr75 for more details.
func (c *Client) InspectSecret(id string) (*swarm.Secret, error) {
path := "/secrets/" + id
- resp, err := c.do("GET", path, doOptions{})
+ resp, err := c.do(http.MethodGet, path, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchSecret{ID: id}
@@ -158,7 +158,7 @@ type ListSecretsOptions struct {
// See https://goo.gl/DwvNMd for more details.
func (c *Client) ListSecrets(opts ListSecretsOptions) ([]swarm.Secret, error) {
path := "/secrets?" + queryString(opts)
- resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_service.go b/vendor/github.com/fsouza/go-dockerclient/swarm_service.go
index d9c4b2acc..cedbe41e3 100644
--- a/vendor/github.com/fsouza/go-dockerclient/swarm_service.go
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm_service.go
@@ -46,7 +46,7 @@ func (c *Client) CreateService(opts CreateServiceOptions) (*swarm.Service, error
return nil, err
}
path := "/services/create?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{
+ resp, err := c.do(http.MethodPost, path, doOptions{
headers: headers,
data: opts.ServiceSpec,
forceJSON: true,
@@ -76,7 +76,7 @@ type RemoveServiceOptions struct {
// See https://goo.gl/Tqrtya for more details.
func (c *Client) RemoveService(opts RemoveServiceOptions) error {
path := "/services/" + opts.ID
- resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchService{ID: opts.ID}
@@ -106,7 +106,7 @@ func (c *Client) UpdateService(id string, opts UpdateServiceOptions) error {
if err != nil {
return err
}
- resp, err := c.do("POST", "/services/"+id+"/update?"+queryString(opts), doOptions{
+ resp, err := c.do(http.MethodPost, "/services/"+id+"/update?"+queryString(opts), doOptions{
headers: headers,
data: opts.ServiceSpec,
forceJSON: true,
@@ -127,7 +127,7 @@ func (c *Client) UpdateService(id string, opts UpdateServiceOptions) error {
// See https://goo.gl/dHmr75 for more details.
func (c *Client) InspectService(id string) (*swarm.Service, error) {
path := "/services/" + id
- resp, err := c.do("GET", path, doOptions{})
+ resp, err := c.do(http.MethodGet, path, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchService{ID: id}
@@ -155,7 +155,7 @@ type ListServicesOptions struct {
// See https://goo.gl/DwvNMd for more details.
func (c *Client) ListServices(opts ListServicesOptions) ([]swarm.Service, error) {
path := "/services?" + queryString(opts)
- resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
@@ -203,7 +203,7 @@ func (c *Client) GetServiceLogs(opts LogsServiceOptions) error {
opts.Tail = "all"
}
path := "/services/" + opts.Service + "/logs?" + queryString(opts)
- return c.stream("GET", path, streamOptions{
+ return c.stream(http.MethodGet, path, streamOptions{
setRawTerminal: opts.RawTerminal,
stdout: opts.OutputStream,
stderr: opts.ErrorStream,
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_task.go b/vendor/github.com/fsouza/go-dockerclient/swarm_task.go
index 3b1161ab9..547642f5e 100644
--- a/vendor/github.com/fsouza/go-dockerclient/swarm_task.go
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm_task.go
@@ -38,7 +38,7 @@ type ListTasksOptions struct {
// See http://goo.gl/rByLzw for more details.
func (c *Client) ListTasks(opts ListTasksOptions) ([]swarm.Task, error) {
path := "/tasks?" + queryString(opts)
- resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
@@ -54,7 +54,7 @@ func (c *Client) ListTasks(opts ListTasksOptions) ([]swarm.Task, error) {
//
// See http://goo.gl/kyziuq for more details.
func (c *Client) InspectTask(id string) (*swarm.Task, error) {
- resp, err := c.do("GET", "/tasks/"+id, doOptions{})
+ resp, err := c.do(http.MethodGet, "/tasks/"+id, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchTask{ID: id}
diff --git a/vendor/github.com/fsouza/go-dockerclient/system.go b/vendor/github.com/fsouza/go-dockerclient/system.go
index a43dfb5a2..46b9faf00 100644
--- a/vendor/github.com/fsouza/go-dockerclient/system.go
+++ b/vendor/github.com/fsouza/go-dockerclient/system.go
@@ -3,6 +3,7 @@ package docker
import (
"context"
"encoding/json"
+ "net/http"
)
// VolumeUsageData represents usage data from the docker system api
@@ -59,7 +60,7 @@ type DiskUsageOptions struct {
// More Info Here https://dockr.ly/2PNzQyO
func (c *Client) DiskUsage(opts DiskUsageOptions) (*DiskUsage, error) {
path := "/system/df"
- resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go
index 611da8c9e..9716a7712 100644
--- a/vendor/github.com/fsouza/go-dockerclient/tar.go
+++ b/vendor/github.com/fsouza/go-dockerclient/tar.go
@@ -13,8 +13,8 @@ import (
"path/filepath"
"strings"
+ "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/fileutils"
- "github.com/fsouza/go-dockerclient/internal/archive"
)
func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
diff --git a/vendor/github.com/fsouza/go-dockerclient/tls.go b/vendor/github.com/fsouza/go-dockerclient/tls.go
index 07661f3d1..08e7f8ec2 100644
--- a/vendor/github.com/fsouza/go-dockerclient/tls.go
+++ b/vendor/github.com/fsouza/go-dockerclient/tls.go
@@ -103,7 +103,7 @@ func copyTLSConfig(cfg *tls.Config) *tls.Config {
ClientCAs: cfg.ClientCAs,
ClientSessionCache: cfg.ClientSessionCache,
CurvePreferences: cfg.CurvePreferences,
- InsecureSkipVerify: cfg.InsecureSkipVerify,
+ InsecureSkipVerify: cfg.InsecureSkipVerify, //nolint:gosec
MaxVersion: cfg.MaxVersion,
MinVersion: cfg.MinVersion,
NameToCertificate: cfg.NameToCertificate,
diff --git a/vendor/github.com/fsouza/go-dockerclient/volume.go b/vendor/github.com/fsouza/go-dockerclient/volume.go
index c8f50469e..c39a273bf 100644
--- a/vendor/github.com/fsouza/go-dockerclient/volume.go
+++ b/vendor/github.com/fsouza/go-dockerclient/volume.go
@@ -44,7 +44,7 @@ type ListVolumesOptions struct {
//
// See https://goo.gl/3wgTsd for more details.
func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) {
- resp, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{
+ resp, err := c.do(http.MethodGet, "/volumes?"+queryString(opts), doOptions{
context: opts.Context,
})
if err != nil {
@@ -85,7 +85,7 @@ type CreateVolumeOptions struct {
//
// See https://goo.gl/qEhmEC for more details.
func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) {
- resp, err := c.do("POST", "/volumes/create", doOptions{
+ resp, err := c.do(http.MethodPost, "/volumes/create", doOptions{
data: opts,
context: opts.Context,
})
@@ -104,7 +104,7 @@ func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) {
//
// See https://goo.gl/GMjsMc for more details.
func (c *Client) InspectVolume(name string) (*Volume, error) {
- resp, err := c.do("GET", "/volumes/"+name, doOptions{})
+ resp, err := c.do(http.MethodGet, "/volumes/"+name, doOptions{})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, ErrNoSuchVolume
@@ -142,7 +142,7 @@ type RemoveVolumeOptions struct {
// See https://goo.gl/nvd6qj for more details.
func (c *Client) RemoveVolumeWithOptions(opts RemoveVolumeOptions) error {
path := "/volumes/" + opts.Name
- resp, err := c.do("DELETE", path+"?"+queryString(opts), doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodDelete, path+"?"+queryString(opts), doOptions{context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok {
if e.Status == http.StatusNotFound {
@@ -179,7 +179,7 @@ type PruneVolumesResults struct {
// See https://goo.gl/f9XDem for more details.
func (c *Client) PruneVolumes(opts PruneVolumesOptions) (*PruneVolumesResults, error) {
path := "/volumes/prune?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/ijc/Gotty/LICENSE b/vendor/github.com/ijc/Gotty/LICENSE
deleted file mode 100644
index 0b71c9736..000000000
--- a/vendor/github.com/ijc/Gotty/LICENSE
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com)
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-The views and conclusions contained in the software and documentation are those
-of the authors and should not be interpreted as representing official policies,
-either expressed or implied, of the FreeBSD Project.
diff --git a/vendor/github.com/ijc/Gotty/README b/vendor/github.com/ijc/Gotty/README
deleted file mode 100644
index a6b0d9a8f..000000000
--- a/vendor/github.com/ijc/Gotty/README
+++ /dev/null
@@ -1,5 +0,0 @@
-Gotty is a library written in Go that determines and reads termcap database
-files to produce an interface for interacting with the capabilities of a
-terminal.
-See the godoc documentation or the source code for more information about
-function usage.
diff --git a/vendor/github.com/ijc/Gotty/TODO b/vendor/github.com/ijc/Gotty/TODO
deleted file mode 100644
index 470460531..000000000
--- a/vendor/github.com/ijc/Gotty/TODO
+++ /dev/null
@@ -1,3 +0,0 @@
-gotty.go:// TODO add more concurrency to name lookup, look for more opportunities.
-all:// TODO add more documentation, with function usage in a doc.go file.
-all:// TODO add more testing/benchmarking with go test.
diff --git a/vendor/github.com/ijc/Gotty/attributes.go b/vendor/github.com/ijc/Gotty/attributes.go
deleted file mode 100644
index a4c005fae..000000000
--- a/vendor/github.com/ijc/Gotty/attributes.go
+++ /dev/null
@@ -1,514 +0,0 @@
-// Copyright 2012 Neal van Veen. All rights reserved.
-// Usage of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package gotty
-
-// Boolean capabilities
-var BoolAttr = [...]string{
- "auto_left_margin", "bw",
- "auto_right_margin", "am",
- "no_esc_ctlc", "xsb",
- "ceol_standout_glitch", "xhp",
- "eat_newline_glitch", "xenl",
- "erase_overstrike", "eo",
- "generic_type", "gn",
- "hard_copy", "hc",
- "has_meta_key", "km",
- "has_status_line", "hs",
- "insert_null_glitch", "in",
- "memory_above", "da",
- "memory_below", "db",
- "move_insert_mode", "mir",
- "move_standout_mode", "msgr",
- "over_strike", "os",
- "status_line_esc_ok", "eslok",
- "dest_tabs_magic_smso", "xt",
- "tilde_glitch", "hz",
- "transparent_underline", "ul",
- "xon_xoff", "nxon",
- "needs_xon_xoff", "nxon",
- "prtr_silent", "mc5i",
- "hard_cursor", "chts",
- "non_rev_rmcup", "nrrmc",
- "no_pad_char", "npc",
- "non_dest_scroll_region", "ndscr",
- "can_change", "ccc",
- "back_color_erase", "bce",
- "hue_lightness_saturation", "hls",
- "col_addr_glitch", "xhpa",
- "cr_cancels_micro_mode", "crxm",
- "has_print_wheel", "daisy",
- "row_addr_glitch", "xvpa",
- "semi_auto_right_margin", "sam",
- "cpi_changes_res", "cpix",
- "lpi_changes_res", "lpix",
- "backspaces_with_bs", "",
- "crt_no_scrolling", "",
- "no_correctly_working_cr", "",
- "gnu_has_meta_key", "",
- "linefeed_is_newline", "",
- "has_hardware_tabs", "",
- "return_does_clr_eol", "",
-}
-
-// Numerical capabilities
-var NumAttr = [...]string{
- "columns", "cols",
- "init_tabs", "it",
- "lines", "lines",
- "lines_of_memory", "lm",
- "magic_cookie_glitch", "xmc",
- "padding_baud_rate", "pb",
- "virtual_terminal", "vt",
- "width_status_line", "wsl",
- "num_labels", "nlab",
- "label_height", "lh",
- "label_width", "lw",
- "max_attributes", "ma",
- "maximum_windows", "wnum",
- "max_colors", "colors",
- "max_pairs", "pairs",
- "no_color_video", "ncv",
- "buffer_capacity", "bufsz",
- "dot_vert_spacing", "spinv",
- "dot_horz_spacing", "spinh",
- "max_micro_address", "maddr",
- "max_micro_jump", "mjump",
- "micro_col_size", "mcs",
- "micro_line_size", "mls",
- "number_of_pins", "npins",
- "output_res_char", "orc",
- "output_res_line", "orl",
- "output_res_horz_inch", "orhi",
- "output_res_vert_inch", "orvi",
- "print_rate", "cps",
- "wide_char_size", "widcs",
- "buttons", "btns",
- "bit_image_entwining", "bitwin",
- "bit_image_type", "bitype",
- "magic_cookie_glitch_ul", "",
- "carriage_return_delay", "",
- "new_line_delay", "",
- "backspace_delay", "",
- "horizontal_tab_delay", "",
- "number_of_function_keys", "",
-}
-
-// String capabilities
-var StrAttr = [...]string{
- "back_tab", "cbt",
- "bell", "bel",
- "carriage_return", "cr",
- "change_scroll_region", "csr",
- "clear_all_tabs", "tbc",
- "clear_screen", "clear",
- "clr_eol", "el",
- "clr_eos", "ed",
- "column_address", "hpa",
- "command_character", "cmdch",
- "cursor_address", "cup",
- "cursor_down", "cud1",
- "cursor_home", "home",
- "cursor_invisible", "civis",
- "cursor_left", "cub1",
- "cursor_mem_address", "mrcup",
- "cursor_normal", "cnorm",
- "cursor_right", "cuf1",
- "cursor_to_ll", "ll",
- "cursor_up", "cuu1",
- "cursor_visible", "cvvis",
- "delete_character", "dch1",
- "delete_line", "dl1",
- "dis_status_line", "dsl",
- "down_half_line", "hd",
- "enter_alt_charset_mode", "smacs",
- "enter_blink_mode", "blink",
- "enter_bold_mode", "bold",
- "enter_ca_mode", "smcup",
- "enter_delete_mode", "smdc",
- "enter_dim_mode", "dim",
- "enter_insert_mode", "smir",
- "enter_secure_mode", "invis",
- "enter_protected_mode", "prot",
- "enter_reverse_mode", "rev",
- "enter_standout_mode", "smso",
- "enter_underline_mode", "smul",
- "erase_chars", "ech",
- "exit_alt_charset_mode", "rmacs",
- "exit_attribute_mode", "sgr0",
- "exit_ca_mode", "rmcup",
- "exit_delete_mode", "rmdc",
- "exit_insert_mode", "rmir",
- "exit_standout_mode", "rmso",
- "exit_underline_mode", "rmul",
- "flash_screen", "flash",
- "form_feed", "ff",
- "from_status_line", "fsl",
- "init_1string", "is1",
- "init_2string", "is2",
- "init_3string", "is3",
- "init_file", "if",
- "insert_character", "ich1",
- "insert_line", "il1",
- "insert_padding", "ip",
- "key_backspace", "kbs",
- "key_catab", "ktbc",
- "key_clear", "kclr",
- "key_ctab", "kctab",
- "key_dc", "kdch1",
- "key_dl", "kdl1",
- "key_down", "kcud1",
- "key_eic", "krmir",
- "key_eol", "kel",
- "key_eos", "ked",
- "key_f0", "kf0",
- "key_f1", "kf1",
- "key_f10", "kf10",
- "key_f2", "kf2",
- "key_f3", "kf3",
- "key_f4", "kf4",
- "key_f5", "kf5",
- "key_f6", "kf6",
- "key_f7", "kf7",
- "key_f8", "kf8",
- "key_f9", "kf9",
- "key_home", "khome",
- "key_ic", "kich1",
- "key_il", "kil1",
- "key_left", "kcub1",
- "key_ll", "kll",
- "key_npage", "knp",
- "key_ppage", "kpp",
- "key_right", "kcuf1",
- "key_sf", "kind",
- "key_sr", "kri",
- "key_stab", "khts",
- "key_up", "kcuu1",
- "keypad_local", "rmkx",
- "keypad_xmit", "smkx",
- "lab_f0", "lf0",
- "lab_f1", "lf1",
- "lab_f10", "lf10",
- "lab_f2", "lf2",
- "lab_f3", "lf3",
- "lab_f4", "lf4",
- "lab_f5", "lf5",
- "lab_f6", "lf6",
- "lab_f7", "lf7",
- "lab_f8", "lf8",
- "lab_f9", "lf9",
- "meta_off", "rmm",
- "meta_on", "smm",
- "newline", "_glitch",
- "pad_char", "npc",
- "parm_dch", "dch",
- "parm_delete_line", "dl",
- "parm_down_cursor", "cud",
- "parm_ich", "ich",
- "parm_index", "indn",
- "parm_insert_line", "il",
- "parm_left_cursor", "cub",
- "parm_right_cursor", "cuf",
- "parm_rindex", "rin",
- "parm_up_cursor", "cuu",
- "pkey_key", "pfkey",
- "pkey_local", "pfloc",
- "pkey_xmit", "pfx",
- "print_screen", "mc0",
- "prtr_off", "mc4",
- "prtr_on", "mc5",
- "repeat_char", "rep",
- "reset_1string", "rs1",
- "reset_2string", "rs2",
- "reset_3string", "rs3",
- "reset_file", "rf",
- "restore_cursor", "rc",
- "row_address", "mvpa",
- "save_cursor", "row_address",
- "scroll_forward", "ind",
- "scroll_reverse", "ri",
- "set_attributes", "sgr",
- "set_tab", "hts",
- "set_window", "wind",
- "tab", "s_magic_smso",
- "to_status_line", "tsl",
- "underline_char", "uc",
- "up_half_line", "hu",
- "init_prog", "iprog",
- "key_a1", "ka1",
- "key_a3", "ka3",
- "key_b2", "kb2",
- "key_c1", "kc1",
- "key_c3", "kc3",
- "prtr_non", "mc5p",
- "char_padding", "rmp",
- "acs_chars", "acsc",
- "plab_norm", "pln",
- "key_btab", "kcbt",
- "enter_xon_mode", "smxon",
- "exit_xon_mode", "rmxon",
- "enter_am_mode", "smam",
- "exit_am_mode", "rmam",
- "xon_character", "xonc",
- "xoff_character", "xoffc",
- "ena_acs", "enacs",
- "label_on", "smln",
- "label_off", "rmln",
- "key_beg", "kbeg",
- "key_cancel", "kcan",
- "key_close", "kclo",
- "key_command", "kcmd",
- "key_copy", "kcpy",
- "key_create", "kcrt",
- "key_end", "kend",
- "key_enter", "kent",
- "key_exit", "kext",
- "key_find", "kfnd",
- "key_help", "khlp",
- "key_mark", "kmrk",
- "key_message", "kmsg",
- "key_move", "kmov",
- "key_next", "knxt",
- "key_open", "kopn",
- "key_options", "kopt",
- "key_previous", "kprv",
- "key_print", "kprt",
- "key_redo", "krdo",
- "key_reference", "kref",
- "key_refresh", "krfr",
- "key_replace", "krpl",
- "key_restart", "krst",
- "key_resume", "kres",
- "key_save", "ksav",
- "key_suspend", "kspd",
- "key_undo", "kund",
- "key_sbeg", "kBEG",
- "key_scancel", "kCAN",
- "key_scommand", "kCMD",
- "key_scopy", "kCPY",
- "key_screate", "kCRT",
- "key_sdc", "kDC",
- "key_sdl", "kDL",
- "key_select", "kslt",
- "key_send", "kEND",
- "key_seol", "kEOL",
- "key_sexit", "kEXT",
- "key_sfind", "kFND",
- "key_shelp", "kHLP",
- "key_shome", "kHOM",
- "key_sic", "kIC",
- "key_sleft", "kLFT",
- "key_smessage", "kMSG",
- "key_smove", "kMOV",
- "key_snext", "kNXT",
- "key_soptions", "kOPT",
- "key_sprevious", "kPRV",
- "key_sprint", "kPRT",
- "key_sredo", "kRDO",
- "key_sreplace", "kRPL",
- "key_sright", "kRIT",
- "key_srsume", "kRES",
- "key_ssave", "kSAV",
- "key_ssuspend", "kSPD",
- "key_sundo", "kUND",
- "req_for_input", "rfi",
- "key_f11", "kf11",
- "key_f12", "kf12",
- "key_f13", "kf13",
- "key_f14", "kf14",
- "key_f15", "kf15",
- "key_f16", "kf16",
- "key_f17", "kf17",
- "key_f18", "kf18",
- "key_f19", "kf19",
- "key_f20", "kf20",
- "key_f21", "kf21",
- "key_f22", "kf22",
- "key_f23", "kf23",
- "key_f24", "kf24",
- "key_f25", "kf25",
- "key_f26", "kf26",
- "key_f27", "kf27",
- "key_f28", "kf28",
- "key_f29", "kf29",
- "key_f30", "kf30",
- "key_f31", "kf31",
- "key_f32", "kf32",
- "key_f33", "kf33",
- "key_f34", "kf34",
- "key_f35", "kf35",
- "key_f36", "kf36",
- "key_f37", "kf37",
- "key_f38", "kf38",
- "key_f39", "kf39",
- "key_f40", "kf40",
- "key_f41", "kf41",
- "key_f42", "kf42",
- "key_f43", "kf43",
- "key_f44", "kf44",
- "key_f45", "kf45",
- "key_f46", "kf46",
- "key_f47", "kf47",
- "key_f48", "kf48",
- "key_f49", "kf49",
- "key_f50", "kf50",
- "key_f51", "kf51",
- "key_f52", "kf52",
- "key_f53", "kf53",
- "key_f54", "kf54",
- "key_f55", "kf55",
- "key_f56", "kf56",
- "key_f57", "kf57",
- "key_f58", "kf58",
- "key_f59", "kf59",
- "key_f60", "kf60",
- "key_f61", "kf61",
- "key_f62", "kf62",
- "key_f63", "kf63",
- "clr_bol", "el1",
- "clear_margins", "mgc",
- "set_left_margin", "smgl",
- "set_right_margin", "smgr",
- "label_format", "fln",
- "set_clock", "sclk",
- "display_clock", "dclk",
- "remove_clock", "rmclk",
- "create_window", "cwin",
- "goto_window", "wingo",
- "hangup", "hup",
- "dial_phone", "dial",
- "quick_dial", "qdial",
- "tone", "tone",
- "pulse", "pulse",
- "flash_hook", "hook",
- "fixed_pause", "pause",
- "wait_tone", "wait",
- "user0", "u0",
- "user1", "u1",
- "user2", "u2",
- "user3", "u3",
- "user4", "u4",
- "user5", "u5",
- "user6", "u6",
- "user7", "u7",
- "user8", "u8",
- "user9", "u9",
- "orig_pair", "op",
- "orig_colors", "oc",
- "initialize_color", "initc",
- "initialize_pair", "initp",
- "set_color_pair", "scp",
- "set_foreground", "setf",
- "set_background", "setb",
- "change_char_pitch", "cpi",
- "change_line_pitch", "lpi",
- "change_res_horz", "chr",
- "change_res_vert", "cvr",
- "define_char", "defc",
- "enter_doublewide_mode", "swidm",
- "enter_draft_quality", "sdrfq",
- "enter_italics_mode", "sitm",
- "enter_leftward_mode", "slm",
- "enter_micro_mode", "smicm",
- "enter_near_letter_quality", "snlq",
- "enter_normal_quality", "snrmq",
- "enter_shadow_mode", "sshm",
- "enter_subscript_mode", "ssubm",
- "enter_superscript_mode", "ssupm",
- "enter_upward_mode", "sum",
- "exit_doublewide_mode", "rwidm",
- "exit_italics_mode", "ritm",
- "exit_leftward_mode", "rlm",
- "exit_micro_mode", "rmicm",
- "exit_shadow_mode", "rshm",
- "exit_subscript_mode", "rsubm",
- "exit_superscript_mode", "rsupm",
- "exit_upward_mode", "rum",
- "micro_column_address", "mhpa",
- "micro_down", "mcud1",
- "micro_left", "mcub1",
- "micro_right", "mcuf1",
- "micro_row_address", "mvpa",
- "micro_up", "mcuu1",
- "order_of_pins", "porder",
- "parm_down_micro", "mcud",
- "parm_left_micro", "mcub",
- "parm_right_micro", "mcuf",
- "parm_up_micro", "mcuu",
- "select_char_set", "scs",
- "set_bottom_margin", "smgb",
- "set_bottom_margin_parm", "smgbp",
- "set_left_margin_parm", "smglp",
- "set_right_margin_parm", "smgrp",
- "set_top_margin", "smgt",
- "set_top_margin_parm", "smgtp",
- "start_bit_image", "sbim",
- "start_char_set_def", "scsd",
- "stop_bit_image", "rbim",
- "stop_char_set_def", "rcsd",
- "subscript_characters", "subcs",
- "superscript_characters", "supcs",
- "these_cause_cr", "docr",
- "zero_motion", "zerom",
- "char_set_names", "csnm",
- "key_mouse", "kmous",
- "mouse_info", "minfo",
- "req_mouse_pos", "reqmp",
- "get_mouse", "getm",
- "set_a_foreground", "setaf",
- "set_a_background", "setab",
- "pkey_plab", "pfxl",
- "device_type", "devt",
- "code_set_init", "csin",
- "set0_des_seq", "s0ds",
- "set1_des_seq", "s1ds",
- "set2_des_seq", "s2ds",
- "set3_des_seq", "s3ds",
- "set_lr_margin", "smglr",
- "set_tb_margin", "smgtb",
- "bit_image_repeat", "birep",
- "bit_image_newline", "binel",
- "bit_image_carriage_return", "bicr",
- "color_names", "colornm",
- "define_bit_image_region", "defbi",
- "end_bit_image_region", "endbi",
- "set_color_band", "setcolor",
- "set_page_length", "slines",
- "display_pc_char", "dispc",
- "enter_pc_charset_mode", "smpch",
- "exit_pc_charset_mode", "rmpch",
- "enter_scancode_mode", "smsc",
- "exit_scancode_mode", "rmsc",
- "pc_term_options", "pctrm",
- "scancode_escape", "scesc",
- "alt_scancode_esc", "scesa",
- "enter_horizontal_hl_mode", "ehhlm",
- "enter_left_hl_mode", "elhlm",
- "enter_low_hl_mode", "elohlm",
- "enter_right_hl_mode", "erhlm",
- "enter_top_hl_mode", "ethlm",
- "enter_vertical_hl_mode", "evhlm",
- "set_a_attributes", "sgr1",
- "set_pglen_inch", "slength",
- "termcap_init2", "",
- "termcap_reset", "",
- "linefeed_if_not_lf", "",
- "backspace_if_not_bs", "",
- "other_non_function_keys", "",
- "arrow_key_map", "",
- "acs_ulcorner", "",
- "acs_llcorner", "",
- "acs_urcorner", "",
- "acs_lrcorner", "",
- "acs_ltee", "",
- "acs_rtee", "",
- "acs_btee", "",
- "acs_ttee", "",
- "acs_hline", "",
- "acs_vline", "",
- "acs_plus", "",
- "memory_lock", "",
- "memory_unlock", "",
- "box_chars_1", "",
-}
diff --git a/vendor/github.com/ijc/Gotty/gotty.go b/vendor/github.com/ijc/Gotty/gotty.go
deleted file mode 100644
index c329778a1..000000000
--- a/vendor/github.com/ijc/Gotty/gotty.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2012 Neal van Veen. All rights reserved.
-// Usage of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Gotty is a Go-package for reading and parsing the terminfo database
-package gotty
-
-// TODO add more concurrency to name lookup, look for more opportunities.
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "os"
- "path"
- "reflect"
- "strings"
- "sync"
-)
-
-// Open a terminfo file by the name given and construct a TermInfo object.
-// If something went wrong reading the terminfo database file, an error is
-// returned.
-func OpenTermInfo(termName string) (*TermInfo, error) {
- if len(termName) == 0 {
- return nil, errors.New("No termname given")
- }
- // Find the environment variables
- if termloc := os.Getenv("TERMINFO"); len(termloc) > 0 {
- return readTermInfo(path.Join(termloc, string(termName[0]), termName))
- } else {
- // Search like ncurses
- locations := []string{}
- if h := os.Getenv("HOME"); len(h) > 0 {
- locations = append(locations, path.Join(h, ".terminfo"))
- }
- locations = append(locations,
- "/etc/terminfo/",
- "/lib/terminfo/",
- "/usr/share/terminfo/")
- for _, str := range locations {
- term, err := readTermInfo(path.Join(str, string(termName[0]), termName))
- if err == nil {
- return term, nil
- }
- }
- return nil, errors.New("No terminfo file(-location) found")
- }
-}
-
-// Open a terminfo file from the environment variable containing the current
-// terminal name and construct a TermInfo object. If something went wrong
-// reading the terminfo database file, an error is returned.
-func OpenTermInfoEnv() (*TermInfo, error) {
- termenv := os.Getenv("TERM")
- return OpenTermInfo(termenv)
-}
-
-// Return an attribute by the name attr provided. If none can be found,
-// an error is returned.
-func (term *TermInfo) GetAttribute(attr string) (stacker, error) {
- // Channel to store the main value in.
- var value stacker
- // Add a blocking WaitGroup
- var block sync.WaitGroup
- // Keep track of variable being written.
- written := false
- // Function to put into goroutine.
- f := func(ats interface{}) {
- var ok bool
- var v stacker
- // Switch on type of map to use and assign value to it.
- switch reflect.TypeOf(ats).Elem().Kind() {
- case reflect.Bool:
- v, ok = ats.(map[string]bool)[attr]
- case reflect.Int16:
- v, ok = ats.(map[string]int16)[attr]
- case reflect.String:
- v, ok = ats.(map[string]string)[attr]
- }
- // If ok, a value is found, so we can write.
- if ok {
- value = v
- written = true
- }
- // Goroutine is done
- block.Done()
- }
- block.Add(3)
- // Go for all 3 attribute lists.
- go f(term.boolAttributes)
- go f(term.numAttributes)
- go f(term.strAttributes)
- // Wait until every goroutine is done.
- block.Wait()
- // If a value has been written, return it.
- if written {
- return value, nil
- }
- // Otherwise, error.
- return nil, fmt.Errorf("Erorr finding attribute")
-}
-
-// Return an attribute by the name attr provided. If none can be found,
-// an error is returned. A name is first converted to its termcap value.
-func (term *TermInfo) GetAttributeName(name string) (stacker, error) {
- tc := GetTermcapName(name)
- return term.GetAttribute(tc)
-}
-
-// A utility function that finds and returns the termcap equivalent of a
-// variable name.
-func GetTermcapName(name string) string {
- // Termcap name
- var tc string
- // Blocking group
- var wait sync.WaitGroup
- // Function to put into a goroutine
- f := func(attrs []string) {
- // Find the string corresponding to the name
- for i, s := range attrs {
- if s == name {
- tc = attrs[i+1]
- }
- }
- // Goroutine is finished
- wait.Done()
- }
- wait.Add(3)
- // Go for all 3 attribute lists
- go f(BoolAttr[:])
- go f(NumAttr[:])
- go f(StrAttr[:])
- // Wait until every goroutine is done
- wait.Wait()
- // Return the termcap name
- return tc
-}
-
-// This function takes a path to a terminfo file and reads it in binary
-// form to construct the actual TermInfo file.
-func readTermInfo(path string) (*TermInfo, error) {
- // Open the terminfo file
- file, err := os.Open(path)
- defer file.Close()
- if err != nil {
- return nil, err
- }
-
- // magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize
- // Header is composed of the magic 0432 octal number, size of the name
- // section, size of the boolean section, the amount of number values,
- // the number of offsets of strings, and the size of the string section.
- var header [6]int16
- // Byte array is used to read in byte values
- var byteArray []byte
- // Short array is used to read in short values
- var shArray []int16
- // TermInfo object to store values
- var term TermInfo
-
- // Read in the header
- err = binary.Read(file, binary.LittleEndian, &header)
- if err != nil {
- return nil, err
- }
- // If magic number isn't there or isn't correct, we have the wrong filetype
- if header[0] != 0432 {
- return nil, errors.New(fmt.Sprintf("Wrong filetype"))
- }
-
- // Read in the names
- byteArray = make([]byte, header[1])
- err = binary.Read(file, binary.LittleEndian, &byteArray)
- if err != nil {
- return nil, err
- }
- term.Names = strings.Split(string(byteArray), "|")
-
- // Read in the booleans
- byteArray = make([]byte, header[2])
- err = binary.Read(file, binary.LittleEndian, &byteArray)
- if err != nil {
- return nil, err
- }
- term.boolAttributes = make(map[string]bool)
- for i, b := range byteArray {
- if b == 1 {
- term.boolAttributes[BoolAttr[i*2+1]] = true
- }
- }
- // If the number of bytes read is not even, a byte for alignment is added
- // We know the header is an even number of bytes so only need to check the
- // total of the names and booleans.
- if (header[1]+header[2])%2 != 0 {
- err = binary.Read(file, binary.LittleEndian, make([]byte, 1))
- if err != nil {
- return nil, err
- }
- }
-
- // Read in shorts
- shArray = make([]int16, header[3])
- err = binary.Read(file, binary.LittleEndian, &shArray)
- if err != nil {
- return nil, err
- }
- term.numAttributes = make(map[string]int16)
- for i, n := range shArray {
- if n != 0377 && n > -1 {
- term.numAttributes[NumAttr[i*2+1]] = n
- }
- }
-
- // Read the offsets into the short array
- shArray = make([]int16, header[4])
- err = binary.Read(file, binary.LittleEndian, &shArray)
- if err != nil {
- return nil, err
- }
- // Read the actual strings in the byte array
- byteArray = make([]byte, header[5])
- err = binary.Read(file, binary.LittleEndian, &byteArray)
- if err != nil {
- return nil, err
- }
- term.strAttributes = make(map[string]string)
- // We get an offset, and then iterate until the string is null-terminated
- for i, offset := range shArray {
- if offset > -1 {
- if int(offset) >= len(byteArray) {
- return nil, errors.New("array out of bounds reading string section")
- }
- r := bytes.IndexByte(byteArray[offset:], 0)
- if r == -1 {
- return nil, errors.New("missing nul byte reading string section")
- }
- r += int(offset)
- term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r])
- }
- }
- return &term, nil
-}
diff --git a/vendor/github.com/ijc/Gotty/parser.go b/vendor/github.com/ijc/Gotty/parser.go
deleted file mode 100644
index a9d5d23c5..000000000
--- a/vendor/github.com/ijc/Gotty/parser.go
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright 2012 Neal van Veen. All rights reserved.
-// Usage of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package gotty
-
-import (
- "bytes"
- "errors"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-)
-
-var exp = [...]string{
- "%%",
- "%c",
- "%s",
- "%p(\\d)",
- "%P([A-z])",
- "%g([A-z])",
- "%'(.)'",
- "%{([0-9]+)}",
- "%l",
- "%\\+|%-|%\\*|%/|%m",
- "%&|%\\||%\\^",
- "%=|%>|%<",
- "%A|%O",
- "%!|%~",
- "%i",
- "%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]",
- "%\\?(.*?);",
-}
-
-var regex *regexp.Regexp
-var staticVar map[byte]stacker
-
-// Parses the attribute that is received with name attr and parameters params.
-func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) {
- // Get the attribute name first.
- iface, err := term.GetAttribute(attr)
- str, ok := iface.(string)
- if err != nil {
- return "", err
- }
- if !ok {
- return str, errors.New("Only string capabilities can be parsed.")
- }
- // Construct the hidden parser struct so we can use a recursive stack based
- // parser.
- ps := &parser{}
- // Dynamic variables only exist in this context.
- ps.dynamicVar = make(map[byte]stacker, 26)
- ps.parameters = make([]stacker, len(params))
- // Convert the parameters to insert them into the parser struct.
- for i, x := range params {
- ps.parameters[i] = x
- }
- // Recursively walk and return.
- result, err := ps.walk(str)
- return result, err
-}
-
-// Parses the attribute that is received with name attr and parameters params.
-// Only works on full name of a capability that is given, which it uses to
-// search for the termcap name.
-func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) {
- tc := GetTermcapName(attr)
- return term.Parse(tc, params)
-}
-
-// Identify each token in a stack based manner and do the actual parsing.
-func (ps *parser) walk(attr string) (string, error) {
- // We use a buffer to get the modified string.
- var buf bytes.Buffer
- // Next, find and identify all tokens by their indices and strings.
- tokens := regex.FindAllStringSubmatch(attr, -1)
- if len(tokens) == 0 {
- return attr, nil
- }
- indices := regex.FindAllStringIndex(attr, -1)
- q := 0 // q counts the matches of one token
- // Iterate through the string per character.
- for i := 0; i < len(attr); i++ {
- // If the current position is an identified token, execute the following
- // steps.
- if q < len(indices) && i >= indices[q][0] && i < indices[q][1] {
- // Switch on token.
- switch {
- case tokens[q][0][:2] == "%%":
- // Literal percentage character.
- buf.WriteByte('%')
- case tokens[q][0][:2] == "%c":
- // Pop a character.
- c, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- buf.WriteByte(c.(byte))
- case tokens[q][0][:2] == "%s":
- // Pop a string.
- str, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- if _, ok := str.(string); !ok {
- return buf.String(), errors.New("Stack head is not a string")
- }
- buf.WriteString(str.(string))
- case tokens[q][0][:2] == "%p":
- // Push a parameter on the stack.
- index, err := strconv.ParseInt(tokens[q][1], 10, 8)
- index--
- if err != nil {
- return buf.String(), err
- }
- if int(index) >= len(ps.parameters) {
- return buf.String(), errors.New("Parameters index out of bound")
- }
- ps.st.push(ps.parameters[index])
- case tokens[q][0][:2] == "%P":
- // Pop a variable from the stack as a dynamic or static variable.
- val, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- index := tokens[q][2]
- if len(index) > 1 {
- errorStr := fmt.Sprintf("%s is not a valid dynamic variables index",
- index)
- return buf.String(), errors.New(errorStr)
- }
- // Specify either dynamic or static.
- if index[0] >= 'a' && index[0] <= 'z' {
- ps.dynamicVar[index[0]] = val
- } else if index[0] >= 'A' && index[0] <= 'Z' {
- staticVar[index[0]] = val
- }
- case tokens[q][0][:2] == "%g":
- // Push a variable from the stack as a dynamic or static variable.
- index := tokens[q][3]
- if len(index) > 1 {
- errorStr := fmt.Sprintf("%s is not a valid static variables index",
- index)
- return buf.String(), errors.New(errorStr)
- }
- var val stacker
- if index[0] >= 'a' && index[0] <= 'z' {
- val = ps.dynamicVar[index[0]]
- } else if index[0] >= 'A' && index[0] <= 'Z' {
- val = staticVar[index[0]]
- }
- ps.st.push(val)
- case tokens[q][0][:2] == "%'":
- // Push a character constant.
- con := tokens[q][4]
- if len(con) > 1 {
- errorStr := fmt.Sprintf("%s is not a valid character constant", con)
- return buf.String(), errors.New(errorStr)
- }
- ps.st.push(con[0])
- case tokens[q][0][:2] == "%{":
- // Push an integer constant.
- con, err := strconv.ParseInt(tokens[q][5], 10, 32)
- if err != nil {
- return buf.String(), err
- }
- ps.st.push(con)
- case tokens[q][0][:2] == "%l":
- // Push the length of the string that is popped from the stack.
- popStr, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- if _, ok := popStr.(string); !ok {
- errStr := fmt.Sprintf("Stack head is not a string")
- return buf.String(), errors.New(errStr)
- }
- ps.st.push(len(popStr.(string)))
- case tokens[q][0][:2] == "%?":
- // If-then-else construct. First, the whole string is identified and
- // then inside this substring, we can specify which parts to switch on.
- ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);")
- ifTokens := ifReg.FindStringSubmatch(tokens[q][0])
- var (
- ifStr string
- err error
- )
- // Parse the if-part to determine if-else.
- if len(ifTokens[1]) > 0 {
- ifStr, err = ps.walk(ifTokens[1])
- } else { // else
- ifStr, err = ps.walk(ifTokens[4])
- }
- // Return any errors
- if err != nil {
- return buf.String(), err
- } else if len(ifStr) > 0 {
- // Self-defined limitation, not sure if this is correct, but didn't
- // seem like it.
- return buf.String(), errors.New("If-clause cannot print statements")
- }
- var thenStr string
- // Pop the first value that is set by parsing the if-clause.
- choose, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- // Switch to if or else.
- if choose.(int) == 0 && len(ifTokens[1]) > 0 {
- thenStr, err = ps.walk(ifTokens[3])
- } else if choose.(int) != 0 {
- if len(ifTokens[1]) > 0 {
- thenStr, err = ps.walk(ifTokens[2])
- } else {
- thenStr, err = ps.walk(ifTokens[5])
- }
- }
- if err != nil {
- return buf.String(), err
- }
- buf.WriteString(thenStr)
- case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing
- fallthrough
- case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits.
- fallthrough
- case tokens[q][0][len(tokens[q][0])-1] == 'x':
- fallthrough
- case tokens[q][0][len(tokens[q][0])-1] == 'X':
- fallthrough
- case tokens[q][0][len(tokens[q][0])-1] == 's':
- token := tokens[q][0]
- // Remove the : that comes before a flag.
- if token[1] == ':' {
- token = token[:1] + token[2:]
- }
- digit, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- // The rest is determined like the normal formatted prints.
- digitStr := fmt.Sprintf(token, digit.(int))
- buf.WriteString(digitStr)
- case tokens[q][0][:2] == "%i":
- // Increment the parameters by one.
- if len(ps.parameters) < 2 {
- return buf.String(), errors.New("Not enough parameters to increment.")
- }
- val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int)
- val1++
- val2++
- ps.parameters[0], ps.parameters[1] = val1, val2
- default:
- // The rest of the tokens is a special case, where two values are
- // popped and then operated on by the token that comes after them.
- op1, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- op2, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- var result stacker
- switch tokens[q][0][:2] {
- case "%+":
- // Addition
- result = op2.(int) + op1.(int)
- case "%-":
- // Subtraction
- result = op2.(int) - op1.(int)
- case "%*":
- // Multiplication
- result = op2.(int) * op1.(int)
- case "%/":
- // Division
- result = op2.(int) / op1.(int)
- case "%m":
- // Modulo
- result = op2.(int) % op1.(int)
- case "%&":
- // Bitwise AND
- result = op2.(int) & op1.(int)
- case "%|":
- // Bitwise OR
- result = op2.(int) | op1.(int)
- case "%^":
- // Bitwise XOR
- result = op2.(int) ^ op1.(int)
- case "%=":
- // Equals
- result = op2 == op1
- case "%>":
- // Greater-than
- result = op2.(int) > op1.(int)
- case "%<":
- // Lesser-than
- result = op2.(int) < op1.(int)
- case "%A":
- // Logical AND
- result = op2.(bool) && op1.(bool)
- case "%O":
- // Logical OR
- result = op2.(bool) || op1.(bool)
- case "%!":
- // Logical complement
- result = !op1.(bool)
- case "%~":
- // Bitwise complement
- result = ^(op1.(int))
- }
- ps.st.push(result)
- }
-
- i = indices[q][1] - 1
- q++
- } else {
- // We are not "inside" a token, so just skip until the end or the next
- // token, and add all characters to the buffer.
- j := i
- if q != len(indices) {
- for !(j >= indices[q][0] && j < indices[q][1]) {
- j++
- }
- } else {
- j = len(attr)
- }
- buf.WriteString(string(attr[i:j]))
- i = j
- }
- }
- // Return the buffer as a string.
- return buf.String(), nil
-}
-
-// Push a stacker-value onto the stack.
-func (st *stack) push(s stacker) {
- *st = append(*st, s)
-}
-
-// Pop a stacker-value from the stack.
-func (st *stack) pop() (stacker, error) {
- if len(*st) == 0 {
- return nil, errors.New("Stack is empty.")
- }
- newStack := make(stack, len(*st)-1)
- val := (*st)[len(*st)-1]
- copy(newStack, (*st)[:len(*st)-1])
- *st = newStack
- return val, nil
-}
-
-// Initialize regexes and the static vars (that don't get changed between
-// calls.
-func init() {
- // Initialize the main regex.
- expStr := strings.Join(exp[:], "|")
- regex, _ = regexp.Compile(expStr)
- // Initialize the static variables.
- staticVar = make(map[byte]stacker, 26)
-}
diff --git a/vendor/github.com/ijc/Gotty/types.go b/vendor/github.com/ijc/Gotty/types.go
deleted file mode 100644
index 9bcc65e9b..000000000
--- a/vendor/github.com/ijc/Gotty/types.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2012 Neal van Veen. All rights reserved.
-// Usage of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package gotty
-
-type TermInfo struct {
- boolAttributes map[string]bool
- numAttributes map[string]int16
- strAttributes map[string]string
- // The various names of the TermInfo file.
- Names []string
-}
-
-type stacker interface {
-}
-type stack []stacker
-
-type parser struct {
- st stack
- parameters []stacker
- dynamicVar map[byte]stacker
-}
diff --git a/vendor/github.com/morikuni/aec/LICENSE b/vendor/github.com/morikuni/aec/LICENSE
new file mode 100644
index 000000000..1c2640164
--- /dev/null
+++ b/vendor/github.com/morikuni/aec/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Taihei Morikuni
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/morikuni/aec/README.md b/vendor/github.com/morikuni/aec/README.md
new file mode 100644
index 000000000..3cbc4343e
--- /dev/null
+++ b/vendor/github.com/morikuni/aec/README.md
@@ -0,0 +1,178 @@
+# aec
+
+[![GoDoc](https://godoc.org/github.com/morikuni/aec?status.svg)](https://godoc.org/github.com/morikuni/aec)
+
+Go wrapper for ANSI escape codes.
+
+## Install
+
+```bash
+go get github.com/morikuni/aec
+```
+
+## Features
+
+ANSI escape codes depend on the terminal environment, so some of these features may not work.
+Check the supported Font-Style/Font-Color features with [checkansi](./checkansi).
+
+See [Wikipedia](https://en.wikipedia.org/wiki/ANSI_escape_code) for more detail.
+
+### Cursor
+
+- `Up(n)`
+- `Down(n)`
+- `Right(n)`
+- `Left(n)`
+- `NextLine(n)`
+- `PreviousLine(n)`
+- `Column(col)`
+- `Position(row, col)`
+- `Save`
+- `Restore`
+- `Hide`
+- `Show`
+- `Report`
+
+### Erase
+
+- `EraseDisplay(mode)`
+- `EraseLine(mode)`
+
+### Scroll
+
+- `ScrollUp(n)`
+- `ScrollDown(n)`
+
+### Font Style
+
+- `Bold`
+- `Faint`
+- `Italic`
+- `Underline`
+- `BlinkSlow`
+- `BlinkRapid`
+- `Inverse`
+- `Conceal`
+- `CrossOut`
+- `Frame`
+- `Encircle`
+- `Overline`
+
+### Font Color
+
+Foreground color.
+
+- `DefaultF`
+- `BlackF`
+- `RedF`
+- `GreenF`
+- `YellowF`
+- `BlueF`
+- `MagentaF`
+- `CyanF`
+- `WhiteF`
+- `LightBlackF`
+- `LightRedF`
+- `LightGreenF`
+- `LightYellowF`
+- `LightBlueF`
+- `LightMagentaF`
+- `LightCyanF`
+- `LightWhiteF`
+- `Color3BitF(color)`
+- `Color8BitF(color)`
+- `FullColorF(r, g, b)`
+
+Background color.
+
+- `DefaultB`
+- `BlackB`
+- `RedB`
+- `GreenB`
+- `YellowB`
+- `BlueB`
+- `MagentaB`
+- `CyanB`
+- `WhiteB`
+- `LightBlackB`
+- `LightRedB`
+- `LightGreenB`
+- `LightYellowB`
+- `LightBlueB`
+- `LightMagentaB`
+- `LightCyanB`
+- `LightWhiteB`
+- `Color3BitB(color)`
+- `Color8BitB(color)`
+- `FullColorB(r, g, b)`
+
+### Color Converter
+
+Convert a 24-bit RGB color to an ANSI color.
+
+- `NewRGB3Bit(r, g, b)`
+- `NewRGB8Bit(r, g, b)`
+
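+For example, a minimal sketch of the conversion (the color values are chosen only for illustration):
+
+```go
+orange := aec.Color8BitF(aec.NewRGB8Bit(255, 128, 0))
+fmt.Println(orange.Apply("8-bit approximation of orange"))
+```
+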
+### Builder
+
+Use a builder to mix these features.
+
+```go
+custom := aec.EmptyBuilder.Right(2).RGB8BitF(128, 255, 64).RedB().ANSI
+custom.Apply("Hello World")
+```
+
+## Usage
+
+1. Create an ANSI value with `aec.XXX().With(aec.YYY())` or `aec.EmptyBuilder.XXX().YYY().ANSI`
+2. Print it with `fmt.Print(ansi, "some string", aec.Reset)` or `fmt.Print(ansi.Apply("some string"))`
+
+`aec.Reset` must be appended when using the font-style or font-color features; otherwise the effect persists in subsequent output.
+
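+A short sketch of both styles (the strings are placeholders, for illustration only):
+
+```go
+bold := aec.Bold.With(aec.RedF)
+green := aec.EmptyBuilder.GreenF().ANSI
+fmt.Print(bold, "error", aec.Reset, "\n") // manual Reset
+fmt.Println(green.Apply("ok"))            // Apply appends Reset
+```
+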
+## Example
+
+A simple progress bar.
+
+![sample](./sample.gif)
+
+```go
+package main
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/morikuni/aec"
+)
+
+func main() {
+ const n = 20
+ builder := aec.EmptyBuilder
+
+ up2 := aec.Up(2)
+ col := aec.Column(n + 2)
+ bar := aec.Color8BitF(aec.NewRGB8Bit(64, 255, 64))
+ label := builder.LightRedF().Underline().With(col).Right(1).ANSI
+
+ // for up2
+ fmt.Println()
+ fmt.Println()
+
+ for i := 0; i <= n; i++ {
+ fmt.Print(up2)
+ fmt.Println(label.Apply(fmt.Sprint(i, "/", n)))
+ fmt.Print("[")
+ fmt.Print(bar.Apply(strings.Repeat("=", i)))
+ fmt.Println(col.Apply("]"))
+ time.Sleep(100 * time.Millisecond)
+ }
+}
+```
+
+## License
+
+[MIT](./LICENSE)
+
+
diff --git a/vendor/github.com/morikuni/aec/aec.go b/vendor/github.com/morikuni/aec/aec.go
new file mode 100644
index 000000000..566be6eb1
--- /dev/null
+++ b/vendor/github.com/morikuni/aec/aec.go
@@ -0,0 +1,137 @@
+package aec
+
+import "fmt"
+
+// EraseMode is an erase mode; the available modes are listed in the variable EraseModes.
+type EraseMode uint
+
+var (
+ // EraseModes is a list of EraseMode.
+ EraseModes struct {
+ // All erases the entire display or line.
+ All EraseMode
+
+ // Head erases from the cursor to the head.
+ Head EraseMode
+
+ // Tail erases from the cursor to the tail.
+ Tail EraseMode
+ }
+
+ // Save saves the cursor position.
+ Save ANSI
+
+ // Restore restores the cursor position.
+ Restore ANSI
+
+ // Hide hides the cursor.
+ Hide ANSI
+
+ // Show shows the cursor.
+ Show ANSI
+
+ // Report reports the cursor position.
+ Report ANSI
+)
+
+// Up moves the cursor up.
+func Up(n uint) ANSI {
+ if n == 0 {
+ return empty
+ }
+ return newAnsi(fmt.Sprintf(esc+"%dA", n))
+}
+
+// Down moves the cursor down.
+func Down(n uint) ANSI {
+ if n == 0 {
+ return empty
+ }
+ return newAnsi(fmt.Sprintf(esc+"%dB", n))
+}
+
+// Right moves the cursor right.
+func Right(n uint) ANSI {
+ if n == 0 {
+ return empty
+ }
+ return newAnsi(fmt.Sprintf(esc+"%dC", n))
+}
+
+// Left moves the cursor left.
+func Left(n uint) ANSI {
+ if n == 0 {
+ return empty
+ }
+ return newAnsi(fmt.Sprintf(esc+"%dD", n))
+}
+
+// NextLine moves the cursor down to the head of the line.
+func NextLine(n uint) ANSI {
+ if n == 0 {
+ return empty
+ }
+ return newAnsi(fmt.Sprintf(esc+"%dE", n))
+}
+
+// PreviousLine moves the cursor up to the head of the line.
+func PreviousLine(n uint) ANSI {
+ if n == 0 {
+ return empty
+ }
+ return newAnsi(fmt.Sprintf(esc+"%dF", n))
+}
+
+// Column sets the cursor to the given column.
+func Column(col uint) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"%dG", col))
+}
+
+// Position sets the cursor to the given absolute position.
+func Position(row, col uint) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"%d;%dH", row, col))
+}
+
+// EraseDisplay erases the display using the given EraseMode.
+func EraseDisplay(m EraseMode) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"%dJ", m))
+}
+
+// EraseLine erases the line using the given EraseMode.
+func EraseLine(m EraseMode) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"%dK", m))
+}
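+
+// For example (an illustrative sketch, not part of the upstream API surface),
+// a caller can redraw the current line in place with:
+//
+//    fmt.Printf("%s%supdated", aec.Column(1), aec.EraseLine(aec.EraseModes.All))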
+
+// ScrollUp scrolls the page up.
+func ScrollUp(n int) ANSI {
+ if n == 0 {
+ return empty
+ }
+ return newAnsi(fmt.Sprintf(esc+"%dS", n))
+}
+
+// ScrollDown scrolls the page down.
+func ScrollDown(n int) ANSI {
+ if n == 0 {
+ return empty
+ }
+ return newAnsi(fmt.Sprintf(esc+"%dT", n))
+}
+
+func init() {
+ EraseModes = struct {
+ All EraseMode
+ Head EraseMode
+ Tail EraseMode
+ }{
+ Tail: 0,
+ Head: 1,
+ All: 2,
+ }
+
+ Save = newAnsi(esc + "s")
+ Restore = newAnsi(esc + "u")
+ Hide = newAnsi(esc + "?25l")
+ Show = newAnsi(esc + "?25h")
+ Report = newAnsi(esc + "6n")
+}
diff --git a/vendor/github.com/morikuni/aec/ansi.go b/vendor/github.com/morikuni/aec/ansi.go
new file mode 100644
index 000000000..e60722e6e
--- /dev/null
+++ b/vendor/github.com/morikuni/aec/ansi.go
@@ -0,0 +1,59 @@
+package aec
+
+import (
+ "fmt"
+ "strings"
+)
+
+const esc = "\x1b["
+
+// Reset resets the SGR effects.
+const Reset string = "\x1b[0m"
+
+var empty = newAnsi("")
+
+// ANSI represents an ANSI escape code.
+type ANSI interface {
+ fmt.Stringer
+
+ // With combines this ANSI code with the given ones.
+ With(...ANSI) ANSI
+
+ // Apply wraps the given string in this ANSI code.
+ Apply(string) string
+}
+
+type ansiImpl string
+
+func newAnsi(s string) *ansiImpl {
+ r := ansiImpl(s)
+ return &r
+}
+
+func (a *ansiImpl) With(ansi ...ANSI) ANSI {
+ return concat(append([]ANSI{a}, ansi...))
+}
+
+func (a *ansiImpl) Apply(s string) string {
+ return a.String() + s + Reset
+}
+
+func (a *ansiImpl) String() string {
+ return string(*a)
+}
+
+// Apply wraps the given string in the given ANSI codes.
+func Apply(s string, ansi ...ANSI) string {
+ if len(ansi) == 0 {
+ return s
+ }
+ return concat(ansi).Apply(s)
+}
+
+func concat(ansi []ANSI) ANSI {
+ strs := make([]string, 0, len(ansi))
+ for _, p := range ansi {
+ strs = append(strs, p.String())
+ }
+ return newAnsi(strings.Join(strs, ""))
+}
diff --git a/vendor/github.com/morikuni/aec/builder.go b/vendor/github.com/morikuni/aec/builder.go
new file mode 100644
index 000000000..13bd002d4
--- /dev/null
+++ b/vendor/github.com/morikuni/aec/builder.go
@@ -0,0 +1,388 @@
+package aec
+
+// Builder is a lightweight syntax for constructing a customized ANSI code.
+type Builder struct {
+ ANSI ANSI
+}
+
+// EmptyBuilder is an initialized Builder.
+var EmptyBuilder *Builder
+
+// NewBuilder creates a Builder from existing ANSI.
+func NewBuilder(a ...ANSI) *Builder {
+ return &Builder{concat(a)}
+}
+
+// With is a syntax for With.
+func (builder *Builder) With(a ...ANSI) *Builder {
+ return NewBuilder(builder.ANSI.With(a...))
+}
+
+// Up is a syntax for Up.
+func (builder *Builder) Up(n uint) *Builder {
+ return builder.With(Up(n))
+}
+
+// Down is a syntax for Down.
+func (builder *Builder) Down(n uint) *Builder {
+ return builder.With(Down(n))
+}
+
+// Right is a syntax for Right.
+func (builder *Builder) Right(n uint) *Builder {
+ return builder.With(Right(n))
+}
+
+// Left is a syntax for Left.
+func (builder *Builder) Left(n uint) *Builder {
+ return builder.With(Left(n))
+}
+
+// NextLine is a syntax for NextLine.
+func (builder *Builder) NextLine(n uint) *Builder {
+ return builder.With(NextLine(n))
+}
+
+// PreviousLine is a syntax for PreviousLine.
+func (builder *Builder) PreviousLine(n uint) *Builder {
+ return builder.With(PreviousLine(n))
+}
+
+// Column is a syntax for Column.
+func (builder *Builder) Column(col uint) *Builder {
+ return builder.With(Column(col))
+}
+
+// Position is a syntax for Position.
+func (builder *Builder) Position(row, col uint) *Builder {
+ return builder.With(Position(row, col))
+}
+
+// EraseDisplay is a syntax for EraseDisplay.
+func (builder *Builder) EraseDisplay(m EraseMode) *Builder {
+ return builder.With(EraseDisplay(m))
+}
+
+// EraseLine is a syntax for EraseLine.
+func (builder *Builder) EraseLine(m EraseMode) *Builder {
+ return builder.With(EraseLine(m))
+}
+
+// ScrollUp is a syntax for ScrollUp.
+func (builder *Builder) ScrollUp(n int) *Builder {
+ return builder.With(ScrollUp(n))
+}
+
+// ScrollDown is a syntax for ScrollDown.
+func (builder *Builder) ScrollDown(n int) *Builder {
+ return builder.With(ScrollDown(n))
+}
+
+// Save is a syntax for Save.
+func (builder *Builder) Save() *Builder {
+ return builder.With(Save)
+}
+
+// Restore is a syntax for Restore.
+func (builder *Builder) Restore() *Builder {
+ return builder.With(Restore)
+}
+
+// Hide is a syntax for Hide.
+func (builder *Builder) Hide() *Builder {
+ return builder.With(Hide)
+}
+
+// Show is a syntax for Show.
+func (builder *Builder) Show() *Builder {
+ return builder.With(Show)
+}
+
+// Report is a syntax for Report.
+func (builder *Builder) Report() *Builder {
+ return builder.With(Report)
+}
+
+// Bold is a syntax for Bold.
+func (builder *Builder) Bold() *Builder {
+ return builder.With(Bold)
+}
+
+// Faint is a syntax for Faint.
+func (builder *Builder) Faint() *Builder {
+ return builder.With(Faint)
+}
+
+// Italic is a syntax for Italic.
+func (builder *Builder) Italic() *Builder {
+ return builder.With(Italic)
+}
+
+// Underline is a syntax for Underline.
+func (builder *Builder) Underline() *Builder {
+ return builder.With(Underline)
+}
+
+// BlinkSlow is a syntax for BlinkSlow.
+func (builder *Builder) BlinkSlow() *Builder {
+ return builder.With(BlinkSlow)
+}
+
+// BlinkRapid is a syntax for BlinkRapid.
+func (builder *Builder) BlinkRapid() *Builder {
+ return builder.With(BlinkRapid)
+}
+
+// Inverse is a syntax for Inverse.
+func (builder *Builder) Inverse() *Builder {
+ return builder.With(Inverse)
+}
+
+// Conceal is a syntax for Conceal.
+func (builder *Builder) Conceal() *Builder {
+ return builder.With(Conceal)
+}
+
+// CrossOut is a syntax for CrossOut.
+func (builder *Builder) CrossOut() *Builder {
+ return builder.With(CrossOut)
+}
+
+// BlackF is a syntax for BlackF.
+func (builder *Builder) BlackF() *Builder {
+ return builder.With(BlackF)
+}
+
+// RedF is a syntax for RedF.
+func (builder *Builder) RedF() *Builder {
+ return builder.With(RedF)
+}
+
+// GreenF is a syntax for GreenF.
+func (builder *Builder) GreenF() *Builder {
+ return builder.With(GreenF)
+}
+
+// YellowF is a syntax for YellowF.
+func (builder *Builder) YellowF() *Builder {
+ return builder.With(YellowF)
+}
+
+// BlueF is a syntax for BlueF.
+func (builder *Builder) BlueF() *Builder {
+ return builder.With(BlueF)
+}
+
+// MagentaF is a syntax for MagentaF.
+func (builder *Builder) MagentaF() *Builder {
+ return builder.With(MagentaF)
+}
+
+// CyanF is a syntax for CyanF.
+func (builder *Builder) CyanF() *Builder {
+ return builder.With(CyanF)
+}
+
+// WhiteF is a syntax for WhiteF.
+func (builder *Builder) WhiteF() *Builder {
+ return builder.With(WhiteF)
+}
+
+// DefaultF is a syntax for DefaultF.
+func (builder *Builder) DefaultF() *Builder {
+ return builder.With(DefaultF)
+}
+
+// BlackB is a syntax for BlackB.
+func (builder *Builder) BlackB() *Builder {
+ return builder.With(BlackB)
+}
+
+// RedB is a syntax for RedB.
+func (builder *Builder) RedB() *Builder {
+ return builder.With(RedB)
+}
+
+// GreenB is a syntax for GreenB.
+func (builder *Builder) GreenB() *Builder {
+ return builder.With(GreenB)
+}
+
+// YellowB is a syntax for YellowB.
+func (builder *Builder) YellowB() *Builder {
+ return builder.With(YellowB)
+}
+
+// BlueB is a syntax for BlueB.
+func (builder *Builder) BlueB() *Builder {
+ return builder.With(BlueB)
+}
+
+// MagentaB is a syntax for MagentaB.
+func (builder *Builder) MagentaB() *Builder {
+ return builder.With(MagentaB)
+}
+
+// CyanB is a syntax for CyanB.
+func (builder *Builder) CyanB() *Builder {
+ return builder.With(CyanB)
+}
+
+// WhiteB is a syntax for WhiteB.
+func (builder *Builder) WhiteB() *Builder {
+ return builder.With(WhiteB)
+}
+
+// DefaultB is a syntax for DefaultB.
+func (builder *Builder) DefaultB() *Builder {
+ return builder.With(DefaultB)
+}
+
+// Frame is a syntax for Frame.
+func (builder *Builder) Frame() *Builder {
+ return builder.With(Frame)
+}
+
+// Encircle is a syntax for Encircle.
+func (builder *Builder) Encircle() *Builder {
+ return builder.With(Encircle)
+}
+
+// Overline is a syntax for Overline.
+func (builder *Builder) Overline() *Builder {
+ return builder.With(Overline)
+}
+
+// LightBlackF is a syntax for LightBlackF.
+func (builder *Builder) LightBlackF() *Builder {
+ return builder.With(LightBlackF)
+}
+
+// LightRedF is a syntax for LightRedF.
+func (builder *Builder) LightRedF() *Builder {
+ return builder.With(LightRedF)
+}
+
+// LightGreenF is a syntax for LightGreenF.
+func (builder *Builder) LightGreenF() *Builder {
+ return builder.With(LightGreenF)
+}
+
+// LightYellowF is a syntax for LightYellowF.
+func (builder *Builder) LightYellowF() *Builder {
+ return builder.With(LightYellowF)
+}
+
+// LightBlueF is a syntax for LightBlueF.
+func (builder *Builder) LightBlueF() *Builder {
+ return builder.With(LightBlueF)
+}
+
+// LightMagentaF is a syntax for LightMagentaF.
+func (builder *Builder) LightMagentaF() *Builder {
+ return builder.With(LightMagentaF)
+}
+
+// LightCyanF is a syntax for LightCyanF.
+func (builder *Builder) LightCyanF() *Builder {
+ return builder.With(LightCyanF)
+}
+
+// LightWhiteF is a syntax for LightWhiteF.
+func (builder *Builder) LightWhiteF() *Builder {
+ return builder.With(LightWhiteF)
+}
+
+// LightBlackB is a syntax for LightBlackB.
+func (builder *Builder) LightBlackB() *Builder {
+ return builder.With(LightBlackB)
+}
+
+// LightRedB is a syntax for LightRedB.
+func (builder *Builder) LightRedB() *Builder {
+ return builder.With(LightRedB)
+}
+
+// LightGreenB is a syntax for LightGreenB.
+func (builder *Builder) LightGreenB() *Builder {
+ return builder.With(LightGreenB)
+}
+
+// LightYellowB is a syntax for LightYellowB.
+func (builder *Builder) LightYellowB() *Builder {
+ return builder.With(LightYellowB)
+}
+
+// LightBlueB is a syntax for LightBlueB.
+func (builder *Builder) LightBlueB() *Builder {
+ return builder.With(LightBlueB)
+}
+
+// LightMagentaB is a syntax for LightMagentaB.
+func (builder *Builder) LightMagentaB() *Builder {
+ return builder.With(LightMagentaB)
+}
+
+// LightCyanB is a syntax for LightCyanB.
+func (builder *Builder) LightCyanB() *Builder {
+ return builder.With(LightCyanB)
+}
+
+// LightWhiteB is a syntax for LightWhiteB.
+func (builder *Builder) LightWhiteB() *Builder {
+ return builder.With(LightWhiteB)
+}
+
+// Color3BitF is a syntax for Color3BitF.
+func (builder *Builder) Color3BitF(c RGB3Bit) *Builder {
+ return builder.With(Color3BitF(c))
+}
+
+// Color3BitB is a syntax for Color3BitB.
+func (builder *Builder) Color3BitB(c RGB3Bit) *Builder {
+ return builder.With(Color3BitB(c))
+}
+
+// Color8BitF is a syntax for Color8BitF.
+func (builder *Builder) Color8BitF(c RGB8Bit) *Builder {
+ return builder.With(Color8BitF(c))
+}
+
+// Color8BitB is a syntax for Color8BitB.
+func (builder *Builder) Color8BitB(c RGB8Bit) *Builder {
+ return builder.With(Color8BitB(c))
+}
+
+// FullColorF is a syntax for FullColorF.
+func (builder *Builder) FullColorF(r, g, b uint8) *Builder {
+ return builder.With(FullColorF(r, g, b))
+}
+
+// FullColorB is a syntax for FullColorB.
+func (builder *Builder) FullColorB(r, g, b uint8) *Builder {
+ return builder.With(FullColorB(r, g, b))
+}
+
+// RGB3BitF is a syntax for Color3BitF with NewRGB3Bit.
+func (builder *Builder) RGB3BitF(r, g, b uint8) *Builder {
+ return builder.Color3BitF(NewRGB3Bit(r, g, b))
+}
+
+// RGB3BitB is a syntax for Color3BitB with NewRGB3Bit.
+func (builder *Builder) RGB3BitB(r, g, b uint8) *Builder {
+ return builder.Color3BitB(NewRGB3Bit(r, g, b))
+}
+
+// RGB8BitF is a syntax for Color8BitF with NewRGB8Bit.
+func (builder *Builder) RGB8BitF(r, g, b uint8) *Builder {
+ return builder.Color8BitF(NewRGB8Bit(r, g, b))
+}
+
+// RGB8BitB is a syntax for Color8BitB with NewRGB8Bit.
+func (builder *Builder) RGB8BitB(r, g, b uint8) *Builder {
+ return builder.Color8BitB(NewRGB8Bit(r, g, b))
+}
+
+func init() {
+ EmptyBuilder = &Builder{empty}
+}
diff --git a/vendor/github.com/morikuni/aec/sample.gif b/vendor/github.com/morikuni/aec/sample.gif
new file mode 100644
index 000000000..c6c613bb7
--- /dev/null
+++ b/vendor/github.com/morikuni/aec/sample.gif
Binary files differ
diff --git a/vendor/github.com/morikuni/aec/sgr.go b/vendor/github.com/morikuni/aec/sgr.go
new file mode 100644
index 000000000..0ba3464e6
--- /dev/null
+++ b/vendor/github.com/morikuni/aec/sgr.go
@@ -0,0 +1,202 @@
+package aec
+
+import (
+ "fmt"
+)
+
+// RGB3Bit is a 3-bit RGB color.
+type RGB3Bit uint8
+
+// RGB8Bit is an 8-bit RGB color.
+type RGB8Bit uint8
+
+func newSGR(n uint) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"%dm", n))
+}
+
+// NewRGB3Bit creates an RGB3Bit from the given RGB values.
+func NewRGB3Bit(r, g, b uint8) RGB3Bit {
+ return RGB3Bit((r >> 7) | ((g >> 6) & 0x2) | ((b >> 5) & 0x4))
+}
+
+// NewRGB8Bit creates an RGB8Bit from the given RGB values.
+func NewRGB8Bit(r, g, b uint8) RGB8Bit {
+ return RGB8Bit(16 + 36*(r/43) + 6*(g/43) + b/43)
+}
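+
+// As a worked check of the formulas above (illustrative only):
+// NewRGB8Bit(255, 128, 0) yields 16 + 36*5 + 6*2 + 0 = 208, and
+// NewRGB3Bit(255, 128, 0) yields 1 | 2 | 0 = 3 (yellow).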
+
+// Color3BitF sets the foreground color of text.
+func Color3BitF(c RGB3Bit) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"%dm", c+30))
+}
+
+// Color3BitB sets the background color of text.
+func Color3BitB(c RGB3Bit) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"%dm", c+40))
+}
+
+// Color8BitF sets the foreground color of text.
+func Color8BitF(c RGB8Bit) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"38;5;%dm", c))
+}
+
+// Color8BitB sets the background color of text.
+func Color8BitB(c RGB8Bit) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"48;5;%dm", c))
+}
+
+// FullColorF sets the foreground color of text.
+func FullColorF(r, g, b uint8) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"38;2;%d;%d;%dm", r, g, b))
+}
+
+// FullColorB sets the background color of text.
+func FullColorB(r, g, b uint8) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"48;2;%d;%d;%dm", r, g, b))
+}
+
+// Style
+var (
+ // Bold sets the text style to bold or increased intensity.
+ Bold ANSI
+
+ // Faint sets the text style to faint.
+ Faint ANSI
+
+ // Italic sets the text style to italic.
+ Italic ANSI
+
+ // Underline sets the text style to underline.
+ Underline ANSI
+
+ // BlinkSlow sets the text style to slow blink.
+ BlinkSlow ANSI
+
+ // BlinkRapid sets the text style to rapid blink.
+ BlinkRapid ANSI
+
+ // Inverse swaps the foreground and background colors.
+ Inverse ANSI
+
+ // Conceal sets the text style to conceal.
+ Conceal ANSI
+
+ // CrossOut sets the text style to crossed out.
+ CrossOut ANSI
+
+ // Frame sets the text style to framed.
+ Frame ANSI
+
+ // Encircle sets the text style to encircled.
+ Encircle ANSI
+
+ // Overline sets the text style to overlined.
+ Overline ANSI
+)
+
+// Foreground color of text.
+var (
+ // DefaultF is the default color of foreground.
+ DefaultF ANSI
+
+ // Normal color
+ BlackF ANSI
+ RedF ANSI
+ GreenF ANSI
+ YellowF ANSI
+ BlueF ANSI
+ MagentaF ANSI
+ CyanF ANSI
+ WhiteF ANSI
+
+ // Light color
+ LightBlackF ANSI
+ LightRedF ANSI
+ LightGreenF ANSI
+ LightYellowF ANSI
+ LightBlueF ANSI
+ LightMagentaF ANSI
+ LightCyanF ANSI
+ LightWhiteF ANSI
+)
+
+// Background color of text.
+var (
+ // DefaultB is the default color of background.
+ DefaultB ANSI
+
+ // Normal color
+ BlackB ANSI
+ RedB ANSI
+ GreenB ANSI
+ YellowB ANSI
+ BlueB ANSI
+ MagentaB ANSI
+ CyanB ANSI
+ WhiteB ANSI
+
+ // Light color
+ LightBlackB ANSI
+ LightRedB ANSI
+ LightGreenB ANSI
+ LightYellowB ANSI
+ LightBlueB ANSI
+ LightMagentaB ANSI
+ LightCyanB ANSI
+ LightWhiteB ANSI
+)
+
+func init() {
+ Bold = newSGR(1)
+ Faint = newSGR(2)
+ Italic = newSGR(3)
+ Underline = newSGR(4)
+ BlinkSlow = newSGR(5)
+ BlinkRapid = newSGR(6)
+ Inverse = newSGR(7)
+ Conceal = newSGR(8)
+ CrossOut = newSGR(9)
+
+ BlackF = newSGR(30)
+ RedF = newSGR(31)
+ GreenF = newSGR(32)
+ YellowF = newSGR(33)
+ BlueF = newSGR(34)
+ MagentaF = newSGR(35)
+ CyanF = newSGR(36)
+ WhiteF = newSGR(37)
+
+ DefaultF = newSGR(39)
+
+ BlackB = newSGR(40)
+ RedB = newSGR(41)
+ GreenB = newSGR(42)
+ YellowB = newSGR(43)
+ BlueB = newSGR(44)
+ MagentaB = newSGR(45)
+ CyanB = newSGR(46)
+ WhiteB = newSGR(47)
+
+ DefaultB = newSGR(49)
+
+ Frame = newSGR(51)
+ Encircle = newSGR(52)
+ Overline = newSGR(53)
+
+ LightBlackF = newSGR(90)
+ LightRedF = newSGR(91)
+ LightGreenF = newSGR(92)
+ LightYellowF = newSGR(93)
+ LightBlueF = newSGR(94)
+ LightMagentaF = newSGR(95)
+ LightCyanF = newSGR(96)
+ LightWhiteF = newSGR(97)
+
+ LightBlackB = newSGR(100)
+ LightRedB = newSGR(101)
+ LightGreenB = newSGR(102)
+ LightYellowB = newSGR(103)
+ LightBlueB = newSGR(104)
+ LightMagentaB = newSGR(105)
+ LightCyanB = newSGR(106)
+ LightWhiteB = newSGR(107)
+}
diff --git a/vendor/github.com/openshift/imagebuilder/OWNERS b/vendor/github.com/openshift/imagebuilder/OWNERS
index 6a900fd07..db859b7bd 100644
--- a/vendor/github.com/openshift/imagebuilder/OWNERS
+++ b/vendor/github.com/openshift/imagebuilder/OWNERS
@@ -1,5 +1,3 @@
-reviewers:
-- bparees
approvers:
- TomSweeneyRedHat
- mrunalp
diff --git a/vendor/github.com/openshift/imagebuilder/README.md b/vendor/github.com/openshift/imagebuilder/README.md
index fd96ed940..772747bce 100644
--- a/vendor/github.com/openshift/imagebuilder/README.md
+++ b/vendor/github.com/openshift/imagebuilder/README.md
@@ -64,6 +64,11 @@ $ imagebuilder -f Dockerfile:Dockerfile.extra .
will build the current directory and combine the first Dockerfile with the second. The FROM in the second image
is ignored.
+Note that imagebuilder adds the built image to the `docker` daemon's internal storage. If you use `podman`, you must first pull the image into its local storage:
+
+```
+$ podman pull docker-daemon:<IMAGE>:<TAG> # must contain either a tag or a digest
+```
## Code Example
diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go
index 86b139b65..5a2d0d539 100644
--- a/vendor/github.com/openshift/imagebuilder/builder.go
+++ b/vendor/github.com/openshift/imagebuilder/builder.go
@@ -212,6 +212,7 @@ func NewStages(node *parser.Node, b *Builder) (Stages, error) {
Builder: &Builder{
Args: b.Args,
AllowedArgs: b.AllowedArgs,
+ Env: b.Env,
},
Node: root,
})
@@ -436,7 +437,7 @@ func (b *Builder) FromImage(image *docker.Image, node *parser.Node) error {
SplitChildren(node, command.From)
b.RunConfig = *image.Config
- b.Env = b.RunConfig.Env
+ b.Env = append(b.Env, b.RunConfig.Env...)
b.RunConfig.Env = nil
// Check to see if we have a default PATH, note that windows won't
diff --git a/vendor/github.com/openshift/imagebuilder/constants.go b/vendor/github.com/openshift/imagebuilder/constants.go
index 86cd2e5e2..7b41e5a49 100644
--- a/vendor/github.com/openshift/imagebuilder/constants.go
+++ b/vendor/github.com/openshift/imagebuilder/constants.go
@@ -4,10 +4,6 @@ const (
// in docker/system
NoBaseImageSpecifier = "scratch"
- // not yet part of our import
- commandArg = "arg"
- commandStopSignal = "stopsignal"
-
// in docker/system
defaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
)
diff --git a/vendor/github.com/openshift/imagebuilder/evaluator.go b/vendor/github.com/openshift/imagebuilder/evaluator.go
index 1ea358451..1bbb25f2b 100644
--- a/vendor/github.com/openshift/imagebuilder/evaluator.go
+++ b/vendor/github.com/openshift/imagebuilder/evaluator.go
@@ -20,16 +20,16 @@ func ParseDockerfile(r io.Reader) (*parser.Node, error) {
// Environment variable interpolation will happen on these statements only.
var replaceEnvAllowed = map[string]bool{
- command.Env: true,
- command.Label: true,
- command.Add: true,
- command.Copy: true,
- command.Workdir: true,
- command.Expose: true,
- command.Volume: true,
- command.User: true,
- commandStopSignal: true,
- commandArg: true,
+ command.Env: true,
+ command.Label: true,
+ command.Add: true,
+ command.Copy: true,
+ command.Workdir: true,
+ command.Expose: true,
+ command.Volume: true,
+ command.User: true,
+ command.StopSignal: true,
+ command.Arg: true,
}
// Certain commands are allowed to have their args split into more
diff --git a/vendor/github.com/openshift/imagebuilder/vendor.conf b/vendor/github.com/openshift/imagebuilder/vendor.conf
index e437b79c3..c3f7d1a6b 100644
--- a/vendor/github.com/openshift/imagebuilder/vendor.conf
+++ b/vendor/github.com/openshift/imagebuilder/vendor.conf
@@ -5,7 +5,6 @@ github.com/docker/go-connections 97c2040d34dfae1d1b1275fa3a78dbdd2f41cf7e
github.com/docker/go-units 2fb04c6466a548a03cb009c5569ee1ab1e35398e
github.com/fsouza/go-dockerclient openshift-4.0 https://github.com/openshift/go-dockerclient.git
github.com/gogo/protobuf c5a62797aee0054613cc578653a16c6237fef080
-github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
github.com/konsorten/go-windows-terminal-sequences f55edac94c9bbba5d6182a4be46d86a2c9b5b50e
github.com/Microsoft/go-winio 1a8911d1ed007260465c3bfbbc785ac6915a0bb8
github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
@@ -18,3 +17,4 @@ github.com/sirupsen/logrus d7b6bf5e4d26448fd977d07d745a2a66097ddecb
golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908
golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83
golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba
+k8s.io/klog 8e90cee79f823779174776412c13478955131846
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
new file mode 100644
index 000000000..9857fe53d
--- /dev/null
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -0,0 +1,66 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package errgroup provides synchronization, error propagation, and Context
+// cancelation for groups of goroutines working on subtasks of a common task.
+package errgroup
+
+import (
+ "context"
+ "sync"
+)
+
+// A Group is a collection of goroutines working on subtasks that are part of
+// the same overall task.
+//
+// A zero Group is valid and does not cancel on error.
+type Group struct {
+ cancel func()
+
+ wg sync.WaitGroup
+
+ errOnce sync.Once
+ err error
+}
+
+// WithContext returns a new Group and an associated Context derived from ctx.
+//
+// The derived Context is canceled the first time a function passed to Go
+// returns a non-nil error or the first time Wait returns, whichever occurs
+// first.
+func WithContext(ctx context.Context) (*Group, context.Context) {
+ ctx, cancel := context.WithCancel(ctx)
+ return &Group{cancel: cancel}, ctx
+}
+
+// Wait blocks until all function calls from the Go method have returned, then
+// returns the first non-nil error (if any) from them.
+func (g *Group) Wait() error {
+ g.wg.Wait()
+ if g.cancel != nil {
+ g.cancel()
+ }
+ return g.err
+}
+
+// Go calls the given function in a new goroutine.
+//
+// The first call to return a non-nil error cancels the group; its error will be
+// returned by Wait.
+func (g *Group) Go(f func() error) {
+ g.wg.Add(1)
+
+ go func() {
+ defer g.wg.Done()
+
+ if err := f(); err != nil {
+ g.errOnce.Do(func() {
+ g.err = err
+ if g.cancel != nil {
+ g.cancel()
+ }
+ })
+ }
+ }()
+}
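+
+// A minimal usage sketch (illustrative only, not part of the upstream file;
+// assumes the usual context, fmt, and log imports in the caller):
+//
+//    g, ctx := errgroup.WithContext(context.Background())
+//    for _, name := range []string{"a", "b", "c"} {
+//        name := name // capture the loop variable
+//        g.Go(func() error {
+//            select {
+//            case <-ctx.Done():
+//                return ctx.Err()
+//            default:
+//                fmt.Println("processed", name)
+//                return nil
+//            }
+//        })
+//    }
+//    if err := g.Wait(); err != nil {
+//        log.Fatal(err)
+//    }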
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 65a99869e..ae456b4a5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -36,8 +36,12 @@ github.com/buger/goterm
# github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
github.com/checkpoint-restore/go-criu
github.com/checkpoint-restore/go-criu/rpc
+# github.com/containerd/containerd v1.3.0
+github.com/containerd/containerd/errdefs
# github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc
-github.com/containerd/continuity/pathdriver
+github.com/containerd/continuity/fs
+github.com/containerd/continuity/sysx
+github.com/containerd/continuity/syscallx
# github.com/containernetworking/cni v0.7.1
github.com/containernetworking/cni/pkg/types
github.com/containernetworking/cni/pkg/types/current
@@ -51,7 +55,7 @@ github.com/containernetworking/plugins/pkg/ip
github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
github.com/containernetworking/plugins/pkg/utils/hwaddr
github.com/containernetworking/plugins/plugins/ipam/host-local/backend
-# github.com/containers/buildah v1.11.3
+# github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e
github.com/containers/buildah
github.com/containers/buildah/imagebuildah
github.com/containers/buildah/pkg/chrootuser
@@ -68,44 +72,44 @@ github.com/containers/buildah/pkg/cgroups
github.com/containers/buildah/pkg/overlay
github.com/containers/buildah/pkg/unshare
github.com/containers/buildah/pkg/umask
-# github.com/containers/image/v4 v4.0.1
-github.com/containers/image/v4/directory
-github.com/containers/image/v4/docker
-github.com/containers/image/v4/docker/archive
-github.com/containers/image/v4/manifest
-github.com/containers/image/v4/pkg/docker/config
-github.com/containers/image/v4/signature
-github.com/containers/image/v4/transports
-github.com/containers/image/v4/transports/alltransports
-github.com/containers/image/v4/types
-github.com/containers/image/v4/oci/archive
-github.com/containers/image/v4/storage
-github.com/containers/image/v4/copy
-github.com/containers/image/v4/docker/reference
-github.com/containers/image/v4/docker/tarfile
-github.com/containers/image/v4/oci/layout
-github.com/containers/image/v4/tarball
-github.com/containers/image/v4/pkg/sysregistriesv2
-github.com/containers/image/v4/image
-github.com/containers/image/v4/directory/explicitfilepath
-github.com/containers/image/v4/docker/policyconfiguration
-github.com/containers/image/v4/pkg/blobinfocache/none
-github.com/containers/image/v4/pkg/tlsclientconfig
-github.com/containers/image/v4/pkg/compression
-github.com/containers/image/v4/pkg/strslice
-github.com/containers/image/v4/internal/pkg/keyctl
-github.com/containers/image/v4/version
-github.com/containers/image/v4/docker/daemon
-github.com/containers/image/v4/openshift
-github.com/containers/image/v4/ostree
-github.com/containers/image/v4/pkg/compression/types
-github.com/containers/image/v4/internal/tmpdir
-github.com/containers/image/v4/oci/internal
-github.com/containers/image/v4/pkg/blobinfocache
-github.com/containers/image/v4/pkg/compression/internal
-github.com/containers/image/v4/pkg/blobinfocache/boltdb
-github.com/containers/image/v4/pkg/blobinfocache/memory
-github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize
+# github.com/containers/image/v5 v5.0.0
+github.com/containers/image/v5/directory
+github.com/containers/image/v5/docker
+github.com/containers/image/v5/docker/archive
+github.com/containers/image/v5/manifest
+github.com/containers/image/v5/pkg/docker/config
+github.com/containers/image/v5/signature
+github.com/containers/image/v5/transports
+github.com/containers/image/v5/transports/alltransports
+github.com/containers/image/v5/types
+github.com/containers/image/v5/oci/archive
+github.com/containers/image/v5/storage
+github.com/containers/image/v5/copy
+github.com/containers/image/v5/docker/reference
+github.com/containers/image/v5/docker/tarfile
+github.com/containers/image/v5/image
+github.com/containers/image/v5/oci/layout
+github.com/containers/image/v5/tarball
+github.com/containers/image/v5/pkg/sysregistriesv2
+github.com/containers/image/v5/directory/explicitfilepath
+github.com/containers/image/v5/docker/policyconfiguration
+github.com/containers/image/v5/pkg/blobinfocache/none
+github.com/containers/image/v5/pkg/tlsclientconfig
+github.com/containers/image/v5/pkg/compression
+github.com/containers/image/v5/pkg/strslice
+github.com/containers/image/v5/internal/pkg/keyctl
+github.com/containers/image/v5/version
+github.com/containers/image/v5/docker/daemon
+github.com/containers/image/v5/openshift
+github.com/containers/image/v5/ostree
+github.com/containers/image/v5/pkg/compression/types
+github.com/containers/image/v5/internal/tmpdir
+github.com/containers/image/v5/oci/internal
+github.com/containers/image/v5/pkg/blobinfocache
+github.com/containers/image/v5/pkg/compression/internal
+github.com/containers/image/v5/pkg/blobinfocache/boltdb
+github.com/containers/image/v5/pkg/blobinfocache/memory
+github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize
# github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b
github.com/containers/libtrust
# github.com/containers/psgo v1.3.2
@@ -183,7 +187,7 @@ github.com/docker/distribution/registry/client/transport
github.com/docker/distribution/registry/storage/cache
github.com/docker/distribution/registry/storage/cache/memory
github.com/docker/distribution/metrics
-# github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b
+# github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce
github.com/docker/docker/pkg/signal
github.com/docker/docker/pkg/homedir
github.com/docker/docker/oci/caps
@@ -193,7 +197,6 @@ github.com/docker/docker/pkg/ioutils
github.com/docker/docker/profiles/seccomp
github.com/docker/docker/pkg/parsers
github.com/docker/docker/api/types/versions
-github.com/docker/docker/pkg/idtools
github.com/docker/docker/errdefs
github.com/docker/docker/pkg/term/windows
github.com/docker/docker/pkg/longpath
@@ -201,7 +204,9 @@ github.com/docker/docker/api/types
github.com/docker/docker/pkg/parsers/kernel
github.com/docker/docker/api/types/registry
github.com/docker/docker/api/types/swarm
+github.com/docker/docker/pkg/archive
github.com/docker/docker/pkg/fileutils
+github.com/docker/docker/pkg/jsonmessage
github.com/docker/docker/pkg/stdcopy
github.com/docker/docker/pkg/system
github.com/docker/docker/client
@@ -210,6 +215,7 @@ github.com/docker/docker/api/types/filters
github.com/docker/docker/api/types/mount
github.com/docker/docker/api/types/network
github.com/docker/docker/api/types/swarm/runtime
+github.com/docker/docker/pkg/idtools
github.com/docker/docker/pkg/pools
github.com/docker/docker/pkg/mount
github.com/docker/docker/api
@@ -243,11 +249,8 @@ github.com/etcd-io/bbolt
github.com/fatih/camelcase
# github.com/fsnotify/fsnotify v1.4.7
github.com/fsnotify/fsnotify
-# github.com/fsouza/go-dockerclient v1.4.4
+# github.com/fsouza/go-dockerclient v1.5.0
github.com/fsouza/go-dockerclient
-github.com/fsouza/go-dockerclient/internal/archive
-github.com/fsouza/go-dockerclient/internal/jsonmessage
-github.com/fsouza/go-dockerclient/internal/term
# github.com/ghodss/yaml v1.0.0
github.com/ghodss/yaml
# github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f
@@ -277,8 +280,6 @@ github.com/hpcloud/tail/ratelimiter
github.com/hpcloud/tail/util
github.com/hpcloud/tail/watch
github.com/hpcloud/tail/winfile
-# github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd
-github.com/ijc/Gotty
# github.com/imdario/mergo v0.3.7
github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.0.0
@@ -312,6 +313,8 @@ github.com/mistifyio/go-zfs
github.com/modern-go/concurrent
# github.com/modern-go/reflect2 v1.0.1
github.com/modern-go/reflect2
+# github.com/morikuni/aec v1.0.0
+github.com/morikuni/aec
# github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
github.com/mrunalp/fileutils
# github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c
@@ -384,7 +387,7 @@ github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/go-selinux
# github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible
github.com/openshift/api/config/v1
-# github.com/openshift/imagebuilder v1.1.0
+# github.com/openshift/imagebuilder v1.1.1
github.com/openshift/imagebuilder
github.com/openshift/imagebuilder/dockerfile/parser
github.com/openshift/imagebuilder/dockerfile/command
@@ -490,7 +493,7 @@ github.com/xeipuuv/gojsonpointer
github.com/xeipuuv/gojsonreference
# github.com/xeipuuv/gojsonschema v1.1.0
github.com/xeipuuv/gojsonschema
-# golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
+# golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad
golang.org/x/crypto/ssh/terminal
golang.org/x/crypto/openpgp
golang.org/x/crypto/openpgp/armor
@@ -516,6 +519,7 @@ golang.org/x/oauth2
golang.org/x/oauth2/internal
# golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sync/semaphore
+golang.org/x/sync/errgroup
# golang.org/x/sys v0.0.0-20190902133755-9109b7679e13
golang.org/x/sys/unix
golang.org/x/sys/windows