Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/containers/buildah/README.md | 2
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 7
-rw-r--r--  vendor/github.com/containers/buildah/chroot/run.go | 45
-rw-r--r--  vendor/github.com/containers/buildah/commit.go | 62
-rw-r--r--  vendor/github.com/containers/buildah/common.go | 30
-rw-r--r--  vendor/github.com/containers/buildah/config.go | 34
-rw-r--r--  vendor/github.com/containers/buildah/delete.go | 3
-rw-r--r--  vendor/github.com/containers/buildah/docker/types.go | 5
-rw-r--r--  vendor/github.com/containers/buildah/image.go | 105
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go | 268
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go | 140
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/util.go | 25
-rw-r--r--  vendor/github.com/containers/buildah/info.go | 207
-rw-r--r--  vendor/github.com/containers/buildah/new.go | 82
-rw-r--r--  vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go | 517
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go | 18
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/pull.go | 20
-rw-r--r--  vendor/github.com/containers/buildah/run.go | 195
-rw-r--r--  vendor/github.com/containers/buildah/unshare/unshare.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/util.go | 8
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go | 66
-rw-r--r--  vendor/github.com/containers/buildah/vendor.conf | 11
-rw-r--r--  vendor/github.com/containers/image/copy/copy.go | 101
-rw-r--r--  vendor/github.com/containers/image/directory/directory_dest.go | 27
-rw-r--r--  vendor/github.com/containers/image/directory/directory_src.go | 4
-rw-r--r--  vendor/github.com/containers/image/docker/cache.go | 23
-rw-r--r--  vendor/github.com/containers/image/docker/docker_client.go | 144
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_dest.go | 159
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_src.go | 5
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/dest.go | 37
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/src.go | 4
-rw-r--r--  vendor/github.com/containers/image/image/docker_schema2.go | 7
-rw-r--r--  vendor/github.com/containers/image/image/oci.go | 3
-rw-r--r--  vendor/github.com/containers/image/oci/archive/oci_dest.go | 31
-rw-r--r--  vendor/github.com/containers/image/oci/archive/oci_src.go | 8
-rw-r--r--  vendor/github.com/containers/image/oci/layout/oci_dest.go | 33
-rw-r--r--  vendor/github.com/containers/image/oci/layout/oci_src.go | 6
-rw-r--r--  vendor/github.com/containers/image/openshift/openshift.go | 34
-rw-r--r--  vendor/github.com/containers/image/ostree/ostree_dest.go | 36
-rw-r--r--  vendor/github.com/containers/image/ostree/ostree_src.go | 6
-rw-r--r--  vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go | 329
-rw-r--r--  vendor/github.com/containers/image/pkg/blobinfocache/default.go | 63
-rw-r--r--  vendor/github.com/containers/image/pkg/blobinfocache/memory.go | 123
-rw-r--r--  vendor/github.com/containers/image/pkg/blobinfocache/none.go | 47
-rw-r--r--  vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go | 108
-rw-r--r--  vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go | 86
-rw-r--r--  vendor/github.com/containers/image/storage/storage_image.go | 110
-rw-r--r--  vendor/github.com/containers/image/tarball/tarball_src.go | 5
-rw-r--r--  vendor/github.com/containers/image/types/types.go | 128
-rw-r--r--  vendor/github.com/containers/image/vendor.conf | 3
-rw-r--r--  vendor/github.com/containers/storage/README.md | 2
-rw-r--r--  vendor/github.com/containers/storage/containers.go | 10
-rw-r--r--  vendor/github.com/containers/storage/containers_ffjson.go | 1
-rw-r--r--  vendor/github.com/containers/storage/drivers/aufs/aufs.go | 24
-rw-r--r--  vendor/github.com/containers/storage/drivers/btrfs/btrfs.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/copy/copy.go | 277
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/deviceset.go | 15
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/driver.go | 5
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver.go | 1
-rw-r--r--  vendor/github.com/containers/storage/drivers/fsdiff.go | 5
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 25
-rw-r--r--  vendor/github.com/containers/storage/drivers/vfs/copy_linux.go | 7
-rw-r--r--  vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go | 9
-rw-r--r--  vendor/github.com/containers/storage/drivers/vfs/driver.go | 10
-rw-r--r--  vendor/github.com/containers/storage/drivers/windows/windows.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs.go | 9
-rw-r--r--  vendor/github.com/containers/storage/layers.go | 17
-rw-r--r--  vendor/github.com/containers/storage/layers_ffjson.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/parser.go | 56
-rw-r--r--  vendor/github.com/containers/storage/store.go | 118
-rw-r--r--  vendor/github.com/containers/storage/vendor.conf | 6
-rw-r--r--  vendor/github.com/google/shlex/COPYING | 202
-rw-r--r--  vendor/github.com/google/shlex/README | 2
-rw-r--r--  vendor/github.com/google/shlex/shlex.go | 416
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/label/label.go | 18
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go | 13
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go | 17
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go | 13
-rw-r--r--  vendor/github.com/openshift/imagebuilder/builder.go | 39
-rw-r--r--  vendor/github.com/openshift/imagebuilder/dispatchers.go | 23
-rw-r--r--  vendor/github.com/openshift/imagebuilder/evaluator.go | 3
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go | 94
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h | 12
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go | 96
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go | 15
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go | 72
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go | 110
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go | 4
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go | 0
131 files changed, 4378 insertions(+), 927 deletions(-)
diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md
index 6a79e524b..12eafdf88 100644
--- a/vendor/github.com/containers/buildah/README.md
+++ b/vendor/github.com/containers/buildah/README.md
@@ -105,8 +105,10 @@ $ sudo ./lighttpd.sh
| [buildah-copy(1)](/docs/buildah-copy.md) | Copies the contents of a file, URL, or directory into a container's working directory. |
| [buildah-from(1)](/docs/buildah-from.md) | Creates a new working container, either from scratch or using a specified image as a starting point. |
| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. |
+| [buildah-info(1)](/docs/buildah-info.md) | Display Buildah system information. |
| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. |
| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. |
+| [buildah-pull(1)](/docs/buildah-pull.md) | Pull an image from the specified location. |
| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. |
| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. |
| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. |
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 9994d6cd0..cbdb5c9f9 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -25,7 +25,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.5-dev"
+ Version = "1.6-dev"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
@@ -224,6 +224,7 @@ func GetBuildInfo(b *Builder) BuilderInfo {
ContainerID: b.ContainerID,
MountPoint: b.MountPoint,
ProcessLabel: b.ProcessLabel,
+ MountLabel: b.MountLabel,
ImageAnnotations: b.ImageAnnotations,
ImageCreatedBy: b.ImageCreatedBy,
OCIv1: b.OCIv1,
@@ -316,6 +317,10 @@ type BuilderOptions struct {
// the registry together, can not be resolved to a reference to a
// source image. No separator is implicitly added.
Transport string
+ // PullBlobDirectory is the name of a directory in which we'll attempt
+ // to store copies of layer blobs that we pull down, if any. It should
+ // already exist.
+ PullBlobDirectory string
// Mount signals to NewBuilder() that the container should be mounted
// immediately.
Mount bool
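
The new PullBlobDirectory field lets callers of NewBuilder() point pulls at a preexisting blob-cache directory. A minimal sketch of that wiring, assuming the stock containers/storage defaults; the image name and cache path are illustrative, not taken from the diff:

    package main

    import (
        "context"
        "log"

        "github.com/containers/buildah"
        "github.com/containers/storage"
    )

    func main() {
        store, err := storage.GetStore(storage.DefaultStoreOptions)
        if err != nil {
            log.Fatal(err)
        }
        builder, err := buildah.NewBuilder(context.TODO(), store, buildah.BuilderOptions{
            FromImage: "docker.io/library/alpine:latest",
            // Per the doc comment above, the directory must already exist.
            PullBlobDirectory: "/var/cache/buildah-blobs",
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("created working container %s", builder.Container)
    }
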
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index 51e2d2bd4..6a1400e61 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -955,6 +955,20 @@ func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error {
return nil
}
+func makeReadOnly(mntpoint string, flags uintptr) error {
+ var fs unix.Statfs_t
+ // Make sure it's read-only.
+ if err := unix.Statfs(mntpoint, &fs); err != nil {
+ return errors.Wrapf(err, "error checking if directory %q was bound read-only", mntpoint)
+ }
+ if fs.Flags&unix.ST_RDONLY == 0 {
+ if err := unix.Mount(mntpoint, mntpoint, "bind", flags|unix.MS_REMOUNT, ""); err != nil {
+ return errors.Wrapf(err, "error remounting %s in mount namespace read-only", mntpoint)
+ }
+ }
+ return nil
+}
+
// setupChrootBindMounts actually bind mounts things under the rootfs, and returns a
// callback that will clean up its work.
func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func() error, err error) {
@@ -976,7 +990,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
bindFlags := commonFlags | unix.MS_NODEV
devFlags := commonFlags | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY
procFlags := devFlags | unix.MS_NODEV
- sysFlags := devFlags | unix.MS_NODEV | unix.MS_RDONLY
+ sysFlags := devFlags | unix.MS_NODEV
// Bind /dev read-only.
subDev := filepath.Join(spec.Root.Path, "/dev")
@@ -1030,13 +1044,22 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
return undoBinds, errors.Wrapf(err, "error bind mounting /sys from host into mount namespace")
}
}
- // Make sure it's read-only.
- if err = unix.Statfs(subSys, &fs); err != nil {
- return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subSys)
+ if err := makeReadOnly(subSys, sysFlags); err != nil {
+ return undoBinds, err
}
- if fs.Flags&unix.ST_RDONLY == 0 {
- if err := unix.Mount(subSys, subSys, "bind", sysFlags|unix.MS_REMOUNT, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error remounting /sys in mount namespace read-only")
+
+ mnts, _ := mount.GetMounts()
+ for _, m := range mnts {
+ if !strings.HasPrefix(m.Mountpoint, "/sys/") &&
+ m.Mountpoint != "/sys" {
+ continue
+ }
+ subSys := filepath.Join(spec.Root.Path, m.Mountpoint)
+ if err := unix.Mount(m.Mountpoint, subSys, "bind", sysFlags, ""); err != nil {
+ return undoBinds, errors.Wrapf(err, "error bind mounting /sys from host into mount namespace")
+ }
+ if err := makeReadOnly(subSys, sysFlags); err != nil {
+ return undoBinds, err
}
}
logrus.Debugf("bind mounted %q to %q", "/sys", filepath.Join(spec.Root.Path, "/sys"))
@@ -1044,10 +1067,6 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// Add /sys/fs/selinux to the set of masked paths, to ensure that we don't have processes
// attempting to interact with labeling, when they aren't allowed to do so.
spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/selinux")
- // Add /sys/fs/cgroup to the set of masked paths, to ensure that we don't have processes
- // attempting to mess with cgroup configuration, when they aren't allowed to do so.
- spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup")
-
// Bind mount in everything we've been asked to mount.
for _, m := range spec.Mounts {
// Skip anything that we just mounted.
@@ -1143,11 +1162,11 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
logrus.Debugf("mounted a tmpfs to %q", target)
}
if err = unix.Statfs(target, &fs); err != nil {
- return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subSys)
+ return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target)
}
if uintptr(fs.Flags)&expectedFlags != expectedFlags {
if err := unix.Mount(target, target, "bind", requestFlags|unix.MS_REMOUNT, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags")
+ return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags", target)
}
}
}
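
The new makeReadOnly() helper factors the statfs-then-remount check out of setupChrootBindMounts() so it can be applied both to /sys itself and to every mount nested under it. A standalone sketch of the same pattern (Linux-only, needs mount privileges; the mount point below is hypothetical):

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/sys/unix"
    )

    // remountReadOnly mirrors makeReadOnly: check the mounted filesystem's
    // flags, and only issue a bind-remount when it is still writable.
    func remountReadOnly(mntpoint string, flags uintptr) error {
        var fs unix.Statfs_t
        if err := unix.Statfs(mntpoint, &fs); err != nil {
            return fmt.Errorf("statfs %q: %w", mntpoint, err)
        }
        if fs.Flags&unix.ST_RDONLY == 0 {
            if err := unix.Mount(mntpoint, mntpoint, "bind", flags|unix.MS_REMOUNT, ""); err != nil {
                return fmt.Errorf("remounting %q read-only: %w", mntpoint, err)
            }
        }
        return nil
    }

    func main() {
        flags := uintptr(unix.MS_BIND | unix.MS_RDONLY | unix.MS_NOSUID | unix.MS_NOEXEC)
        if err := remountReadOnly("/mnt/rootfs/sys", flags); err != nil { // hypothetical mount point
            log.Fatal(err)
        }
    }
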
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 71696b432..591ead4e6 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -7,6 +7,7 @@ import (
"io/ioutil"
"time"
+ "github.com/containers/buildah/pkg/blobcache"
"github.com/containers/buildah/util"
cp "github.com/containers/image/copy"
"github.com/containers/image/docker/reference"
@@ -55,6 +56,12 @@ type CommitOptions struct {
// Squash tells the builder to produce an image with a single layer
// instead of with possibly more than one layer.
Squash bool
+ // BlobDirectory is the name of a directory in which we'll look for
+ // prebuilt copies of layer blobs that we might otherwise need to
+ // regenerate from on-disk layers. If blobs are available, the
+ // manifest of the new image will reference the blobs rather than
+ // on-disk layers.
+ BlobDirectory string
// OnBuild is a list of commands to be run by images based on this image
OnBuild []string
@@ -85,6 +92,11 @@ type PushOptions struct {
	// ManifestType is the format to use when saving the image using the 'dir' transport
// possible options are oci, v2s1, and v2s2
ManifestType string
+ // BlobDirectory is the name of a directory in which we'll look for
+ // prebuilt copies of layer blobs that we might otherwise need to
+ // regenerate from on-disk layers, substituting them in the list of
+ // blobs to copy whenever possible.
+ BlobDirectory string
}
// Commit writes the contents of the container, along with its updated
@@ -128,13 +140,37 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
}
}
- src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.Compression, options.HistoryTimestamp)
+ src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.BlobDirectory, options.Compression, options.HistoryTimestamp)
if err != nil {
return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID)
}
+ var maybeCachedSrc types.ImageReference = src
+ var maybeCachedDest types.ImageReference = dest
+ if options.BlobDirectory != "" {
+ compress := types.PreserveOriginal
+ if options.Compression != archive.Uncompressed {
+ compress = types.Compress
+ }
+ cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress)
+ if err != nil {
+ return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory)
+ }
+ maybeCachedSrc = cache
+ cache, err = blobcache.NewBlobCache(dest, options.BlobDirectory, compress)
+ if err != nil {
+ return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(dest), options.BlobDirectory)
+ }
+ maybeCachedDest = cache
+ }
// "Copy" our image to where it needs to be.
+ switch options.Compression {
+ case archive.Uncompressed:
+ systemContext.OCIAcceptUncompressedLayers = true
+ case archive.Gzip:
+ systemContext.DirForceCompress = true
+ }
var manifestBytes []byte
- if manifestBytes, err = cp.Image(ctx, policyContext, dest, src, getCopyOptions(options.ReportWriter, src, nil, dest, systemContext, "")); err != nil {
+ if manifestBytes, err = cp.Image(ctx, policyContext, maybeCachedDest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, maybeCachedDest, systemContext, "")); err != nil {
return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID)
}
if len(options.AdditionalTags) > 0 {
@@ -209,10 +245,28 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
if err != nil {
return nil, "", err
}
+ var maybeCachedSrc types.ImageReference = src
+ if options.BlobDirectory != "" {
+ compress := types.PreserveOriginal
+ if options.Compression != archive.Uncompressed {
+ compress = types.Compress
+ }
+ cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress)
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory)
+ }
+ maybeCachedSrc = cache
+ }
// Copy everything.
+ switch options.Compression {
+ case archive.Uncompressed:
+ systemContext.OCIAcceptUncompressedLayers = true
+ case archive.Gzip:
+ systemContext.DirForceCompress = true
+ }
var manifestBytes []byte
- if manifestBytes, err = cp.Image(ctx, policyContext, dest, src, getCopyOptions(options.ReportWriter, src, nil, dest, systemContext, options.ManifestType)); err != nil {
- return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(src), transports.ImageName(dest))
+ if manifestBytes, err = cp.Image(ctx, policyContext, dest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, dest, systemContext, options.ManifestType)); err != nil {
+ return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest))
}
if options.ReportWriter != nil {
fmt.Fprintf(options.ReportWriter, "")
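
When BlobDirectory is set, Commit() and Push() wrap the source (and, for Commit, the destination) reference in the new blob cache before handing it to cp.Image(). A sketch of wrapping an arbitrary reference the same way; the dir: reference and cache path are illustrative:

    package main

    import (
        "log"

        "github.com/containers/buildah/pkg/blobcache"
        "github.com/containers/image/transports/alltransports"
        "github.com/containers/image/types"
    )

    func main() {
        ref, err := alltransports.ParseImageName("dir:/tmp/image") // illustrative source
        if err != nil {
            log.Fatal(err)
        }
        // Mirror the switch in Commit: preserve blobs as-is for uncompressed
        // output, recompress otherwise.
        compress := types.PreserveOriginal
        cached, err := blobcache.NewBlobCache(ref, "/var/cache/buildah-blobs", compress)
        if err != nil {
            log.Fatal(err)
        }
        _ = cached // hand this to cp.Image in place of the original reference
    }
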
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
index 56a901925..dfdc33a22 100644
--- a/vendor/github.com/containers/buildah/common.go
+++ b/vendor/github.com/containers/buildah/common.go
@@ -2,12 +2,14 @@ package buildah
import (
"io"
-
- "github.com/sirupsen/logrus"
+ "os"
+ "path/filepath"
cp "github.com/containers/image/copy"
"github.com/containers/image/transports"
"github.com/containers/image/types"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/sirupsen/logrus"
)
const (
@@ -17,28 +19,42 @@ const (
DOCKER = "docker"
)
+// userRegistriesFile is the path to the per user registry configuration file.
+var userRegistriesFile = filepath.Join(os.Getenv("HOME"), ".config/containers/registries.conf")
+
func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference, sourceSystemContext *types.SystemContext, destinationReference types.ImageReference, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options {
sourceCtx := &types.SystemContext{}
if sourceSystemContext != nil {
*sourceCtx = *sourceSystemContext
+ } else {
+ if rootless.IsRootless() {
+ if _, err := os.Stat(userRegistriesFile); err == nil {
+ sourceCtx.SystemRegistriesConfPath = userRegistriesFile
+ }
+
+ }
}
sourceInsecure, err := isReferenceInsecure(sourceReference, sourceCtx)
if err != nil {
logrus.Debugf("error determining if registry for %q is insecure: %v", transports.ImageName(sourceReference), err)
} else if sourceInsecure {
- sourceCtx.DockerInsecureSkipTLSVerify = true
sourceCtx.OCIInsecureSkipTLSVerify = true
}
destinationCtx := &types.SystemContext{}
if destinationSystemContext != nil {
*destinationCtx = *destinationSystemContext
+ } else {
+ if rootless.IsRootless() {
+ if _, err := os.Stat(userRegistriesFile); err == nil {
+ destinationCtx.SystemRegistriesConfPath = userRegistriesFile
+ }
+ }
}
destinationInsecure, err := isReferenceInsecure(destinationReference, destinationCtx)
if err != nil {
logrus.Debugf("error determining if registry for %q is insecure: %v", transports.ImageName(destinationReference), err)
} else if destinationInsecure {
- destinationCtx.DockerInsecureSkipTLSVerify = true
destinationCtx.OCIInsecureSkipTLSVerify = true
}
@@ -58,5 +74,11 @@ func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string)
if signaturePolicyPath != "" {
sc.SignaturePolicyPath = signaturePolicyPath
}
+ if sc.SystemRegistriesConfPath == "" && rootless.IsRootless() {
+ if _, err := os.Stat(userRegistriesFile); err == nil {
+ sc.SystemRegistriesConfPath = userRegistriesFile
+ }
+
+ }
return sc
}
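
The rootless fallback above is small enough to lift out: when no registries configuration path is set and a per-user file exists, use it. A sketch of just that check; the real code additionally gates on rootless.IsRootless() from libpod, omitted here to keep the sketch self-contained:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"

        "github.com/containers/image/types"
    )

    // applyUserRegistriesConf prefers a per-user registries.conf when one
    // exists and no path has been configured yet.
    func applyUserRegistriesConf(sc *types.SystemContext) {
        userRegistriesFile := filepath.Join(os.Getenv("HOME"), ".config/containers/registries.conf")
        if sc.SystemRegistriesConfPath == "" {
            if _, err := os.Stat(userRegistriesFile); err == nil {
                sc.SystemRegistriesConfPath = userRegistriesFile
            }
        }
    }

    func main() {
        sc := &types.SystemContext{}
        applyUserRegistriesConf(sc)
        fmt.Println("registries config in use:", sc.SystemRegistriesConfPath)
    }
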
diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go
index 89224b674..3609694f6 100644
--- a/vendor/github.com/containers/buildah/config.go
+++ b/vendor/github.com/containers/buildah/config.go
@@ -543,3 +543,37 @@ func (b *Builder) SetStopSignal(stopSignal string) {
b.OCIv1.Config.StopSignal = stopSignal
b.Docker.Config.StopSignal = stopSignal
}
+
+// Healthcheck returns information that recommends how a container engine
+// should check if a running container is "healthy".
+func (b *Builder) Healthcheck() *docker.HealthConfig {
+ if b.Docker.Config.Healthcheck == nil {
+ return nil
+ }
+ return &docker.HealthConfig{
+ Test: copyStringSlice(b.Docker.Config.Healthcheck.Test),
+ Interval: b.Docker.Config.Healthcheck.Interval,
+ Timeout: b.Docker.Config.Healthcheck.Timeout,
+ StartPeriod: b.Docker.Config.Healthcheck.StartPeriod,
+ Retries: b.Docker.Config.Healthcheck.Retries,
+ }
+}
+
+// SetHealthcheck sets recommended commands to run in order to verify that a
+// running container based on this image is "healthy", along with information
+// specifying how often that test should be run, and how many times the test
+// should fail before the container should be considered unhealthy.
+// Note: this setting is not present in the OCIv1 image format, so it is
+// discarded when writing images using OCIv1 formats.
+func (b *Builder) SetHealthcheck(config *docker.HealthConfig) {
+ b.Docker.Config.Healthcheck = nil
+ if config != nil {
+ b.Docker.Config.Healthcheck = &docker.HealthConfig{
+ Test: copyStringSlice(config.Test),
+ Interval: config.Interval,
+ Timeout: config.Timeout,
+ StartPeriod: config.StartPeriod,
+ Retries: config.Retries,
+ }
+ }
+}
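
A sketch of driving the new accessors; the Builder value is assumed to come from buildah.NewBuilder(), and the check command is illustrative:

    package example

    import (
        "time"

        "github.com/containers/buildah"
        "github.com/containers/buildah/docker"
    )

    // configureHealthcheck sketches the new API; b is assumed to be a working
    // container's Builder.
    func configureHealthcheck(b *buildah.Builder) {
        b.SetHealthcheck(&docker.HealthConfig{
            // Docker's CMD-SHELL form runs the test under a shell.
            Test:        []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
            Interval:    30 * time.Second,
            Timeout:     5 * time.Second,
            StartPeriod: 10 * time.Second, // plumbed through in docker/types.go below
            Retries:     3,
        })
    }

As the doc comment warns, the setting is dropped when committing in OCIv1 format, so it only survives in Docker-format images.
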
diff --git a/vendor/github.com/containers/buildah/delete.go b/vendor/github.com/containers/buildah/delete.go
index 25f76cf74..e3bddba20 100644
--- a/vendor/github.com/containers/buildah/delete.go
+++ b/vendor/github.com/containers/buildah/delete.go
@@ -1,7 +1,6 @@
package buildah
import (
- "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
@@ -14,5 +13,5 @@ func (b *Builder) Delete() error {
b.MountPoint = ""
b.Container = ""
b.ContainerID = ""
- return label.ReleaseLabel(b.ProcessLabel)
+ return nil
}
diff --git a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go
index 759fc1246..6847d36fd 100644
--- a/vendor/github.com/containers/buildah/docker/types.go
+++ b/vendor/github.com/containers/buildah/docker/types.go
@@ -60,8 +60,9 @@ type HealthConfig struct {
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
- Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
- Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+ StartPeriod time.Duration `json:",omitempty"` // Time to wait after the container starts before running the first check.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 31aff9eea..163d34269 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -57,22 +57,24 @@ type containerImageRef struct {
squash bool
tarPath func(path string) (io.ReadCloser, error)
parent string
+ blobDirectory string
}
type containerImageSource struct {
- path string
- ref *containerImageRef
- store storage.Store
- containerID string
- mountLabel string
- layerID string
- names []string
- compression archive.Compression
- config []byte
- configDigest digest.Digest
- manifest []byte
- manifestType string
- exporting bool
+ path string
+ ref *containerImageRef
+ store storage.Store
+ containerID string
+ mountLabel string
+ layerID string
+ names []string
+ compression archive.Compression
+ config []byte
+ configDigest digest.Digest
+ manifest []byte
+ manifestType string
+ exporting bool
+ blobDirectory string
}
func (i *containerImageRef) NewImage(ctx context.Context, sc *types.SystemContext) (types.ImageCloser, error) {
@@ -105,12 +107,11 @@ func expectedDockerDiffIDs(image docker.V2Image) int {
// Compute the media types which we need to attach to a layer, given the type of
// compression that we'll be applying.
-func (i *containerImageRef) computeLayerMIMEType(what string) (omediaType, dmediaType string, err error) {
+func computeLayerMIMEType(what string, layerCompression archive.Compression) (omediaType, dmediaType string, err error) {
omediaType = v1.MediaTypeImageLayer
- //TODO: Convert to manifest.DockerV2Schema2LayerUncompressedMediaType once available
dmediaType = docker.V2S2MediaTypeUncompressedLayer
- if i.compression != archive.Uncompressed {
- switch i.compression {
+ if layerCompression != archive.Uncompressed {
+ switch layerCompression {
case archive.Gzip:
omediaType = v1.MediaTypeImageLayerGzip
dmediaType = manifest.DockerV2Schema2LayerMediaType
@@ -281,19 +282,21 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// The default layer media type assumes no compression.
omediaType := v1.MediaTypeImageLayer
dmediaType := docker.V2S2MediaTypeUncompressedLayer
+ // Look up this layer.
+ layer, err := i.store.Layer(layerID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
+ }
// If we're not re-exporting the data, and we're reusing layers individually, reuse
// the blobsum and diff IDs.
if !i.exporting && !i.squash && layerID != i.layerID {
- layer, err2 := i.store.Layer(layerID)
- if err2 != nil {
- return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
- }
if layer.UncompressedDigest == "" {
return nil, errors.Errorf("unable to look up size of layer %q", layerID)
}
layerBlobSum := layer.UncompressedDigest
layerBlobSize := layer.UncompressedSize
- // Note this layer in the manifest, using the uncompressed blobsum.
+ diffID := layer.UncompressedDigest
+ // Note this layer in the manifest, using the appropriate blobsum.
olayerDescriptor := v1.Descriptor{
MediaType: omediaType,
Digest: layerBlobSum,
@@ -306,13 +309,13 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
Size: layerBlobSize,
}
dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
- // Note this layer in the list of diffIDs, again using the uncompressed blobsum.
- oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, layerBlobSum)
- dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, layerBlobSum)
+ // Note this layer in the list of diffIDs, again using the uncompressed digest.
+ oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID)
+ dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID)
continue
}
- // Figure out if we need to change the media type, in case we're using compression.
- omediaType, dmediaType, err = i.computeLayerMIMEType(what)
+ // Figure out if we need to change the media type, in case we've changed the compression.
+ omediaType, dmediaType, err = computeLayerMIMEType(what, i.compression)
if err != nil {
return nil, err
}
@@ -369,8 +372,9 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
logrus.Debugf("%s size is %d bytes", what, size)
// Rename the layer so that we can more easily find it by digest later.
- if err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String())); err != nil {
- return nil, errors.Wrapf(err, "error storing %s to file while renaming %q to %q", what, filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String()))
+ finalBlobName := filepath.Join(path, destHasher.Digest().String())
+ if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
+ return nil, errors.Wrapf(err, "error storing %s to file while renaming %q to %q", what, filepath.Join(path, "layer"), finalBlobName)
}
// Add a note in the manifest about the layer. The blobs are identified by their possibly-
// compressed blob digests.
@@ -473,19 +477,20 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
panic("unreachable code: unsupported manifest type")
}
src = &containerImageSource{
- path: path,
- ref: i,
- store: i.store,
- containerID: i.containerID,
- mountLabel: i.mountLabel,
- layerID: i.layerID,
- names: i.names,
- compression: i.compression,
- config: config,
- configDigest: digest.Canonical.FromBytes(config),
- manifest: imageManifest,
- manifestType: manifestType,
- exporting: i.exporting,
+ path: path,
+ ref: i,
+ store: i.store,
+ containerID: i.containerID,
+ mountLabel: i.mountLabel,
+ layerID: i.layerID,
+ names: i.names,
+ compression: i.compression,
+ config: config,
+ configDigest: digest.Canonical.FromBytes(config),
+ manifest: imageManifest,
+ manifestType: manifestType,
+ exporting: i.exporting,
+ blobDirectory: i.blobDirectory,
}
return src, nil
}
@@ -552,7 +557,7 @@ func (i *containerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.B
return nil, nil
}
-func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) {
+func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) {
if blob.Digest == i.configDigest {
logrus.Debugf("start reading config")
reader := bytes.NewReader(i.config)
@@ -562,7 +567,16 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo)
}
return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil
}
- layerFile, err := os.OpenFile(filepath.Join(i.path, blob.Digest.String()), os.O_RDONLY, 0600)
+ var layerFile *os.File
+ for _, path := range []string{i.blobDirectory, i.path} {
+ layerFile, err = os.OpenFile(filepath.Join(path, blob.Digest.String()), os.O_RDONLY, 0600)
+ if err == nil {
+ break
+ }
+ if !os.IsNotExist(err) {
+ logrus.Debugf("error checking for layer %q in %q: %v", blob.Digest.String(), path, err)
+ }
+ }
if err != nil {
logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
return nil, -1, errors.Wrapf(err, "error opening file %q to buffer layer blob", filepath.Join(i.path, blob.Digest.String()))
@@ -585,7 +599,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo)
return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
}
-func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
+func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, blobDirectory string, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
var name reference.Named
container, err := b.store.Container(b.ContainerID)
if err != nil {
@@ -631,6 +645,7 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
squash: squash,
tarPath: b.tarPath(),
parent: parent,
+ blobDirectory: blobDirectory,
}
return ref, nil
}
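
GetBlob() now searches the blob-cache directory before the image source's own temporary directory. The lookup loop, reduced to a standalone helper; the digest and paths are hypothetical:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // openBlob tries each directory in turn and returns the first hit,
    // logging unexpected errors but treating "not exist" as a normal miss.
    func openBlob(digest string, dirs ...string) (*os.File, error) {
        var lastErr error = os.ErrNotExist
        for _, dir := range dirs {
            f, err := os.OpenFile(filepath.Join(dir, digest), os.O_RDONLY, 0600)
            if err == nil {
                return f, nil
            }
            if !os.IsNotExist(err) {
                fmt.Fprintf(os.Stderr, "error checking for blob %q in %q: %v\n", digest, dir, err)
            }
            lastErr = err
        }
        return nil, lastErr
    }

    func main() {
        // Hypothetical digest and directories; the cache directory is tried first.
        f, err := openBlob("sha256:deadbeef", "/var/cache/buildah-blobs", "/tmp/layer-dir")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        defer f.Close()
    }
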
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 41d85cbc6..2681bc198 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -10,11 +10,13 @@ import (
"os"
"os/exec"
"path/filepath"
+ "regexp"
"strconv"
"strings"
"time"
"github.com/containers/buildah"
+ buildahdocker "github.com/containers/buildah/docker"
"github.com/containers/buildah/util"
cp "github.com/containers/image/copy"
"github.com/containers/image/docker/reference"
@@ -167,6 +169,8 @@ type BuildOptions struct {
// ForceRmIntermediateCtrs tells the builder to remove all intermediate containers even if
// the build was unsuccessful.
ForceRmIntermediateCtrs bool
+ // BlobDirectory is a directory which we'll use for caching layer blobs.
+ BlobDirectory string
}
// Executor is a buildah-based implementation of the imagebuilder.Executor
@@ -222,11 +226,24 @@ type Executor struct {
forceRmIntermediateCtrs bool
containerIDs []string // Stores the IDs of the successful intermediate containers used during layer build
imageMap map[string]string // Used to map images that we create to handle the AS construct.
+ copyFrom string // Used to keep track of the --from flag from COPY and ADD
+ blobDirectory string
+}
+// builtinAllowedBuildArgs is the list of built-in allowed build args
+var builtinAllowedBuildArgs = map[string]bool{
+ "HTTP_PROXY": true,
+ "http_proxy": true,
+ "HTTPS_PROXY": true,
+ "https_proxy": true,
+ "FTP_PROXY": true,
+ "ftp_proxy": true,
+ "NO_PROXY": true,
+ "no_proxy": true,
}
// withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME.
-func (b *Executor) withName(name string, index int) *Executor {
+func (b *Executor) withName(name string, index int, from string) *Executor {
if b.named == nil {
b.named = make(map[string]*Executor)
}
@@ -235,6 +252,7 @@ func (b *Executor) withName(name string, index int) *Executor {
copied.name = name
child := &copied
b.named[name] = child
+ b.named[from] = child
if idx := strconv.Itoa(index); idx != name {
b.named[idx] = child
}
@@ -563,39 +581,40 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
registry: options.Registry,
transport: options.Transport,
ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
- quiet: options.Quiet,
- runtime: options.Runtime,
- runtimeArgs: options.RuntimeArgs,
- transientMounts: options.TransientMounts,
- compression: options.Compression,
- output: options.Output,
- outputFormat: options.OutputFormat,
- additionalTags: options.AdditionalTags,
- signaturePolicyPath: options.SignaturePolicyPath,
- systemContext: options.SystemContext,
- volumeCache: make(map[string]string),
- volumeCacheInfo: make(map[string]os.FileInfo),
- log: options.Log,
- in: options.In,
- out: options.Out,
- err: options.Err,
- reportWriter: options.ReportWriter,
- isolation: options.Isolation,
- namespaceOptions: options.NamespaceOptions,
- configureNetwork: options.ConfigureNetwork,
- cniPluginPath: options.CNIPluginPath,
- cniConfigDir: options.CNIConfigDir,
- idmappingOptions: options.IDMappingOptions,
- commonBuildOptions: options.CommonBuildOpts,
- defaultMountsFilePath: options.DefaultMountsFilePath,
- iidfile: options.IIDFile,
- squash: options.Squash,
- labels: append([]string{}, options.Labels...),
- annotations: append([]string{}, options.Annotations...),
- layers: options.Layers,
- noCache: options.NoCache,
- removeIntermediateCtrs: options.RemoveIntermediateCtrs,
- forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
+ quiet: options.Quiet,
+ runtime: options.Runtime,
+ runtimeArgs: options.RuntimeArgs,
+ transientMounts: options.TransientMounts,
+ compression: options.Compression,
+ output: options.Output,
+ outputFormat: options.OutputFormat,
+ additionalTags: options.AdditionalTags,
+ signaturePolicyPath: options.SignaturePolicyPath,
+ systemContext: options.SystemContext,
+ volumeCache: make(map[string]string),
+ volumeCacheInfo: make(map[string]os.FileInfo),
+ log: options.Log,
+ in: options.In,
+ out: options.Out,
+ err: options.Err,
+ reportWriter: options.ReportWriter,
+ isolation: options.Isolation,
+ namespaceOptions: options.NamespaceOptions,
+ configureNetwork: options.ConfigureNetwork,
+ cniPluginPath: options.CNIPluginPath,
+ cniConfigDir: options.CNIConfigDir,
+ idmappingOptions: options.IDMappingOptions,
+ commonBuildOptions: options.CommonBuildOpts,
+ defaultMountsFilePath: options.DefaultMountsFilePath,
+ iidfile: options.IIDFile,
+ squash: options.Squash,
+ labels: append([]string{}, options.Labels...),
+ annotations: append([]string{}, options.Annotations...),
+ layers: options.Layers,
+ noCache: options.NoCache,
+ removeIntermediateCtrs: options.RemoveIntermediateCtrs,
+ forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
+ blobDirectory: options.BlobDirectory,
}
if exec.err == nil {
exec.err = os.Stderr
@@ -651,6 +670,7 @@ func (b *Executor) Prepare(ctx context.Context, stage imagebuilder.Stage, from s
PullPolicy: b.pullPolicy,
Registry: b.registry,
Transport: b.transport,
+ PullBlobDirectory: b.blobDirectory,
SignaturePolicyPath: b.signaturePolicyPath,
ReportWriter: b.reportWriter,
SystemContext: b.systemContext,
@@ -764,7 +784,7 @@ func (b *Executor) resolveNameToImageRef() (types.ImageReference, error) {
if err != nil {
candidates, _, err := util.ResolveName(b.output, "", b.systemContext, b.store)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing target image name %q: %v", b.output)
+ return nil, errors.Wrapf(err, "error parsing target image name %q", b.output)
}
if len(candidates) == 0 {
return nil, errors.Errorf("error parsing target image name %q", b.output)
@@ -793,12 +813,28 @@ func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error
commitName := b.output
b.containerIDs = nil
+ var leftoverArgs []string
+ for arg := range b.builder.Args {
+ if !builtinAllowedBuildArgs[arg] {
+ leftoverArgs = append(leftoverArgs, arg)
+ }
+ }
for i, node := range node.Children {
step := ib.Step()
if err := step.Resolve(node); err != nil {
return errors.Wrapf(err, "error resolving step %+v", *node)
}
logrus.Debugf("Parsed Step: %+v", *step)
+ if step.Command == "arg" {
+ for index, arg := range leftoverArgs {
+ for _, Arg := range step.Args {
+ list := strings.SplitN(Arg, "=", 2)
+ if arg == list[0] {
+ leftoverArgs = append(leftoverArgs[:index], leftoverArgs[index+1:]...)
+ }
+ }
+ }
+ }
if !b.quiet {
b.log("%s", step.Original)
}
@@ -826,6 +862,18 @@ func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error
err error
imgID string
)
+
+ b.copyFrom = ""
+ // Check if --from exists in the step command of COPY or ADD
+	// If it exists, set b.copyFrom to that value
+ for _, n := range step.Flags {
+ if strings.Contains(n, "--from") && (step.Command == "copy" || step.Command == "add") {
+ arr := strings.Split(n, "=")
+ b.copyFrom = b.named[arr[1]].mountPoint
+ break
+ }
+ }
+
// checkForLayers will be true if b.layers is true and a cached intermediate image is found.
// checkForLayers is set to false when either there is no cached image or a break occurs where
// the instructions in the Dockerfile change from a previous build.
@@ -848,6 +896,7 @@ func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error
if err := b.copyExistingImage(ctx, cacheID); err != nil {
return err
}
+ b.containerIDs = append(b.containerIDs, b.builder.ContainerID)
break
}
@@ -882,6 +931,9 @@ func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error
}
}
}
+ if len(leftoverArgs) > 0 {
+ fmt.Fprintf(b.out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs)
+ }
return nil
}
@@ -1009,6 +1061,11 @@ func (b *Executor) getFilesToCopy(node *parser.Node) ([]string, error) {
currNode = currNode.Next
continue
}
+ if b.copyFrom != "" {
+ src = append(src, filepath.Join(b.copyFrom, currNode.Value))
+ currNode = currNode.Next
+ continue
+ }
matches, err := filepath.Glob(filepath.Join(b.contextDir, currNode.Value))
if err != nil {
return nil, errors.Wrapf(err, "error finding match for pattern %q", currNode.Value)
@@ -1044,17 +1101,19 @@ func (b *Executor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) (
}
continue
}
- // For local files, walk the file tree and check the time stamps.
- timeIsGreater := false
- err := filepath.Walk(item, func(path string, info os.FileInfo, err error) error {
- if info.ModTime().After(*historyTime) {
- timeIsGreater = true
- return nil
- }
- return nil
- })
+		// For local files, walk the file tree inside a chroot so that resolving
+		// symlinks cannot escape the allowed path.
+		// Serialize the timestamp with an explicit format (RFC3339Nano) so that it
+		// parses back into a time.Time unambiguously; time.Time's default string
+		// form does not round-trip reliably.
+		// If the COPY command has --from, change the root directory to the mount
+		// point of the container it is copying from.
+ rootdir := b.contextDir
+ if b.copyFrom != "" {
+ rootdir = b.copyFrom
+ }
+ timeIsGreater, err := resolveModifiedTime(rootdir, item, historyTime.Format(time.RFC3339Nano))
if err != nil {
- return false, errors.Wrapf(err, "error walking file tree %q", item)
+ return false, errors.Wrapf(err, "error resolving symlinks and comparing modified times: %q", item)
}
if timeIsGreater {
return false, nil
@@ -1119,6 +1178,17 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder, created
b.builder.SetEntrypoint(config.Entrypoint)
b.builder.SetShell(config.Shell)
b.builder.SetStopSignal(config.StopSignal)
+ if config.Healthcheck != nil {
+ b.builder.SetHealthcheck(&buildahdocker.HealthConfig{
+ Test: append([]string{}, config.Healthcheck.Test...),
+ Interval: config.Healthcheck.Interval,
+ Timeout: config.Healthcheck.Timeout,
+ StartPeriod: config.Healthcheck.StartPeriod,
+ Retries: config.Healthcheck.Retries,
+ })
+ } else {
+ b.builder.SetHealthcheck(nil)
+ }
b.builder.ClearLabels()
for k, v := range config.Labels {
b.builder.SetLabel(k, v)
@@ -1164,6 +1234,7 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder, created
SystemContext: b.systemContext,
IIDFile: b.iidfile,
Squash: b.squash,
+ BlobDirectory: b.blobDirectory,
Parent: b.builder.FromImageID,
}
imgID, ref, _, err := b.builder.Commit(ctx, imageRef, options)
@@ -1189,8 +1260,16 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (strin
b.imageMap = make(map[string]string)
stageCount := 0
for _, stage := range stages {
- stageExecutor = b.withName(stage.Name, stage.Position)
- if err := stageExecutor.Prepare(ctx, stage, ""); err != nil {
+ ib := stage.Builder
+ node := stage.Node
+ base, err := ib.From(node)
+ if err != nil {
+ logrus.Debugf("Build(node.Children=%#v)", node.Children)
+ return "", nil, err
+ }
+
+ stageExecutor = b.withName(stage.Name, stage.Position, base)
+ if err := stageExecutor.Prepare(ctx, stage, base); err != nil {
return "", nil, err
}
// Always remove the intermediate/build containers, even if the build was unsuccessful.
@@ -1289,15 +1368,24 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
} else {
// If the Dockerfile isn't found try prepending the
// context directory to it.
- if _, err := os.Stat(dfile); os.IsNotExist(err) {
+ dinfo, err := os.Stat(dfile)
+ if os.IsNotExist(err) {
dfile = filepath.Join(options.ContextDirectory, dfile)
}
+ dinfo, err = os.Stat(dfile)
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "error reading info about %q", dfile)
+ }
+ // If given a directory, add '/Dockerfile' to it.
+ if dinfo.Mode().IsDir() {
+ dfile = filepath.Join(dfile, "Dockerfile")
+ }
logrus.Debugf("reading local Dockerfile %q", dfile)
contents, err := os.Open(dfile)
if err != nil {
return "", nil, errors.Wrapf(err, "error reading %q", dfile)
}
- dinfo, err := contents.Stat()
+ dinfo, err = contents.Stat()
if err != nil {
contents.Close()
return "", nil, errors.Wrapf(err, "error reading info about %q", dfile)
@@ -1320,6 +1408,9 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
dockerfiles = append(dockerfiles, data)
}
+
+ dockerfiles = processCopyFrom(dockerfiles)
+
mainNode, err := imagebuilder.ParseDockerfile(dockerfiles[0])
if err != nil {
return "", nil, errors.Wrapf(err, "error parsing main Dockerfile")
@@ -1336,10 +1427,87 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
return "", nil, errors.Wrapf(err, "error creating build executor")
}
b := imagebuilder.NewBuilder(options.Args)
- stages := imagebuilder.NewStages(mainNode, b)
+ stages, err := imagebuilder.NewStages(mainNode, b)
+ if err != nil {
+ return "", nil, errors.Wrap(err, "error reading multiple stages")
+ }
return exec.Build(ctx, stages)
}
+// processCopyFrom goes through the Dockerfiles and handles any 'COPY --from' instances,
+// prepending a new FROM statement for any referenced image that does not already have a
+// corresponding FROM command in the Dockerfile.
+func processCopyFrom(dockerfiles []io.ReadCloser) []io.ReadCloser {
+
+ var newDockerfiles []io.ReadCloser
+ // fromMap contains the names of the images seen in a FROM
+ // line in the Dockerfiles. The boolean value just completes the map object.
+ fromMap := make(map[string]bool)
+ // asMap contains the names of the images seen after a "FROM image AS"
+	// line in the Dockerfiles. The boolean value just completes the map object.
+ asMap := make(map[string]bool)
+
+ copyRE := regexp.MustCompile(`\s*COPY\s+--from=`)
+ fromRE := regexp.MustCompile(`\s*FROM\s+`)
+ asRE := regexp.MustCompile(`(?i)\s+as\s+`)
+ for _, dfile := range dockerfiles {
+ if dfileBinary, err := ioutil.ReadAll(dfile); err == nil {
+ dfileString := fmt.Sprintf("%s", dfileBinary)
+ copyFromContent := copyRE.Split(dfileString, -1)
+ // no "COPY --from=", just continue
+ if len(copyFromContent) < 2 {
+ newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(dfileString)))
+ continue
+ }
+ // Load all image names in our Dockerfiles into a map
+ // for easy reference later.
+ fromContent := fromRE.Split(dfileString, -1)
+ for i := 0; i < len(fromContent); i++ {
+ imageName := strings.Split(fromContent[i], " ")
+ if len(imageName) > 0 {
+ finalImage := strings.Split(imageName[0], "\n")
+ if finalImage[0] != "" {
+ fromMap[strings.TrimSpace(finalImage[0])] = true
+ }
+ }
+ }
+ logrus.Debug("fromMap: ", fromMap)
+
+ // Load all image names associated with an 'as' or 'AS' in
+ // our Dockerfiles into a map for easy reference later.
+ asContent := asRE.Split(dfileString, -1)
+ // Skip the first entry in the array as it's stuff before
+ // the " as " and we don't care.
+ for i := 1; i < len(asContent); i++ {
+ asName := strings.Split(asContent[i], " ")
+ if len(asName) > 0 {
+ finalAsImage := strings.Split(asName[0], "\n")
+ if finalAsImage[0] != "" {
+ asMap[strings.TrimSpace(finalAsImage[0])] = true
+ }
+ }
+ }
+ logrus.Debug("asMap: ", asMap)
+
+ for i := 1; i < len(copyFromContent); i++ {
+ fromArray := strings.Split(copyFromContent[i], " ")
+ // If the image isn't a stage number or already declared,
+ // add a FROM statement for it to the top of our Dockerfile.
+ trimmedFrom := strings.TrimSpace(fromArray[0])
+ _, okFrom := fromMap[trimmedFrom]
+ _, okAs := asMap[trimmedFrom]
+ _, err := strconv.Atoi(trimmedFrom)
+ if !okFrom && !okAs && err != nil {
+ from := "FROM " + trimmedFrom
+ newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(from)))
+ }
+ }
+ newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(dfileString)))
+ } // End if dfileBinary, err := ioutil.ReadAll(dfile); err == nil
+ } // End for _, dfile := range dockerfiles {
+ return newDockerfiles
+}
+
// deleteSuccessfulIntermediateCtrs goes through the container IDs in b.containerIDs
// and deletes the containers associated with that ID.
func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
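
A toy demonstration of what processCopyFrom() accomplishes: when COPY --from= names an image that no FROM line declares, a synthesized FROM line is emitted so the name resolves to a stage. This reuses the copyRE pattern above with a deliberately simplified membership check:

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    func main() {
        dockerfile := "FROM alpine\nCOPY --from=builder /bin/app /bin/app\n"
        copyRE := regexp.MustCompile(`\s*COPY\s+--from=`)
        // Everything after each "COPY --from=" starts with the referenced name.
        for _, part := range copyRE.Split(dockerfile, -1)[1:] {
            fields := strings.Fields(part)
            if len(fields) == 0 {
                continue
            }
            name := fields[0]
            // Simplified membership check; the real code builds fromMap and asMap.
            if !strings.Contains(dockerfile, "FROM "+name) {
                fmt.Printf("FROM %s\n", name)
            }
        }
        fmt.Print(dockerfile)
    }

Running it prints the synthesized "FROM builder" line ahead of the original Dockerfile, which is exactly the shape of input the multi-stage parser expects.
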
diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
index 20e396f1f..edb5837db 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
@@ -6,6 +6,7 @@ import (
"os"
"path/filepath"
"strings"
+ "time"
"github.com/containers/storage/pkg/reexec"
"github.com/pkg/errors"
@@ -14,13 +15,18 @@ import (
const (
symlinkChrootedCommand = "chrootsymlinks-resolve"
+ symlinkModifiedTime = "modtimesymlinks-resolve"
maxSymlinksResolved = 40
)
func init() {
reexec.Register(symlinkChrootedCommand, resolveChrootedSymlinks)
+ reexec.Register(symlinkModifiedTime, resolveSymlinkTimeModified)
}
+// resolveChrootedSymlinks is the main() for the child subprocess: it chroots
+// into its first argument, resolves any symbolic links in the path named by
+// its second argument, and writes the result to stdout.
func resolveChrootedSymlinks() {
status := 0
flag.Parse()
@@ -39,7 +45,7 @@ func resolveChrootedSymlinks() {
}
// Our second parameter is the path name to evaluate for symbolic links
- symLink, err := getSymbolicLink(flag.Arg(0), flag.Arg(1))
+ symLink, err := getSymbolicLink(flag.Arg(1))
if err != nil {
fmt.Fprintf(os.Stderr, "error getting symbolic links: %v\n", err)
os.Exit(1)
@@ -51,7 +57,8 @@ func resolveChrootedSymlinks() {
os.Exit(status)
}
-// ResolveSymlink resolves any symlink in filename in the context of rootdir.
+// ResolveSymLink (in the grandparent process) resolves any symlink in filename
+// in the context of rootdir.
func ResolveSymLink(rootdir, filename string) (string, error) {
// The child process expects a chroot and one path that
// will be consulted relative to the chroot directory and evaluated
@@ -62,32 +69,128 @@ func ResolveSymLink(rootdir, filename string) (string, error) {
return "", errors.Wrapf(err, string(output))
}
- // Hand back the resolved symlink, will be "" if a symlink is not found
+ // Hand back the resolved symlink, will be filename if a symlink is not found
return string(output), nil
}
+// resolveSymlinkTimeModified is the main() for the child subprocess: it chroots
+// into its first argument, resolves the path named by its second argument, and
+// reports whether that file was modified after the timestamp in its third.
+func resolveSymlinkTimeModified() {
+ status := 0
+ flag.Parse()
+ if len(flag.Args()) < 1 {
+ os.Exit(1)
+ }
+ // Our first parameter is the directory to chroot into.
+ if err := unix.Chdir(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chdir(): %v\n", err)
+ os.Exit(1)
+ }
+ if err := unix.Chroot(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chroot(): %v\n", err)
+ os.Exit(1)
+ }
+
+ // Our second parameter is the path name to evaluate for symbolic links.
+ // Our third parameter is the time the cached intermediate image was created.
+ // We check whether the modified time of the filepath we provide is after the time the cached image was created.
+ timeIsGreater, err := modTimeIsGreater(flag.Arg(0), flag.Arg(1), flag.Arg(2))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error checking if modified time of resolved symbolic link is greater: %v\n", err)
+ os.Exit(1)
+ }
+ if _, err := os.Stdout.WriteString(fmt.Sprintf("%v", timeIsGreater)); err != nil {
+ fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err)
+ os.Exit(1)
+ }
+ os.Exit(status)
+}
+
+// resolveModifiedTime (in the grandparent process) checks filename for any symlinks,
+// resolves it and compares the modified time of the file with historyTime, which is
+// the creation time of the cached image. It returns true if filename was modified after
+// historyTime, otherwise returns false.
+func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) {
+	// The child process expects a chroot directory, a path that will be
+	// consulted relative to that directory and evaluated for any symbolic
+	// links present, and the creation time of the cached image to compare against.
+ cmd := reexec.Command(symlinkModifiedTime, rootdir, filename, historyTime)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return false, errors.Wrapf(err, string(output))
+ }
+	// Hand back true/false depending on whether the file was modified after the cached image was created.
+ return string(output) == "true", nil
+}
+
+// modTimeIsGreater walks the files added/copied in by the Dockerfile and compares
+// their time stamps (following symlinks) with the time stamp of when the cached
+// image was created. It returns true if any file was modified after the cached
+// image was created, otherwise it returns false.
+func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) {
+ var timeIsGreater bool
+
+ // Convert historyTime from string to time.Time for comparison
+ histTime, err := time.Parse(time.RFC3339Nano, historyTime)
+ if err != nil {
+ return false, errors.Wrapf(err, "error converting string to time.Time %q", historyTime)
+ }
+ // Walk the file tree and check the time stamps.
+	// Since we are chrooted into rootdir, we only want the path of the actual filename, i.e. path - rootdir.
+	// The +1 accounts for the extra "/" (e.g. rootdir=/home/user/mydir, path=/home/user/mydir/myfile.json).
+ err = filepath.Walk(path[len(rootdir)+1:], func(path string, info os.FileInfo, err error) error {
+ // When using cached images, a file that is being copied may come from an
+ // earlier build stage; no container was created for that stage, so the
+ // copied file won't exist on disk and info will be nil. In that case just
+ // return nil and continue using the cached image for the rest of the build.
+ if info == nil {
+ return nil
+ }
+ modTime := info.ModTime()
+ if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+ // Evaluate any symlink that occurs to get updated modified information
+ resolvedPath, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ return errors.Wrapf(err, "error evaluating symlink %q", path)
+ }
+ fileInfo, err := os.Stat(resolvedPath)
+ if err != nil {
+ return errors.Wrapf(err, "error getting file info %q", resolvedPath)
+ }
+ modTime = fileInfo.ModTime()
+ }
+ if modTime.After(histTime) {
+ timeIsGreater = true
+ return nil
+ }
+ return nil
+ })
+ if err != nil {
+ return false, errors.Wrapf(err, "error walking file tree %q", path)
+ }
+ return timeIsGreater, nil
+}
+
// getSymbolicLink goes through each part of the path and continues resolving symlinks as they appear.
// It returns the full target path that "path" resolves to.
-func getSymbolicLink(rootdir, path string) (string, error) {
+func getSymbolicLink(path string) (string, error) {
var (
symPath string
symLinksResolved int
)
-
- // Splitting path as we need to resolve each parth of the path at a time
+ // Splitting path as we need to resolve the path one part at a time
splitPath := strings.Split(path, "/")
if splitPath[0] == "" {
splitPath = splitPath[1:]
symPath = "/"
}
-
for _, p := range splitPath {
// If we have resolved maxSymlinksResolved symlinks, something is terribly
// wrong; return an error and exit
if symLinksResolved >= maxSymlinksResolved {
return "", errors.Errorf("have resolved %d symlinks, something is terribly wrong!", maxSymlinksResolved)
}
-
symPath = filepath.Join(symPath, p)
isSymlink, resolvedPath, err := hasSymlink(symPath)
if err != nil {
@@ -119,16 +222,21 @@ func getSymbolicLink(rootdir, path string) (string, error) {
// otherwise it returns false and path
func hasSymlink(path string) (bool, string, error) {
info, err := os.Lstat(path)
- if os.IsNotExist(err) {
- if err = os.MkdirAll(path, 0755); err != nil {
- return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path)
- }
- info, err = os.Lstat(path)
- if err != nil {
- return false, "", errors.Wrapf(err, "error running lstat on %q", path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ if err = os.MkdirAll(path, 0755); err != nil {
+ return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path)
+ }
+ info, err = os.Lstat(path)
+ if err != nil {
+ return false, "", errors.Wrapf(err, "error running lstat on %q", path)
+ }
+ } else {
+ return false, path, errors.Wrapf(err, "error getting stat of path %q", path)
}
}
- // Return false and path as path is not a symlink
+
+ // Return false and path if path is not a symlink
if info.Mode()&os.ModeSymlink != os.ModeSymlink {
return false, path, nil
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go
index 35dc5438a..4f5301b73 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/util.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/util.go
@@ -111,3 +111,28 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
func InitReexec() bool {
return buildah.InitReexec()
}
+
+// ReposToMap parses the specified repotags and returns a map with repositories
+// as keys and the corresponding arrays of tags as values.
+func ReposToMap(repotags []string) map[string][]string {
+ // map format is repo -> tag
+ repos := make(map[string][]string)
+ for _, repo := range repotags {
+ var repository, tag string
+ if strings.Contains(repo, ":") {
+ li := strings.LastIndex(repo, ":")
+ repository = repo[0:li]
+ tag = repo[li+1:]
+ } else if len(repo) > 0 {
+ repository = repo
+ tag = "<none>"
+ } else {
+ logrus.Warnf("Found image with empty name")
+ continue
+ }
+ repos[repository] = append(repos[repository], tag)
+ }
+ if len(repos) == 0 {
+ repos["<none>"] = []string{"<none>"}
+ }
+ return repos
+}
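+
+// For example (illustrative), two tags on the same repository are grouped
+// under one key:
+//
+//	ReposToMap([]string{"docker.io/library/alpine:latest", "docker.io/library/alpine:3.8"})
+//	// => map[string][]string{"docker.io/library/alpine": {"latest", "3.8"}}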
diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go
new file mode 100644
index 000000000..8cd5e4438
--- /dev/null
+++ b/vendor/github.com/containers/buildah/info.go
@@ -0,0 +1,207 @@
+package buildah
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// InfoData holds the info type, i.e. store, host etc., and the data for each type
+type InfoData struct {
+ Type string
+ Data map[string]interface{}
+}
+
+// Info returns the store and host information
+func Info(store storage.Store) ([]InfoData, error) {
+ info := []InfoData{}
+ // get host information
+ hostInfo, err := hostInfo()
+ if err != nil {
+ logrus.Error(err, "error getting host info")
+ }
+ info = append(info, InfoData{Type: "host", Data: hostInfo})
+
+ // get store information
+ storeInfo, err := storeInfo(store)
+ if err != nil {
+ logrus.Error(err, "error getting store info")
+ }
+ info = append(info, InfoData{Type: "store", Data: storeInfo})
+ return info, nil
+}
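+
+// A minimal usage sketch (assuming a storage.Store was already acquired by
+// the caller, e.g. via storage.GetStore):
+//
+//	infos, err := buildah.Info(store)
+//	if err != nil {
+//		return err
+//	}
+//	for _, i := range infos {
+//		fmt.Printf("%s: %v\n", i.Type, i.Data)
+//	}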
+
+func hostInfo() (map[string]interface{}, error) {
+ info := map[string]interface{}{}
+ info["os"] = runtime.GOOS
+ info["arch"] = runtime.GOARCH
+ info["cpus"] = runtime.NumCPU()
+ info["rootless"] = rootless.IsRootless()
+ mi, err := system.ReadMemInfo()
+ if err != nil {
+ logrus.Error(err, "err reading memory info")
+ info["MemTotal"] = ""
+ info["MenFree"] = ""
+ info["SwapTotal"] = ""
+ info["SwapFree"] = ""
+ } else {
+ info["MemTotal"] = mi.MemTotal
+ info["MenFree"] = mi.MemFree
+ info["SwapTotal"] = mi.SwapTotal
+ info["SwapFree"] = mi.SwapFree
+ }
+ hostDistributionInfo := getHostDistributionInfo()
+ info["Distribution"] = map[string]interface{}{
+ "distribution": hostDistributionInfo["Distribution"],
+ "version": hostDistributionInfo["Version"],
+ }
+
+ kv, err := readKernelVersion()
+ if err != nil {
+ logrus.Error(err, "error reading kernel version")
+ }
+ info["kernel"] = kv
+
+ up, err := readUptime()
+ if err != nil {
+ logrus.Error(err, "error reading up time")
+ }
+ // Convert uptime in seconds to a human-readable format
+ upSeconds := up + "s"
+ upDuration, err := time.ParseDuration(upSeconds)
+ if err != nil {
+ logrus.Error(err, "error parsing system uptime")
+ }
+
+ // Reformat the duration with spaces between the hour/minute/second
+ // fields, and capture the hours digits so we can also report the
+ // approximate number of days below.
+ hoursFound := false
+ var timeBuffer bytes.Buffer
+ var hoursBuffer bytes.Buffer
+ for _, elem := range upDuration.String() {
+ timeBuffer.WriteRune(elem)
+ if elem == 'h' || elem == 'm' {
+ timeBuffer.WriteRune(' ')
+ if elem == 'h' {
+ hoursFound = true
+ }
+ }
+ if !hoursFound {
+ hoursBuffer.WriteRune(elem)
+ }
+ }
+
+ info["uptime"] = timeBuffer.String()
+ if hoursFound {
+ hours, err := strconv.ParseFloat(hoursBuffer.String(), 64)
+ if err == nil {
+ days := hours / 24
+ info["uptime"] = fmt.Sprintf("%s (Approximately %.2f days)", info["uptime"], days)
+ }
+ }
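+ // For example, 93784.12 seconds of uptime parse to the duration
+ // "26h3m4.12s", which is reformatted above as "26h 3m 4.12s" and
+ // reported with "(Approximately 1.08 days)" appended.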
+
+ host, err := os.Hostname()
+ if err != nil {
+ logrus.Error(err, "error getting hostname")
+ }
+ info["hostname"] = host
+
+ return info, nil
+}
+
+// top-level "store" info
+func storeInfo(store storage.Store) (map[string]interface{}, error) {
+ // e.g. the storage driver in use, number of images, number of containers
+ info := map[string]interface{}{}
+ info["GraphRoot"] = store.GraphRoot()
+ info["RunRoot"] = store.RunRoot()
+ info["GraphDriverName"] = store.GraphDriverName()
+ info["GraphOptions"] = store.GraphOptions()
+ statusPairs, err := store.Status()
+ if err != nil {
+ return nil, err
+ }
+ status := map[string]string{}
+ for _, pair := range statusPairs {
+ status[pair[0]] = pair[1]
+ }
+ info["GraphStatus"] = status
+ images, err := store.Images()
+ if err != nil {
+ logrus.Error(err, "error getting number of images")
+ }
+ info["ImageStore"] = map[string]interface{}{
+ "number": len(images),
+ }
+
+ containers, err := store.Containers()
+ if err != nil {
+ logrus.Error(err, "error getting number of containers")
+ }
+ info["ContainerStore"] = map[string]interface{}{
+ "number": len(containers),
+ }
+
+ return info, nil
+}
+
+func readKernelVersion() (string, error) {
+ buf, err := ioutil.ReadFile("/proc/version")
+ if err != nil {
+ return "", err
+ }
+ f := bytes.Fields(buf)
+ if len(f) < 2 {
+ return string(bytes.TrimSpace(buf)), nil
+ }
+ return string(f[2]), nil
+}
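+
+// For example, a /proc/version beginning with
+// "Linux version 4.18.16-300.fc29.x86_64 (user@host) ..." yields
+// "4.18.16-300.fc29.x86_64", the third whitespace-separated field.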
+
+func readUptime() (string, error) {
+ buf, err := ioutil.ReadFile("/proc/uptime")
+ if err != nil {
+ return "", err
+ }
+ f := bytes.Fields(buf)
+ if len(f) < 1 {
+ return "", fmt.Errorf("invalid uptime")
+ }
+ return string(f[0]), nil
+}
+
+// getHostDistributionInfo returns a map containing the host's distribution and version
+func getHostDistributionInfo() map[string]string {
+ dist := make(map[string]string)
+
+ // Populate values in case we cannot find the values
+ // or the file
+ dist["Distribution"] = "unknown"
+ dist["Version"] = "unknown"
+
+ f, err := os.Open("/etc/os-release")
+ if err != nil {
+ return dist
+ }
+ defer f.Close()
+
+ l := bufio.NewScanner(f)
+ for l.Scan() {
+ if strings.HasPrefix(l.Text(), "ID=") {
+ dist["Distribution"] = strings.TrimPrefix(l.Text(), "ID=")
+ }
+ if strings.HasPrefix(l.Text(), "VERSION_ID=") {
+ dist["Version"] = strings.Trim(strings.TrimPrefix(l.Text(), "VERSION_ID="), "\"")
+ }
+ }
+ return dist
+}
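+
+// For example, an /etc/os-release containing the lines
+//
+//	ID=fedora
+//	VERSION_ID=29
+//
+// yields {"Distribution": "fedora", "Version": "29"}; if the file is missing,
+// both values stay "unknown".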
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 8b0e774ba..7e7f97e49 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -3,6 +3,7 @@ package buildah
import (
"context"
"fmt"
+ "math/rand"
"strings"
"github.com/containers/buildah/util"
@@ -12,7 +13,6 @@ import (
"github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
"github.com/containers/storage"
- "github.com/opencontainers/selinux/go-selinux/label"
"github.com/openshift/imagebuilder"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -23,11 +23,6 @@ const (
// as "no image".
BaseImageFakeName = imagebuilder.NoBaseImageSpecifier
- // DefaultTransport is a prefix that we apply to an image name if we
- // can't find one in the local Store, in order to generate a source
- // reference for the image that we can then copy to the local Store.
- DefaultTransport = "docker://"
-
// minimumTruncatedIDLength is the minimum length of an identifier that
// we'll accept as possibly being a truncated image ID.
minimumTruncatedIDLength = 3
@@ -39,6 +34,7 @@ func pullAndFindImage(ctx context.Context, store storage.Store, imageName string
Store: store,
SystemContext: options.SystemContext,
Transport: options.Transport,
+ BlobDirectory: options.PullBlobDirectory,
}
ref, err := pullImage(ctx, store, imageName, pullOptions, sc)
if err != nil {
@@ -150,7 +146,7 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
}
logrus.Debugf("error parsing image name %q as given, trying with transport %q: %v", image, options.Transport, err)
transport := options.Transport
- if transport != DefaultTransport {
+ if transport != util.DefaultTransport {
transport = transport + ":"
}
srcRef2, err := alltransports.ParseImageName(transport + image)
@@ -232,6 +228,27 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
}
}
+func containerNameExist(name string, containers []storage.Container) bool {
+ for _, container := range containers {
+ for _, cname := range container.Names {
+ if cname == name {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func findUnusedContainer(name string, containers []storage.Container) string {
+ suffix := 1
+ tmpName := name
+ for containerNameExist(tmpName, containers) {
+ tmpName = fmt.Sprintf("%s-%d", name, suffix)
+ suffix++
+ }
+ return tmpName
+}
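+
+// For example, if containers named "alpine-working-container" and
+// "alpine-working-container-1" already exist, findUnusedContainer
+// returns "alpine-working-container-2".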
+
func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
var ref types.ImageReference
var img *storage.Image
@@ -241,7 +258,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
options.FromImage = ""
}
if options.Transport == "" {
- options.Transport = DefaultTransport
+ options.Transport = util.DefaultTransport
}
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
@@ -277,23 +294,33 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
name = imageNamePrefix(image) + "-" + name
}
}
+ var container *storage.Container
+ tmpName := name
+ if options.Container == "" {
+ containers, err := store.Containers()
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to check for container names")
+ }
+ tmpName = findUnusedContainer(tmpName, containers)
+ }
- coptions := storage.ContainerOptions{}
- coptions.IDMappingOptions = newContainerIDMappingOptions(options.IDMappingOptions)
-
- container, err := store.CreateContainer("", []string{name}, imageID, "", "", &coptions)
- suffix := 1
- for err != nil && errors.Cause(err) == storage.ErrDuplicateName && options.Container == "" {
- suffix++
- tmpName := fmt.Sprintf("%s-%d", name, suffix)
- if container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions); err == nil {
+ conflict := 100
+ for {
+ coptions := storage.ContainerOptions{
+ LabelOpts: options.CommonBuildOpts.LabelOpts,
+ IDMappingOptions: newContainerIDMappingOptions(options.IDMappingOptions),
+ }
+ container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions)
+ if err == nil {
name = tmpName
+ break
}
+ if errors.Cause(err) != storage.ErrDuplicateName || options.Container != "" {
+ return nil, errors.Wrapf(err, "error creating container")
+ }
+ tmpName = fmt.Sprintf("%s-%d", name, rand.Int()%conflict)
+ conflict = conflict * 10
}
- if err != nil {
- return nil, errors.Wrapf(err, "error creating container")
- }
-
defer func() {
if err != nil {
if err2 := store.DeleteContainer(container.ID); err2 != nil {
@@ -302,13 +329,6 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
}
}()
- if err = ReserveSELinuxLabels(store, container.ID); err != nil {
- return nil, err
- }
- processLabel, mountLabel, err := label.InitLabels(options.CommonBuildOpts.LabelOpts)
- if err != nil {
- return nil, err
- }
uidmap, gidmap := convertStorageIDMaps(container.UIDMap, container.GIDMap)
defaultNamespaceOptions, err := DefaultNamespaceOptions()
@@ -328,8 +348,8 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
ContainerID: container.ID,
ImageAnnotations: map[string]string{},
ImageCreatedBy: "",
- ProcessLabel: processLabel,
- MountLabel: mountLabel,
+ ProcessLabel: container.ProcessLabel(),
+ MountLabel: container.MountLabel(),
DefaultMountsFilePath: options.DefaultMountsFilePath,
Isolation: options.Isolation,
NamespaceOptions: namespaceOptions,
@@ -351,7 +371,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
}
if options.Mount {
- _, err = builder.Mount(mountLabel)
+ _, err = builder.Mount(container.MountLabel())
if err != nil {
return nil, errors.Wrapf(err, "error mounting build container %q", builder.ContainerID)
}
diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
new file mode 100644
index 000000000..63022f15d
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
@@ -0,0 +1,517 @@
+package blobcache
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/containers/buildah/docker"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/image"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ _ types.ImageReference = &blobCacheReference{}
+ _ types.ImageSource = &blobCacheSource{}
+ _ types.ImageDestination = &blobCacheDestination{}
+)
+
+const (
+ compressedNote = ".compressed"
+ decompressedNote = ".decompressed"
+)
+
+// BlobCache is an object which saves copies of blobs that are written to it while passing them
+// through to some real destination, and which can be queried directly in order to read them
+// back.
+type BlobCache interface {
+ types.ImageReference
+ // HasBlob checks if a blob that matches the passed-in digest (and
+ // size, if not -1), is present in the cache.
+ HasBlob(types.BlobInfo) (bool, int64, error)
+ // Directory returns the location of the cache directory.
+ Directory() string
+ // ClearCache() clears the contents of the cache directory. Note
+ // that this also clears content which was not placed there by this
+ // cache implementation.
+ ClearCache() error
+}
+
+type blobCacheReference struct {
+ reference types.ImageReference
+ directory string
+ compress types.LayerCompression
+}
+
+type blobCacheSource struct {
+ reference *blobCacheReference
+ source types.ImageSource
+ sys types.SystemContext
+ cacheHits int64
+ cacheMisses int64
+ cacheErrors int64
+}
+
+type blobCacheDestination struct {
+ reference *blobCacheReference
+ destination types.ImageDestination
+}
+
+func makeFilename(blobSum digest.Digest, isConfig bool) string {
+ if isConfig {
+ return blobSum.String() + ".config"
+ }
+ return blobSum.String()
+}
+
+// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are
+// written to the destination image created from the resulting reference will also be stored
+// as-is to the specified directory. The cache directory's contents
+// can be cleared by calling the returned BlobCache's ClearCache() method.
+// The compress argument controls whether or not the cache will try to substitute a compressed
+// or different version of a blob when preparing the list of layers when reading an image.
+func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (BlobCache, error) {
+ if directory == "" {
+ return nil, errors.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref))
+ }
+ switch compress {
+ case types.Compress, types.Decompress, types.PreserveOriginal:
+ // valid value, accept it
+ default:
+ return nil, errors.Errorf("unhandled LayerCompression value %v", compress)
+ }
+ return &blobCacheReference{
+ reference: ref,
+ directory: directory,
+ compress: compress,
+ }, nil
+}
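+
+// A minimal usage sketch (assuming ref came from, e.g.,
+// alltransports.ParseImageName, cacheDir already exists, and ctx and sys are
+// supplied by the caller):
+//
+//	cached, err := blobcache.NewBlobCache(ref, cacheDir, types.PreserveOriginal)
+//	if err != nil {
+//		return err
+//	}
+//	dest, err := cached.NewImageDestination(ctx, sys)
+//	// blobs written through dest are also saved under cacheDir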
+
+func (r *blobCacheReference) Transport() types.ImageTransport {
+ return r.reference.Transport()
+}
+
+func (r *blobCacheReference) StringWithinTransport() string {
+ return r.reference.StringWithinTransport()
+}
+
+func (r *blobCacheReference) DockerReference() reference.Named {
+ return r.reference.DockerReference()
+}
+
+func (r *blobCacheReference) PolicyConfigurationIdentity() string {
+ return r.reference.PolicyConfigurationIdentity()
+}
+
+func (r *blobCacheReference) PolicyConfigurationNamespaces() []string {
+ return r.reference.PolicyConfigurationNamespaces()
+}
+
+func (r *blobCacheReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return r.reference.DeleteImage(ctx, sys)
+}
+
+func (r *blobCacheReference) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
+ if blobinfo.Digest == "" {
+ return false, -1, nil
+ }
+
+ for _, isConfig := range []bool{false, true} {
+ filename := filepath.Join(r.directory, makeFilename(blobinfo.Digest, isConfig))
+ fileInfo, err := os.Stat(filename)
+ if err == nil && (blobinfo.Size == -1 || blobinfo.Size == fileInfo.Size()) {
+ return true, fileInfo.Size(), nil
+ }
+ if !os.IsNotExist(err) {
+ return false, -1, errors.Wrapf(err, "error checking size of %q", filename)
+ }
+ }
+
+ return false, -1, nil
+}
+
+func (r *blobCacheReference) Directory() string {
+ return r.directory
+}
+
+func (r *blobCacheReference) ClearCache() error {
+ f, err := os.Open(r.directory)
+ if err != nil {
+ return errors.Wrapf(err, "error opening directory %q", r.directory)
+ }
+ defer f.Close()
+ names, err := f.Readdirnames(-1)
+ if err != nil {
+ return errors.Wrapf(err, "error reading directory %q", r.directory)
+ }
+ for _, name := range names {
+ pathname := filepath.Join(r.directory, name)
+ if err = os.RemoveAll(pathname); err != nil {
+ return errors.Wrapf(err, "error removing %q while clearing cache for %q", pathname, transports.ImageName(r))
+ }
+ }
+ return nil
+}
+
+func (r *blobCacheReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ src, err := r.NewImageSource(ctx, sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(r.reference))
+ }
+ return image.FromSource(ctx, sys, src)
+}
+
+func (r *blobCacheReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ src, err := r.reference.NewImageSource(ctx, sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating new image source %q", transports.ImageName(r.reference))
+ }
+ logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(r.reference), r.directory, r.compress)
+ return &blobCacheSource{reference: r, source: src, sys: *sys}, nil
+}
+
+func (r *blobCacheReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ dest, err := r.reference.NewImageDestination(ctx, sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating new image destination %q", transports.ImageName(r.reference))
+ }
+ logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(r.reference), r.directory)
+ return &blobCacheDestination{reference: r, destination: dest}, nil
+}
+
+func (s *blobCacheSource) Reference() types.ImageReference {
+ return s.reference
+}
+
+func (s *blobCacheSource) Close() error {
+ logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors)
+ return s.source.Close()
+}
+
+func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false))
+ manifestBytes, err := ioutil.ReadFile(filename)
+ if err == nil {
+ s.cacheHits++
+ return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil
+ }
+ if !os.IsNotExist(err) {
+ s.cacheErrors++
+ return nil, "", errors.Wrapf(err, "error checking for manifest file %q", filename)
+ }
+ }
+ s.cacheMisses++
+ return s.source.GetManifest(ctx, instanceDigest)
+}
+
+func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ present, size, err := s.reference.HasBlob(blobinfo)
+ if err != nil {
+ return nil, -1, err
+ }
+ if present {
+ for _, isConfig := range []bool{false, true} {
+ filename := filepath.Join(s.reference.directory, makeFilename(blobinfo.Digest, isConfig))
+ f, err := os.Open(filename)
+ if err == nil {
+ s.cacheHits++
+ return f, size, nil
+ }
+ if !os.IsNotExist(err) {
+ s.cacheErrors++
+ return nil, -1, errors.Wrapf(err, "error checking for cache file %q", filepath.Join(s.reference.directory, filename))
+ }
+ }
+ }
+ s.cacheMisses++
+ rc, size, err := s.source.GetBlob(ctx, blobinfo, cache)
+ if err != nil {
+ return rc, size, errors.Wrapf(err, "error reading blob from source image %q", transports.ImageName(s.reference))
+ }
+ return rc, size, nil
+}
+
+func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ return s.source.GetSignatures(ctx, instanceDigest)
+}
+
+func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+ signatures, err := s.source.GetSignatures(ctx, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference))
+ }
+ canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0)
+
+ infos, err := s.source.LayerInfosForCopy(ctx)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference))
+ }
+ if infos == nil {
+ image, err := s.reference.NewImage(ctx, &s.sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference))
+ }
+ defer image.Close()
+ infos = image.LayerInfos()
+ }
+
+ if canReplaceBlobs && s.reference.compress != types.PreserveOriginal {
+ replacedInfos := make([]types.BlobInfo, 0, len(infos))
+ for _, info := range infos {
+ var replaceDigest []byte
+ var err error
+ blobFile := filepath.Join(s.reference.directory, makeFilename(info.Digest, false))
+ alternate := ""
+ switch s.reference.compress {
+ case types.Compress:
+ alternate = blobFile + compressedNote
+ replaceDigest, err = ioutil.ReadFile(alternate)
+ case types.Decompress:
+ alternate = blobFile + decompressedNote
+ replaceDigest, err = ioutil.ReadFile(alternate)
+ }
+ if err == nil && digest.Digest(replaceDigest).Validate() == nil {
+ alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false))
+ fileInfo, err := os.Stat(alternate)
+ if err == nil {
+ logrus.Debugf("suggesting cached blob with digest %q and compression %v in place of blob with digest %q", string(replaceDigest), s.reference.compress, info.Digest.String())
+ info.Digest = digest.Digest(replaceDigest)
+ info.Size = fileInfo.Size()
+ switch info.MediaType {
+ case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip:
+ switch s.reference.compress {
+ case types.Compress:
+ info.MediaType = v1.MediaTypeImageLayerGzip
+ case types.Decompress:
+ info.MediaType = v1.MediaTypeImageLayer
+ }
+ case docker.V2S2MediaTypeUncompressedLayer, manifest.DockerV2Schema2LayerMediaType:
+ switch s.reference.compress {
+ case types.Compress:
+ info.MediaType = manifest.DockerV2Schema2LayerMediaType
+ case types.Decompress:
+ info.MediaType = docker.V2S2MediaTypeUncompressedLayer
+ }
+ }
+ }
+ }
+ replacedInfos = append(replacedInfos, info)
+ }
+ infos = replacedInfos
+ }
+
+ return infos, nil
+}
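+
+// For example, with compress == types.Decompress: a cached layer stored as
+// <dir>/sha256:aaa... with a sidecar file <dir>/sha256:aaa....decompressed
+// whose contents are "sha256:bbb..." gets its layer info rewritten to point
+// at the decompressed blob sha256:bbb..., provided that blob is present in
+// <dir>.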
+
+func (d *blobCacheDestination) Reference() types.ImageReference {
+ return d.reference
+}
+
+func (d *blobCacheDestination) Close() error {
+ logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference))
+ return d.destination.Close()
+}
+
+func (d *blobCacheDestination) SupportedManifestMIMETypes() []string {
+ return d.destination.SupportedManifestMIMETypes()
+}
+
+func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error {
+ return d.destination.SupportsSignatures(ctx)
+}
+
+func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression {
+ return d.destination.DesiredLayerCompression()
+}
+
+func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool {
+ return d.destination.AcceptsForeignLayerURLs()
+}
+
+func (d *blobCacheDestination) MustMatchRuntimeOS() bool {
+ return d.destination.MustMatchRuntimeOS()
+}
+
+func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool {
+ return d.destination.IgnoresEmbeddedDockerReference()
+}
+
+// Decompress and save the contents of the decompressReader stream into the passed-in temporary
+// file. If we successfully save all of the data, rename the file to match the digest of the data,
+// and make notes about the relationship between the file that holds a copy of the compressed data
+// and this new file.
+func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) {
+ defer wg.Done()
+ // Decompress from and digest the reading end of that pipe.
+ decompressed, err3 := archive.DecompressStream(decompressReader)
+ digester := digest.Canonical.Digester()
+ if err3 == nil {
+ // Read the decompressed data through the filter over the pipe, blocking until the
+ // writing end is closed.
+ _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
+ } else {
+ // Drain the pipe to keep from stalling the PutBlob() thread.
+ io.Copy(ioutil.Discard, decompressReader)
+ }
+ decompressReader.Close()
+ if decompressed != nil {
+ decompressed.Close()
+ }
+ tempFile.Close()
+ // Determine the name that we should give to the uncompressed copy of the blob.
+ decompressedFilename := filepath.Join(filepath.Dir(tempFile.Name()), makeFilename(digester.Digest(), isConfig))
+ if err3 == nil {
+ // Rename the temporary file.
+ if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil {
+ logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3)
+ // Remove the temporary file.
+ if err3 = os.Remove(tempFile.Name()); err3 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
+ }
+ } else {
+ *alternateDigest = digester.Digest()
+ // Note the relationship between the two files.
+ if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil {
+ logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3)
+ }
+ if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil {
+ logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3)
+ }
+ }
+ } else {
+ // Remove the temporary file.
+ if err3 = os.Remove(tempFile.Name()); err3 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
+ }
+ }
+}
+
+func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ var tempfile *os.File
+ var err error
+ var n int
+ var alternateDigest digest.Digest
+ wg := new(sync.WaitGroup)
+ defer wg.Wait()
+ compression := archive.Uncompressed
+ if inputInfo.Digest != "" {
+ filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
+ tempfile, err = ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
+ if err == nil {
+ stream = io.TeeReader(stream, tempfile)
+ defer func() {
+ if err == nil {
+ if err = os.Rename(tempfile.Name(), filename); err != nil {
+ if err2 := os.Remove(tempfile.Name()); err2 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2)
+ }
+ err = errors.Wrapf(err, "error renaming new layer for blob %q into place at %q", inputInfo.Digest.String(), filename)
+ }
+ } else {
+ if err2 := os.Remove(tempfile.Name()); err2 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2)
+ }
+ }
+ tempfile.Close()
+ }()
+ } else {
+ logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err)
+ }
+ if !isConfig {
+ initial := make([]byte, 8)
+ n, err = stream.Read(initial)
+ if n > 0 {
+ // Build a Reader that will still return the bytes that we just
+ // read, for PutBlob()'s sake.
+ stream = io.MultiReader(bytes.NewReader(initial[:n]), stream)
+ if n >= len(initial) {
+ compression = archive.DetectCompression(initial[:n])
+ }
+ if compression != archive.Uncompressed {
+ // The stream is compressed, so create a file which we'll
+ // use to store a decompressed copy.
+ decompressedTemp, err2 := ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
+ if err2 != nil {
+ logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2)
+ decompressedTemp.Close()
+ } else {
+ // Write a copy of the compressed data to a pipe,
+ // closing the writing end of the pipe after
+ // PutBlob() returns.
+ decompressReader, decompressWriter := io.Pipe()
+ defer decompressWriter.Close()
+ stream = io.TeeReader(stream, decompressWriter)
+ // Let saveStream() close the reading end and handle the temporary file.
+ wg.Add(1)
+ go saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, isConfig, &alternateDigest)
+ }
+ }
+ }
+ }
+ }
+ newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+ if err != nil {
+ return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference))
+ }
+ if alternateDigest.Validate() == nil {
+ logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory)
+ } else {
+ logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory)
+ }
+ return newBlobInfo, nil
+}
+
+func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute)
+ if err != nil || present {
+ return present, reusedInfo, err
+ }
+
+ for _, isConfig := range []bool{false, true} {
+ filename := filepath.Join(d.reference.directory, makeFilename(info.Digest, isConfig))
+ f, err := os.Open(filename)
+ if err == nil {
+ defer f.Close()
+ uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig)
+ if err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ return true, uploadedInfo, nil
+ }
+ }
+
+ return false, types.BlobInfo{}, nil
+}
+
+func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte) error {
+ manifestDigest, err := manifest.Digest(manifestBytes)
+ if err != nil {
+ logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err)
+ } else {
+ filename := filepath.Join(d.reference.directory, makeFilename(manifestDigest, false))
+ if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil {
+ logrus.Warnf("error saving manifest as %q: %v", filename, err)
+ }
+ }
+ return d.destination.PutManifest(ctx, manifestBytes)
+}
+
+func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+ return d.destination.PutSignatures(ctx, signatures)
+}
+
+func (d *blobCacheDestination) Commit(ctx context.Context) error {
+ return d.destination.Commit(ctx)
+}
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index b54663f5d..e3a4fe62a 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -71,6 +71,10 @@ var (
LayerFlags = []cli.Flag{
cli.BoolFlag{
+ Name: "force-rm",
+ Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.",
+ },
+ cli.BoolFlag{
Name: "layers",
Usage: fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. (default %t)", UseLayers()),
},
@@ -108,6 +112,10 @@ var (
Usage: "use `[username[:password]]` for accessing the registry",
},
cli.BoolFlag{
+ Name: "disable-compression, D",
+ Usage: "don't compress layers by default",
+ },
+ cli.BoolFlag{
Name: "disable-content-trust",
Usage: "This is a Docker specific option and is a NOOP",
},
@@ -115,10 +123,6 @@ var (
Name: "file, f",
Usage: "`pathname or URL` of a Dockerfile",
},
- cli.BoolFlag{
- Name: "force-rm",
- Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.",
- },
cli.StringFlag{
Name: "format",
Usage: "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.",
@@ -192,6 +196,12 @@ var (
Name: "add-host",
Usage: "add a custom host-to-IP mapping (`host:ip`) (default [])",
},
+ cli.StringFlag{
+ Name: "blob-cache",
+ Value: "",
+ Usage: "assume image blobs in the specified directory will be available for pushing",
+ Hidden: true, // this is here mainly so that we can test the API during integration tests
+ },
cli.StringSliceFlag{
Name: "cap-add",
Usage: "add the specified capability when running (default [])",
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index b87eb95c7..41fdea8b1 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -282,7 +282,7 @@ func SystemContextFromOptions(c *cli.Context) (*types.SystemContext, error) {
DockerCertPath: c.String("cert-dir"),
}
if c.IsSet("tls-verify") {
- ctx.DockerInsecureSkipTLSVerify = !c.BoolT("tls-verify")
+ ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
ctx.OCIInsecureSkipTLSVerify = !c.BoolT("tls-verify")
ctx.DockerDaemonInsecureSkipTLSVerify = !c.BoolT("tls-verify")
}
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index 52269541a..e677c8925 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -5,6 +5,7 @@ import (
"io"
"strings"
+ "github.com/containers/buildah/pkg/blobcache"
"github.com/containers/buildah/util"
cp "github.com/containers/image/copy"
"github.com/containers/image/docker/reference"
@@ -40,6 +41,10 @@ type PullOptions struct {
// image name alone can not be resolved to a reference to a source
// image. No separator is implicitly added.
Transport string
+ // BlobDirectory is the name of a directory in which we'll attempt to
+ // store copies of layer blobs that we pull down, if any. It should
+ // already exist.
+ BlobDirectory string
}
func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference, spec string) (string, error) {
@@ -146,11 +151,11 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio
srcRef, err := alltransports.ParseImageName(spec)
if err != nil {
if options.Transport == "" {
- options.Transport = DefaultTransport
+ options.Transport = util.DefaultTransport
}
logrus.Debugf("error parsing image name %q, trying with transport %q: %v", spec, options.Transport, err)
transport := options.Transport
- if transport != DefaultTransport {
+ if transport != util.DefaultTransport {
transport = transport + ":"
}
spec = transport + spec
@@ -182,6 +187,14 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio
if err != nil {
return nil, errors.Wrapf(err, "error parsing image name %q", destName)
}
+ var maybeCachedDestRef types.ImageReference = destRef
+ if options.BlobDirectory != "" {
+ cachedRef, err := blobcache.NewBlobCache(destRef, options.BlobDirectory, types.PreserveOriginal)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(destRef), options.BlobDirectory)
+ }
+ maybeCachedDestRef = cachedRef
+ }
policy, err := signature.DefaultPolicy(sc)
if err != nil {
@@ -200,7 +213,8 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio
}()
logrus.Debugf("copying %q to %q", spec, destName)
- if _, err := cp.Image(ctx, policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter, srcRef, sc, destRef, nil, "")); err != nil {
+ if _, err := cp.Image(ctx, policyContext, maybeCachedDestRef, srcRef, getCopyOptions(options.ReportWriter, srcRef, sc, maybeCachedDestRef, nil, "")); err != nil {
+ logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", spec, destName, err)
return nil, err
}
return destRef, nil
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index 718ef4e36..5d2cd6a32 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -451,7 +451,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
copyWithTar := b.copyWithTar(nil, nil)
- builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes)
+ builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID))
if err != nil {
return err
}
@@ -493,15 +493,21 @@ func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts
return mounts, nil
}
-func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string) ([]specs.Mount, error) {
+func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
var mounts []specs.Mount
+ hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID}
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
for _, volume := range builtinVolumes {
subdir := digest.Canonical.FromString(volume).Hex()
volumePath := filepath.Join(containerDir, "buildah-volumes", subdir)
+ srcPath := filepath.Join(mountPoint, volume)
+ initializeVolume := false
// If we need to, initialize the volume path's initial contents.
- if _, err := os.Stat(volumePath); err != nil && os.IsNotExist(err) {
+ if _, err := os.Stat(volumePath); err != nil {
+ if !os.IsNotExist(err) {
+ return nil, errors.Wrapf(err, "failed to stat %q for volume %q", volumePath, volume)
+ }
logrus.Debugf("setting up built-in volume at %q", volumePath)
if err = os.MkdirAll(volumePath, 0755); err != nil {
return nil, errors.Wrapf(err, "error creating directory %q for volume %q", volumePath, volume)
@@ -509,11 +515,21 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWit
if err = label.Relabel(volumePath, mountLabel, false); err != nil {
return nil, errors.Wrapf(err, "error relabeling directory %q for volume %q", volumePath, volume)
}
- srcPath := filepath.Join(mountPoint, volume)
- stat, err := os.Stat(srcPath)
- if err != nil {
+ initializeVolume = true
+ }
+ stat, err := os.Stat(srcPath)
+ if err != nil {
+ if !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume)
}
+ if err = idtools.MkdirAllAndChownNew(srcPath, 0755, hostOwner); err != nil {
+ return nil, errors.Wrapf(err, "error creating directory %q for volume %q", srcPath, volume)
+ }
+ if stat, err = os.Stat(srcPath); err != nil {
+ return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume)
+ }
+ }
+ if initializeVolume {
if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil {
return nil, errors.Wrapf(err, "failed to chmod %q for volume %q", volumePath, volume)
}
@@ -1044,24 +1060,31 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}
- hostFile, err := b.addNetworkConfig(path, "/etc/hosts", rootIDPair)
- if err != nil {
- return err
- }
- resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair)
- if err != nil {
- return err
- }
+ bindFiles := make(map[string]string)
+ namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...)
+ volumes := b.Volumes()
- if err := addHostsToFile(b.CommonBuildOpts.AddHost, hostFile); err != nil {
- return err
+ if !contains(volumes, "/etc/hosts") {
+ hostFile, err := b.addNetworkConfig(path, "/etc/hosts", rootIDPair)
+ if err != nil {
+ return err
+ }
+ bindFiles["/etc/hosts"] = hostFile
+
+ if err := addHostsToFile(b.CommonBuildOpts.AddHost, hostFile); err != nil {
+ return err
+ }
}
- bindFiles := map[string]string{
- "/etc/hosts": hostFile,
- "/etc/resolv.conf": resolvFile,
+ if !contains(volumes, "/etc/resolv.conf") {
+ resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair)
+ if err != nil {
+ return err
+ }
+ bindFiles["/etc/resolv.conf"] = resolvFile
}
- err = b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, b.Volumes(), b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, append(b.NamespaceOptions, options.NamespaceOptions...))
+
+ err = b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions)
if err != nil {
return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID)
}
@@ -1081,41 +1104,35 @@ func (b *Builder) Run(command []string, options RunOptions) error {
switch isolation {
case IsolationOCI:
- // The default is --rootless=auto, which makes troubleshooting a bit harder.
- // rootlessFlag := []string{"--rootless=false"}
- // for _, arg := range options.Args {
- // if strings.HasPrefix(arg, "--rootless") {
- // rootlessFlag = nil
- // }
- // }
- // options.Args = append(options.Args, rootlessFlag...)
var moreCreateArgs []string
if options.NoPivot {
moreCreateArgs = []string{"--no-pivot"}
} else {
moreCreateArgs = nil
}
- err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path))
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path))
case IsolationChroot:
err = chroot.RunUsingChroot(spec, path, options.Stdin, options.Stdout, options.Stderr)
case IsolationOCIRootless:
if err := setupRootlessSpecChanges(spec, path, rootUID, rootGID); err != nil {
return err
}
- rootlessFlag := []string{"--rootless=true"}
- for _, arg := range options.Args {
- if strings.HasPrefix(arg, "--rootless") {
- rootlessFlag = nil
- }
- }
- options.Args = append(options.Args, rootlessFlag...)
- err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, []string{"--no-new-keyring"}, spec, mountPoint, path, Package+"-"+filepath.Base(path))
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, []string{"--no-new-keyring"}, spec, mountPoint, path, Package+"-"+filepath.Base(path))
default:
err = errors.Errorf("don't know how to run this command")
}
return err
}
+func contains(volumes []string, v string) bool {
+ for _, i := range volumes {
+ if i == v {
+ return true
+ }
+ }
+ return false
+}
+
func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions) error {
switch isolation {
case IsolationOCIRootless:
@@ -1123,10 +1140,22 @@ func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions)
logrus.Debugf("Forcing use of an IPC namespace.")
}
options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.IPCNamespace)})
- if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host {
- logrus.Debugf("Disabling network namespace.")
+ _, err := exec.LookPath("slirp4netns")
+ hostNetworking := err != nil
+ networkNamespacePath := ""
+ if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil {
+ hostNetworking = ns.Host
+ networkNamespacePath = ns.Path
+ if !hostNetworking && networkNamespacePath != "" && !filepath.IsAbs(networkNamespacePath) {
+ logrus.Debugf("Disabling network namespace configuration.")
+ networkNamespacePath = ""
+ }
}
- options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.NetworkNamespace), Host: true})
+ options.NamespaceOptions.AddOrReplace(NamespaceOption{
+ Name: string(specs.NetworkNamespace),
+ Host: hostNetworking,
+ Path: networkNamespacePath,
+ })
if ns := options.NamespaceOptions.Find(string(specs.PIDNamespace)); ns == nil || ns.Host {
logrus.Debugf("Forcing use of a PID namespace.")
}
@@ -1227,9 +1256,10 @@ type runUsingRuntimeSubprocOptions struct {
ConfigureNetworks []string
MoreCreateArgs []string
ContainerName string
+ Isolation Isolation
}
-func (b *Builder) runUsingRuntimeSubproc(options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) {
+func (b *Builder) runUsingRuntimeSubproc(isolation Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) {
var confwg sync.WaitGroup
config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{
Options: options,
@@ -1240,6 +1270,7 @@ func (b *Builder) runUsingRuntimeSubproc(options RunOptions, configureNetwork bo
ConfigureNetworks: configureNetworks,
MoreCreateArgs: moreCreateArgs,
ContainerName: containerName,
+ Isolation: isolation,
})
if conferr != nil {
return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand)
@@ -1318,7 +1349,7 @@ func runUsingRuntimeMain() {
os.Exit(1)
}
// Run the container, start to finish.
- status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, options.Spec, options.RootPath, options.BundlePath, options.ContainerName)
+ status, err := runUsingRuntime(options.Isolation, options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, options.Spec, options.RootPath, options.BundlePath, options.ContainerName)
if err != nil {
fmt.Fprintf(os.Stderr, "error running container: %v\n", err)
os.Exit(1)
@@ -1333,7 +1364,7 @@ func runUsingRuntimeMain() {
os.Exit(1)
}
-func runUsingRuntime(options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) {
+func runUsingRuntime(isolation Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) {
// Lock the caller to a single OS-level thread.
runtime.LockOSThread()
@@ -1490,7 +1521,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, configureNetwork
}()
if configureNetwork {
- teardown, err := runConfigureNetwork(options, configureNetworks, pid, containerName, spec.Process.Args)
+ teardown, err := runConfigureNetwork(isolation, options, configureNetworks, pid, containerName, spec.Process.Args)
if teardown != nil {
defer teardown()
}
@@ -1623,9 +1654,81 @@ func runCollectOutput(fds, closeBeforeReadingFds []int) string {
}
return b.String()
}
+func setupRootlessNetwork(pid int) (teardown func(), err error) {
+ slirp4netns, err := exec.LookPath("slirp4netns")
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot find slirp4netns")
+ }
+
+ rootlessSlirpSyncR, rootlessSlirpSyncW, err := os.Pipe()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot create slirp4netns sync pipe")
+ }
+ defer rootlessSlirpSyncR.Close()
+
+ // Make sure the only fd inherited by slirp4netns is the sync pipe
+ files, err := ioutil.ReadDir("/proc/self/fd")
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot list open fds")
+ }
+ for _, f := range files {
+ fd, err := strconv.Atoi(f.Name())
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot parse fd")
+ }
+ if fd == int(rootlessSlirpSyncW.Fd()) {
+ continue
+ }
+ unix.CloseOnExec(fd)
+ }
+
+ cmd := exec.Command(slirp4netns, "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0")
+ cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil
+ cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW}
+
+ err = cmd.Start()
+ rootlessSlirpSyncW.Close()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot start slirp4netns")
+ }
-func runConfigureNetwork(options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) {
+ b := make([]byte, 1)
+ for {
+ if err := rootlessSlirpSyncR.SetDeadline(time.Now().Add(1 * time.Second)); err != nil {
+ return nil, errors.Wrapf(err, "error setting slirp4netns pipe timeout")
+ }
+ if _, err := rootlessSlirpSyncR.Read(b); err == nil {
+ break
+ } else {
+ if os.IsTimeout(err) {
+ // Check if the process is still running.
+ var status syscall.WaitStatus
+ _, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to read slirp4netns process status")
+ }
+ if status.Exited() || status.Signaled() {
+ return nil, errors.New("slirp4netns failed")
+ }
+
+ continue
+ }
+ return nil, errors.Wrapf(err, "failed to read from slirp4netns sync pipe")
+ }
+ }
+
+ return func() {
+ cmd.Process.Kill()
+ cmd.Wait()
+ }, nil
+}
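+
+// Note: "-r 3" asks slirp4netns to report readiness on fd 3, which is the
+// first entry in cmd.ExtraFiles (the write end of the sync pipe created
+// above), and "-c" makes it configure the tap0 device inside the namespace
+// of the given pid.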
+
+func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) {
var netconf, undo []*libcni.NetworkConfigList
+
+ if isolation == IsolationOCIRootless {
+ return setupRootlessNetwork(pid)
+ }
// Scan for CNI configuration files.
confdir := options.CNIConfigDir
files, err := libcni.ConfFiles(confdir, []string{".conf"})
@@ -1956,7 +2059,7 @@ func runAcceptTerminal(consoleListener *net.UnixListener, terminalSize *specs.Bo
for i := range scm {
fds, err := unix.ParseUnixRights(&scm[i])
if err != nil {
- return -1, errors.Wrapf(err, "error parsing unix rights control message: %v")
+ return -1, errors.Wrapf(err, "error parsing unix rights control message: %v", &scm[i])
}
logrus.Debugf("fds: %v", fds)
if len(fds) == 0 {
diff --git a/vendor/github.com/containers/buildah/unshare/unshare.go b/vendor/github.com/containers/buildah/unshare/unshare.go
index fbe623660..2a970b8d6 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare.go
+++ b/vendor/github.com/containers/buildah/unshare/unshare.go
@@ -55,6 +55,10 @@ func (c *Cmd) Start() error {
}
c.Env = append(c.Env, fmt.Sprintf("_Buildah-unshare=%d", c.UnshareFlags))
+ // Set the environment variables that the libpod "rootless" package expects to find.
+ c.Env = append(c.Env, "_LIBPOD_USERNS_CONFIGURED=done")
+ c.Env = append(c.Env, fmt.Sprintf("_LIBPOD_ROOTLESS_UID=%d", os.Geteuid()))
+
// Create the pipe for reading the child's PID.
pidRead, pidWrite, err := os.Pipe()
if err != nil {
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index 09aa7e1eb..66a4e535a 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -175,11 +175,11 @@ func (b *Builder) tarPath() func(path string) (io.ReadCloser, error) {
// isRegistryInsecure checks if the named registry is marked as not secure
func isRegistryInsecure(registry string, sc *types.SystemContext) (bool, error) {
- registries, err := sysregistriesv2.GetRegistries(sc)
+ reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
if err != nil {
return false, errors.Wrapf(err, "unable to parse the registries configuration (%s)", sysregistries.RegistriesConfPath(sc))
}
- if reginfo := sysregistriesv2.FindRegistry(registry, registries); reginfo != nil {
+ if reginfo != nil {
if reginfo.Insecure {
logrus.Debugf("registry %q is marked insecure in registries configuration %q", registry, sysregistries.RegistriesConfPath(sc))
} else {
@@ -193,11 +193,11 @@ func isRegistryInsecure(registry string, sc *types.SystemContext) (bool, error)
// isRegistryBlocked checks if the named registry is marked as blocked
func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) {
- registries, err := sysregistriesv2.GetRegistries(sc)
+ reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
if err != nil {
return false, errors.Wrapf(err, "unable to parse the registries configuration (%s)", sysregistries.RegistriesConfPath(sc))
}
- if reginfo := sysregistriesv2.FindRegistry(registry, registries); reginfo != nil {
+ if reginfo != nil {
if reginfo.Blocked {
logrus.Debugf("registry %q is marked as blocked in registries configuration %q", registry, sysregistries.RegistriesConfPath(sc))
} else {
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 3a415a7f3..427c8db28 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -7,10 +7,8 @@ import (
"net/url"
"os"
"path"
- "path/filepath"
"strconv"
"strings"
- "syscall"
"github.com/containers/image/directory"
dockerarchive "github.com/containers/image/docker/archive"
@@ -31,6 +29,10 @@ import (
const (
minimumTruncatedIDLength = 3
+ // DefaultTransport is a prefix that we apply to an image name if we
+ // can't find one in the local Store, in order to generate a source
+ // reference for the image that we can then copy to the local Store.
+ DefaultTransport = "docker://"
)
var (
@@ -89,6 +91,7 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
}
}
+ name = strings.TrimPrefix(name, DefaultTransport)
// If the image name already included a domain component, we're done.
named, err := reference.ParseNormalizedNamed(name)
if err != nil {
@@ -119,12 +122,11 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
// Figure out the list of registries.
var registries []string
- allRegistries, err := sysregistriesv2.GetRegistries(sc)
+ searchRegistries, err := sysregistriesv2.FindUnqualifiedSearchRegistries(sc)
if err != nil {
logrus.Debugf("unable to read configured registries to complete %q: %v", name, err)
- registries = []string{}
}
- for _, registry := range sysregistriesv2.FindUnqualifiedSearchRegistries(allRegistries) {
+ for _, registry := range searchRegistries {
if !registry.Blocked {
registries = append(registries, registry.URL)
}
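The effect of the two changes above: a "docker://" prefix is stripped before normalization, and unqualified names are expanded against the configured, unblocked search registries. A self-contained sketch of that expansion (the registry entries here are hypothetical; FindUnqualifiedSearchRegistries would normally supply the list from registries.conf):

    package main

    import (
    	"fmt"
    	"strings"
    )

    const DefaultTransport = "docker://" // as declared in util/util.go above

    func main() {
    	name := strings.TrimPrefix("docker://myimage", DefaultTransport)

    	// Hypothetical unqualified-search entries.
    	searchRegistries := []struct {
    		URL     string
    		Blocked bool
    	}{
    		{URL: "registry-a.example.com"},
    		{URL: "registry-b.example.com", Blocked: true},
    	}

    	var candidates []string
    	for _, registry := range searchRegistries {
    		if !registry.Blocked {
    			candidates = append(candidates, registry.URL+"/"+name)
    		}
    	}
    	fmt.Println(candidates) // [registry-a.example.com/myimage]
    }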
@@ -450,60 +452,6 @@ func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap,
return uid, gid, nil
}
-// UnsharedRootPath returns a location under ($XDG_DATA_HOME/containers/storage,
-// or $HOME/.local/share/containers/storage, or
-// (the user's home directory)/.local/share/containers/storage, or an error.
-func UnsharedRootPath(homedir string) (string, error) {
- // If $XDG_DATA_HOME is defined...
- if envDataHome, haveDataHome := os.LookupEnv("XDG_DATA_HOME"); haveDataHome {
- return filepath.Join(envDataHome, "containers", "storage"), nil
- }
- // If $XDG_DATA_HOME is not defined, but $HOME is defined...
- if envHomedir, haveHomedir := os.LookupEnv("HOME"); haveHomedir {
- // Default to the user's $HOME/.local/share/containers/storage subdirectory.
- return filepath.Join(envHomedir, ".local", "share", "containers", "storage"), nil
- }
- // If we know where our home directory is...
- if homedir != "" {
- // Default to the user's homedir/.local/share/containers/storage subdirectory.
- return filepath.Join(homedir, ".local", "share", "containers", "storage"), nil
- }
- return "", errors.New("unable to determine a --root location: neither $XDG_DATA_HOME nor $HOME is set")
-}
-
-// UnsharedRunrootPath returns $XDG_RUNTIME_DIR/run, /var/run/user/(the user's UID)/run, or an error.
-func UnsharedRunrootPath(uid string) (string, error) {
- // If $XDG_RUNTIME_DIR is defined...
- if envRuntimeDir, haveRuntimeDir := os.LookupEnv("XDG_RUNTIME_DIR"); haveRuntimeDir {
- return filepath.Join(envRuntimeDir, "run"), nil
- }
- var runtimeDir string
- // If $XDG_RUNTIME_DIR is not defined, but we know our UID...
- if uid != "" {
- tmpDir := filepath.Join("/var/run/user", uid)
- os.MkdirAll(tmpDir, 0700)
- st, err := os.Stat(tmpDir)
- if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Getuid() && st.Mode().Perm() == 0700 {
- runtimeDir = tmpDir
- }
- }
- if runtimeDir == "" {
- home := os.Getenv("HOME")
- if home == "" {
- return "", errors.New("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
- }
- resolvedHome, err := filepath.EvalSymlinks(home)
- if err != nil {
- return "", errors.Wrapf(err, "cannot resolve %s", home)
- }
- runtimeDir = filepath.Join(resolvedHome, "rundir")
- }
- if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
- return "", errors.New("could not set XDG_RUNTIME_DIR")
- }
- return runtimeDir, nil
-}
-
// GetPolicyContext sets up, initializes and returns a new context for the specified policy
func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error) {
policy, err := signature.DefaultPolicy(ctx)
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index d79412afc..61325114c 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -3,9 +3,10 @@ github.com/blang/semver master
github.com/BurntSushi/toml master
github.com/containerd/continuity master
github.com/containernetworking/cni v0.7.0-alpha1
-github.com/containers/image 5e5b67d6b1cf43cc349128ec3ed7d5283a6cc0d1
-github.com/containers/libpod 2afadeec6696fefac468a49c8ba24b0bc275aa75
-github.com/containers/storage 41294c85d97bef688e18f710402895dbecde3308
+github.com/containers/image d53afe179b381fafb427e6b9cf9b1996a98c1067
+github.com/boltdb/bolt master
+github.com/containers/libpod fe4f09493f41f675d24c969d1b60d1a6a45ddb9e
+github.com/containers/storage db40f96d853dfced60c563e61fb66ba231ce7c8d
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
@@ -36,9 +37,9 @@ github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc master
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runtime-tools master
-github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
+github.com/opencontainers/selinux master
github.com/openshift/imagebuilder master
-github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
+github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b
github.com/pborman/uuid master
github.com/pkg/errors master
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
index 313d802b3..013080e8d 100644
--- a/vendor/github.com/containers/image/copy/copy.go
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -13,6 +13,7 @@ import (
"time"
"github.com/containers/image/image"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/pkg/compression"
"github.com/containers/image/signature"
"github.com/containers/image/transports"
@@ -24,14 +25,16 @@ import (
)
type digestingReader struct {
- source io.Reader
- digester digest.Digester
- expectedDigest digest.Digest
- validationFailed bool
+ source io.Reader
+ digester digest.Digester
+ expectedDigest digest.Digest
+ validationFailed bool
+ validationSucceeded bool
}
// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
-// and set validationFailed to true if the source stream does not match expectedDigest.
+// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
+// (neither is set if EOF is never reached).
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
if err := expectedDigest.Validate(); err != nil {
return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
@@ -64,6 +67,7 @@ func (d *digestingReader) Read(p []byte) (int, error) {
d.validationFailed = true
return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
}
+ d.validationSucceeded = true
}
return n, err
}
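The validationSucceeded flag is only set when the wrapped stream is read all the way to EOF, which is what lets copyBlobFromStream (below) distinguish a fully verified blob from a partially read one. A self-contained sketch of the same verify-on-EOF pattern (illustrative names, not this package's types):

    package main

    import (
    	"fmt"
    	"io"
    	"io/ioutil"
    	"strings"

    	"github.com/opencontainers/go-digest"
    )

    // verifyingReader compares the digest only once the underlying reader
    // reports io.EOF, mirroring digestingReader above.
    type verifyingReader struct {
    	source    io.Reader
    	digester  digest.Digester
    	expected  digest.Digest
    	succeeded bool
    	failed    bool
    }

    func (r *verifyingReader) Read(p []byte) (int, error) {
    	n, err := r.source.Read(p)
    	if n > 0 {
    		r.digester.Hash().Write(p[:n])
    	}
    	if err == io.EOF {
    		if actual := r.digester.Digest(); actual != r.expected {
    			r.failed = true
    			return n, fmt.Errorf("digest mismatch: expected %s, got %s", r.expected, actual)
    		}
    		r.succeeded = true
    	}
    	return n, err
    }

    func main() {
    	data := "layer bytes"
    	r := &verifyingReader{
    		source:   strings.NewReader(data),
    		digester: digest.Canonical.Digester(),
    		expected: digest.FromString(data),
    	}
    	if _, err := io.Copy(ioutil.Discard, r); err != nil {
    		panic(err)
    	}
    	fmt.Println(r.succeeded) // true only because EOF was actually reached
    }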
@@ -71,21 +75,22 @@ func (d *digestingReader) Read(p []byte) (int, error) {
// copier allows us to keep track of diffID values for blobs, and other
// data shared across one or more images in a possible manifest list.
type copier struct {
- cachedDiffIDs map[digest.Digest]digest.Digest
dest types.ImageDestination
rawSource types.ImageSource
reportWriter io.Writer
progressInterval time.Duration
progress chan types.ProgressProperties
+ blobInfoCache types.BlobInfoCache
}
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
type imageCopier struct {
- c *copier
- manifestUpdates *types.ManifestUpdateOptions
- src types.Image
- diffIDsAreNeeded bool
- canModifyManifest bool
+ c *copier
+ manifestUpdates *types.ManifestUpdateOptions
+ src types.Image
+ diffIDsAreNeeded bool
+ canModifyManifest bool
+ canSubstituteBlobs bool
}
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
@@ -141,12 +146,15 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}()
c := &copier{
- cachedDiffIDs: make(map[digest.Digest]digest.Digest),
dest: dest,
rawSource: rawSource,
reportWriter: reportWriter,
progressInterval: options.ProgressInterval,
progress: options.Progress,
+ // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
+ // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
+ // we might want to add a separate CommonCtx — or would that be too confusing?
+ blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
}
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
@@ -235,6 +243,13 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
src: src,
// diffIDsAreNeeded is computed later
canModifyManifest: len(sigs) == 0,
+ // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
+ // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
+ // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
+ // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
+ // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
+ // and we would reuse and sign it.
+ canSubstituteBlobs: len(sigs) == 0 && options.SignBy == "",
}
if err := ic.updateEmbeddedDockerReference(); err != nil {
@@ -498,32 +513,24 @@ type diffIDResult struct {
// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
- // Check if we already have a blob with this digest
- haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(ctx, srcInfo)
- if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
- }
- // If we already have a cached diffID for this blob, we don't need to compute it
- diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "")
- // If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
- if haveBlob && !diffIDIsNeeded {
- // Check the blob sizes match, if we were given a size this time
- if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
- return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
- }
- srcInfo.Size = extantBlobSize
- // Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
- blobinfo, err := ic.c.dest.ReapplyBlob(ctx, srcInfo)
+ cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+ diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
+
+ // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
+ if !diffIDIsNeeded {
+ reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
+ return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
+ }
+ if reused {
+ ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
+ return blobInfo, cachedDiffID, nil
}
- ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
- return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err
}
// Fallback: copy the layer, computing the diffID if we need to do so
ic.c.Printf("Copying blob %s\n", srcInfo.Digest)
- srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo)
+ srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
}
@@ -543,11 +550,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
}
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
- ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
+ // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
+ // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
+ ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
return blobInfo, diffIDResult.digest, nil
}
} else {
- return blobInfo, ic.c.cachedDiffIDs[srcInfo.Digest], nil
+ return blobInfo, cachedDiffID, nil
}
}
@@ -624,7 +633,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
// === Process input through digestingReader to validate against the expected digest.
// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
// use a separate validation failure indicator.
- // Note that we don't use a stronger "validationSucceeded" indicator, because
+ // Note that for this check we don't use the stronger "validationSucceeded" indicator, because
// dest.PutBlob may detect that the layer already exists, in which case we don't
// read stream to the end, and validation does not happen.
digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
@@ -660,8 +669,10 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
// === Deal with layer compression/decompression if necessary
var inputInfo types.BlobInfo
+ var compressionOperation types.LayerCompression
if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
logrus.Debugf("Compressing blob on the fly")
+ compressionOperation = types.Compress
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()
@@ -674,6 +685,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
inputInfo.Size = -1
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
logrus.Debugf("Blob will be decompressed")
+ compressionOperation = types.Decompress
s, err := decompressor(destStream)
if err != nil {
return types.BlobInfo{}, err
@@ -684,6 +696,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
inputInfo.Size = -1
} else {
logrus.Debugf("Using original blob without modification")
+ compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
}
@@ -699,7 +712,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
}
// === Finally, send the layer stream to dest.
- uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, isConfig)
+ uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
}
@@ -722,6 +735,22 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
}
+ if digestingReader.validationSucceeded {
+ // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
+ // srcInfo.Digest describes the pre-compressionOperation input, verified by digestingReader
+ // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
+ // (because inputInfo.Digest == "", this must have been computed afresh).
+ switch compressionOperation {
+ case types.PreserveOriginal:
+ break // Do nothing, we have only one digest and we might not have even verified it.
+ case types.Compress:
+ c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+ case types.Decompress:
+ c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+ default:
+ return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
+ }
+ }
return uploadedInfo, nil
}
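The RecordDigestUncompressedPair calls above always pass (compressed, uncompressed) in that order, whichever direction the conversion ran. A hedged sketch of the bookkeeping, assuming the in-memory implementation added in pkg/blobinfocache/memory.go is exposed as NewMemoryCache (as the default-cache plumbing suggests):

    package main

    import (
    	"fmt"

    	"github.com/containers/image/pkg/blobinfocache"
    	"github.com/opencontainers/go-digest"
    )

    func main() {
    	cache := blobinfocache.NewMemoryCache()

    	// Stand-in digests; in copyBlobFromStream these are uploadedInfo.Digest
    	// and srcInfo.Digest, depending on the direction of the conversion.
    	compressed := digest.FromString("gzipped layer")
    	uncompressed := digest.FromString("uncompressed layer")

    	// After Compress, the upload result is the compressed side;
    	// after Decompress, the source was the compressed side.
    	cache.RecordDigestUncompressedPair(compressed, uncompressed)

    	// A later copy can skip DiffID computation via this lookup:
    	fmt.Println(cache.UncompressedDigest(compressed) == uncompressed) // true
    }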
diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go
index d888931fe..d75c195b2 100644
--- a/vendor/github.com/containers/image/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/directory/directory_dest.go
@@ -127,10 +127,11 @@ func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
if err != nil {
return types.BlobInfo{}, err
@@ -169,27 +170,27 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *dirImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
blobPath := d.ref.layerPath(info.Digest)
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
- return false, -1, nil
+ return false, types.BlobInfo{}, nil
}
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
- return true, finfo.Size(), nil
-}
+ return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
-func (d *dirImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
}
// PutManifest writes manifest to the destination.
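For the directory transport, blob reuse reduces to an os.Stat probe of the layer path, returning the on-disk size on a hit. A hedged standalone sketch of that probe:

    package main

    import (
    	"fmt"
    	"os"
    )

    // blobExistsAt reports whether a blob file exists at path and, if so, its
    // size, mirroring the Stat-based check in TryReusingBlob above.
    func blobExistsAt(path string) (bool, int64, error) {
    	finfo, err := os.Stat(path)
    	if err != nil && os.IsNotExist(err) {
    		return false, -1, nil // absence is not an error
    	}
    	if err != nil {
    		return false, -1, err
    	}
    	return true, finfo.Size(), nil
    }

    func main() {
    	exists, size, err := blobExistsAt("/tmp/example-blob")
    	fmt.Println(exists, size, err)
    }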
diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go
index 5e17c37c0..3625def80 100644
--- a/vendor/github.com/containers/image/directory/directory_src.go
+++ b/vendor/github.com/containers/image/directory/directory_src.go
@@ -49,7 +49,9 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided; Size may be -1 and MediaType is optional.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
r, err := os.Open(s.ref.layerPath(info.Digest))
if err != nil {
return nil, -1, err
diff --git a/vendor/github.com/containers/image/docker/cache.go b/vendor/github.com/containers/image/docker/cache.go
new file mode 100644
index 000000000..64ad57a7c
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/cache.go
@@ -0,0 +1,23 @@
+package docker
+
+import (
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/types"
+)
+
+// bicTransportScope returns a BICTransportScope appropriate for ref.
+func bicTransportScope(ref dockerReference) types.BICTransportScope {
+ // Blobs can be reused across the whole registry.
+ return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
+}
+
+// newBICLocationReference returns a BICLocationReference appropriate for ref.
+func newBICLocationReference(ref dockerReference) types.BICLocationReference {
+ // Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
+ return types.BICLocationReference{Opaque: ref.ref.Name()}
+}
+
+// parseBICLocationReference returns a repository for encoded lr.
+func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
+ return reference.ParseNormalizedNamed(lr.Opaque)
+}
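The two identifiers deliberately have different widths: blobs can be mounted across repositories within one registry, so reuse candidates are scoped to the whole registry, while a recorded location pins a blob to one repository. An illustration (values assume a ref for docker.io/library/busybox):

    // bicTransportScope(ref).Opaque       == "docker.io"
    //     candidates from any repository on that registry are usable (via mounting)
    // newBICLocationReference(ref).Opaque == "docker.io/library/busybox"
    //     the tag or digest of ref is irrelevant to where the blob lives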
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go
index 6d2c5b670..7f55dbe7f 100644
--- a/vendor/github.com/containers/image/docker/docker_client.go
+++ b/vendor/github.com/containers/image/docker/docker_client.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/pkg/docker/config"
+ "github.com/containers/image/pkg/sysregistriesv2"
"github.com/containers/image/pkg/tlsclientconfig"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/client"
@@ -69,30 +70,33 @@ type extensionSignatureList struct {
}
type bearerToken struct {
- Token string `json:"token"`
- AccessToken string `json:"access_token"`
- ExpiresIn int `json:"expires_in"`
- IssuedAt time.Time `json:"issued_at"`
+ Token string `json:"token"`
+ AccessToken string `json:"access_token"`
+ ExpiresIn int `json:"expires_in"`
+ IssuedAt time.Time `json:"issued_at"`
+ expirationTime time.Time
}
// dockerClient is configuration for dealing with a single Docker registry.
type dockerClient struct {
// The following members are set by newDockerClient and do not change afterwards.
- sys *types.SystemContext
- registry string
+ sys *types.SystemContext
+ registry string
+ client *http.Client
+ insecureSkipTLSVerify bool
+ // The following members are not set by newDockerClient and must be set by callers if needed.
username string
password string
- client *http.Client
signatureBase signatureStorageBase
scope authScope
+ extraScope *authScope // If non-nil, a temporary extra token scope (necessary for mounting from another repo)
// The following members are detected registry properties:
// They are set after a successful detectProperties(), and never change afterwards.
scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
challenges []challenge
supportsSignatures bool
- // The following members are private state for setupRequestAuth, both are valid if token != nil.
- token *bearerToken
- tokenExpiration time.Time
+ // Private state for setupRequestAuth
+ tokenCache map[string]bearerToken
}
type authScope struct {
@@ -128,6 +132,7 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
if token.IssuedAt.IsZero() {
token.IssuedAt = time.Now().UTC()
}
+ token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
return token, nil
}
@@ -194,13 +199,26 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write
if err != nil {
return nil, err
}
- remoteName := reference.Path(ref.ref)
- return newDockerClientWithDetails(sys, registry, username, password, actions, sigBase, remoteName)
+ client, err := newDockerClient(sys, registry, ref.ref.Name())
+ if err != nil {
+ return nil, err
+ }
+ client.username = username
+ client.password = password
+ client.signatureBase = sigBase
+ client.scope.actions = actions
+ client.scope.remoteName = reference.Path(ref.ref)
+ return client, nil
}
-// newDockerClientWithDetails returns a new dockerClient instance for the given parameters
-func newDockerClientWithDetails(sys *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) {
+// newDockerClient returns a new dockerClient instance for the given registry
+// and reference. The reference is used to query the registry configuration
+// and can be either a registry (e.g., "registry.com[:5000]") or a repository
+// (e.g., "registry.com[:5000][/some/namespace]/repo").
+// Please note that newDockerClient does not set all members of dockerClient
+// (e.g., username and password); those must be set by callers if necessary.
+func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
hostName := registry
if registry == dockerHostname {
registry = dockerRegistry
@@ -221,33 +239,44 @@ func newDockerClientWithDetails(sys *types.SystemContext, registry, username, pa
return nil, err
}
- if sys != nil && sys.DockerInsecureSkipTLSVerify {
- tr.TLSClientConfig.InsecureSkipVerify = true
+ // Check if TLS verification shall be skipped (default=false) which can
+ // either be specified in the sysregistriesv2 configuration or via the
+ // SystemContext, whereas the SystemContext is prioritized.
+ skipVerify := false
+ if sys != nil && sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
+ // Only use the SystemContext if the actual value is defined.
+ skipVerify = sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
+ } else {
+ reg, err := sysregistriesv2.FindRegistry(sys, reference)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error loading registries")
+ }
+ if reg != nil {
+ skipVerify = reg.Insecure
+ }
}
+ tr.TLSClientConfig.InsecureSkipVerify = skipVerify
return &dockerClient{
- sys: sys,
- registry: registry,
- username: username,
- password: password,
- client: &http.Client{Transport: tr},
- signatureBase: sigBase,
- scope: authScope{
- actions: actions,
- remoteName: remoteName,
- },
+ sys: sys,
+ registry: registry,
+ client: &http.Client{Transport: tr},
+ insecureSkipTLSVerify: skipVerify,
+ tokenCache: map[string]bearerToken{},
}, nil
}
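The skip-verify decision above gives an explicit SystemContext setting priority and only falls back to registries.conf when the context is undefined. Restated as a helper (a sketch, not library code; it reuses the types from this diff):

    package main

    import (
    	"fmt"

    	"github.com/containers/image/pkg/sysregistriesv2"
    	"github.com/containers/image/types"
    )

    // skipTLSVerify restates the decision order used by newDockerClient above.
    func skipTLSVerify(sys *types.SystemContext, reg *sysregistriesv2.Registry) bool {
    	// An explicit SystemContext preference always wins.
    	if sys != nil && sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
    		return sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
    	}
    	// Otherwise defer to the matching registries.conf entry, if any.
    	if reg != nil {
    		return reg.Insecure
    	}
    	return false
    }

    func main() {
    	sys := &types.SystemContext{DockerInsecureSkipTLSVerify: types.OptionalBoolTrue}
    	fmt.Println(skipTLSVerify(sys, nil))                                       // true
    	fmt.Println(skipTLSVerify(nil, &sysregistriesv2.Registry{Insecure: true})) // true
    	fmt.Println(skipTLSVerify(nil, nil))                                       // false
    }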
// CheckAuth validates the credentials by attempting to log into the registry
// returns an error if an error occurred while making the HTTP request or the status code received was 401
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
- newLoginClient, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
+ client, err := newDockerClient(sys, registry, registry)
if err != nil {
return errors.Wrapf(err, "error creating new docker client")
}
+ client.username = username
+ client.password = password
- resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth)
+ resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth)
if err != nil {
return err
}
@@ -299,16 +328,21 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
return nil, errors.Wrapf(err, "error getting username and password")
}
- // The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail
- // So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results
+ // The /v2/_catalog endpoint has been disabled for docker.io therefore
+ // the call made to that endpoint will fail. So using the v1 hostname
+ // for docker.io for simplicity of implementation and the fact that it
+ // returns search results.
+ hostname := registry
if registry == dockerHostname {
- registry = dockerV1Hostname
+ hostname = dockerV1Hostname
}
- client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
+ client, err := newDockerClient(sys, hostname, registry)
if err != nil {
return nil, errors.Wrapf(err, "error creating new docker client")
}
+ client.username = username
+ client.password = password
// Only try the v1 search endpoint if the search query is not empty. If it is
// empty, skip to the v2 endpoint.
@@ -432,24 +466,23 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
req.SetBasicAuth(c.username, c.password)
return nil
case "bearer":
- if c.token == nil || time.Now().After(c.tokenExpiration) {
- realm, ok := challenge.Parameters["realm"]
- if !ok {
- return errors.Errorf("missing realm in bearer auth challenge")
- }
- service, _ := challenge.Parameters["service"] // Will be "" if not present
- var scope string
- if c.scope.remoteName != "" && c.scope.actions != "" {
- scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
- }
- token, err := c.getBearerToken(req.Context(), realm, service, scope)
+ cacheKey := ""
+ scopes := []authScope{c.scope}
+ if c.extraScope != nil {
+ // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
+ cacheKey = fmt.Sprintf("%s:%s", c.extraScope.remoteName, c.extraScope.actions)
+ scopes = append(scopes, *c.extraScope)
+ }
+ token, ok := c.tokenCache[cacheKey]
+ if !ok || time.Now().After(token.expirationTime) {
+ t, err := c.getBearerToken(req.Context(), challenge, scopes)
if err != nil {
return err
}
- c.token = token
- c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
+ token = *t
+ c.tokenCache[cacheKey] = token
}
- req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token))
return nil
default:
logrus.Debugf("no handler for %s authentication", challenge.Scheme)
@@ -459,7 +492,12 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
return nil
}
-func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) {
+func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) {
+ realm, ok := challenge.Parameters["realm"]
+ if !ok {
+ return nil, errors.Errorf("missing realm in bearer auth challenge")
+ }
+
authReq, err := http.NewRequest("GET", realm, nil)
if err != nil {
return nil, err
@@ -469,11 +507,13 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
if c.username != "" {
getParams.Add("account", c.username)
}
- if service != "" {
+ if service, ok := challenge.Parameters["service"]; ok && service != "" {
getParams.Add("service", service)
}
- if scope != "" {
- getParams.Add("scope", scope)
+ for _, scope := range scopes {
+ if scope.remoteName != "" && scope.actions != "" {
+ getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
+ }
}
authReq.URL.RawQuery = getParams.Encode()
if c.username != "" && c.password != "" {
@@ -530,7 +570,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
return nil
}
err := ping("https")
- if err != nil && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify {
+ if err != nil && c.insecureSkipTLSVerify {
err = ping("http")
}
if err != nil {
@@ -554,7 +594,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
return true
}
isV1 := pingV1("https")
- if !isV1 && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify {
+ if !isV1 && c.insecureSkipTLSVerify {
isV1 = pingV1("http")
}
if isV1 {
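setupRequestAuth above caches bearer tokens per scope: the key is "" for the client's base scope and "remoteName:actions" when an extraScope is set for a cross-repository mount, and a token is refreshed only once time passes expirationTime, which newBearerTokenFromJSONBlob precomputes as IssuedAt plus ExpiresIn seconds. A runnable sketch of that bookkeeping (bearerToken here is a local stand-in, not the real type):

    package main

    import (
    	"fmt"
    	"time"
    )

    // bearerToken is a local stand-in mirroring the fields used above.
    type bearerToken struct {
    	Token          string
    	ExpiresIn      int
    	IssuedAt       time.Time
    	expirationTime time.Time
    }

    func main() {
    	tok := bearerToken{Token: "secret", ExpiresIn: 300, IssuedAt: time.Now().UTC()}
    	// Precomputed once, as in newBearerTokenFromJSONBlob:
    	tok.expirationTime = tok.IssuedAt.Add(time.Duration(tok.ExpiresIn) * time.Second)

    	tokenCache := map[string]bearerToken{}
    	cacheKey := "library/other-repo:pull" // "" when only the base scope is in play
    	tokenCache[cacheKey] = tok

    	cached, ok := tokenCache[cacheKey]
    	needRefresh := !ok || time.Now().After(cached.expirationTime)
    	fmt.Println(needRefresh) // false: the cached token is still valid
    }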
diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go
index 9bbffef93..2f471f648 100644
--- a/vendor/github.com/containers/image/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/docker/docker_image_dest.go
@@ -15,6 +15,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
@@ -113,17 +114,21 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
if inputInfo.Digest.String() != "" {
- haveBlob, size, err := d.HasBlob(ctx, inputInfo)
+ // This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
+ // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
+ // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
+ haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, blobinfocache.NoCache, false)
if err != nil {
return types.BlobInfo{}, err
}
if haveBlob {
- return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+ return reusedInfo, nil
}
}
@@ -160,7 +165,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}
- // FIXME: DELETE uploadLocation on failure
+ // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)
locationQuery := uploadLocation.Query()
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
@@ -177,19 +182,15 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
logrus.Debugf("Upload of layer %s complete", computedDigest)
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref))
return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
+// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
-func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
- if info.Digest == "" {
- return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
- }
- checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
-
+func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest) (bool, int64, error) {
+ checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath)
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
if err != nil {
@@ -202,7 +203,7 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
- return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name())
+ return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
@@ -211,8 +212,134 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
}
}
-func (d *dockerImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest) error {
+ u := url.URL{
+ Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
+ RawQuery: url.Values{
+ "mount": {srcDigest.String()},
+ "from": {reference.Path(srcRepo)},
+ }.Encode(),
+ }
+ mountPath := u.String()
+ logrus.Debugf("Trying to mount %s", mountPath)
+ res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ switch res.StatusCode {
+ case http.StatusCreated:
+ logrus.Debugf("... mount OK")
+ return nil
+ case http.StatusAccepted:
+ // Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
+ // Abort, and let the ultimate caller do an upload when it's ready, instead.
+ // NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
+ uploadLocation, err := res.Location()
+ if err != nil {
+ return errors.Wrap(err, "Error determining upload URL after a mount attempt")
+ }
+ logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
+ res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth)
+ if err != nil {
+ logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
+ } else {
+ defer res2.Body.Close()
+ if res2.StatusCode != http.StatusNoContent {
+ logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res2.StatusCode))
+ }
+ }
+ // Anyway, if canceling the upload fails, ignore it and return the more important error:
+ return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ default:
+ logrus.Debugf("Error mounting, response %#v", *res)
+ return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ }
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ if info.Digest == "" {
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
+ }
+
+ // First, check whether the blob happens to already exist at the destination.
+ exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest)
+ if err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ if exists {
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
+ return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+ }
+
+ // Then try reusing blobs from other locations.
+
+ // Checking candidateRepo, and mounting from it, requires an expanded token scope.
+ // We still want to reuse the ping information and other aspects of the client, so rather than making a fresh copy, we use this somewhat ugly extraScope hack.
+ if d.c.extraScope != nil {
+ return false, types.BlobInfo{}, errors.New("Internal error: dockerClient.extraScope was set before TryReusingBlob")
+ }
+ defer func() {
+ d.c.extraScope = nil
+ }()
+ for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
+ candidateRepo, err := parseBICLocationReference(candidate.Location)
+ if err != nil {
+ logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
+ continue
+ }
+ logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
+
+ // Sanity checks:
+ if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
+ logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
+ continue
+ }
+ if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+ logrus.Debug("... Already tried the primary destination")
+ continue
+ }
+
+ // Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
+ d.c.extraScope = &authScope{
+ remoteName: reference.Path(candidateRepo),
+ actions: "pull",
+ }
+ // This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
+ // But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
+ // So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
+ // On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
+ // Even worse, docker/distribution does not actually reasonably implement canceling uploads
+ // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
+ // so, be a nice client and don't create unnecessary upload sessions on the server.
+ exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest)
+ if err != nil {
+ logrus.Debugf("... Failed: %v", err)
+ continue
+ }
+ if !exists {
+ // FIXME? Should we drop the blob from cache here (and elsewhere?)?
+ continue // logrus.Debug() already happened in blobExists
+ }
+ if candidateRepo.Name() != d.ref.ref.Name() {
+ if err := d.mountBlob(ctx, candidateRepo, candidate.Digest); err != nil {
+ logrus.Debugf("... Mount failed: %v", err)
+ continue
+ }
+ }
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
+ return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
+ }
+
+ return false, types.BlobInfo{}, nil
}
// PutManifest writes manifest to the destination.
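The cross-repository mount attempted by mountBlob is a single registry API call: 201 Created means the blob was reused with no upload, while 202 Accepted means the registry started an ordinary upload session instead, which the code then tries to cancel. A sketch of the URL construction (the path template mirrors the blobUploadPath constant assumed by the diff; repository and digest values are illustrative):

    package main

    import (
    	"fmt"
    	"net/url"
    )

    func main() {
    	// Building the cross-repository mount URL the way mountBlob does.
    	u := url.URL{
    		Path: fmt.Sprintf("/v2/%s/blobs/uploads/", "library/destination"),
    		RawQuery: url.Values{
    			"mount": {"sha256:deadbeef"},
    			"from":  {"library/source"},
    		}.Encode(),
    	}
    	fmt.Println(u.String())
    	// /v2/library/destination/blobs/uploads/?from=library%2Fsource&mount=sha256%3Adeadbeef
    }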
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
index aedab9731..fbed6297f 100644
--- a/vendor/github.com/containers/image/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -162,7 +162,9 @@ func getBlobSize(resp *http.Response) int64 {
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided; Size may be -1 and MediaType is optional.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
return s.getExternalBlob(ctx, info.URLs)
}
@@ -177,6 +179,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (i
// print url also
return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
}
+ cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
return res.Body, getBlobSize(res), nil
}
diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go
index d6510ccf1..ad8a48a03 100644
--- a/vendor/github.com/containers/image/docker/tarfile/dest.go
+++ b/vendor/github.com/containers/image/docker/tarfile/dest.go
@@ -85,10 +85,11 @@ func (d *Destination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on the uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
@@ -120,12 +121,12 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
}
// Maybe the blob has been already sent
- ok, size, err := d.HasBlob(ctx, inputInfo)
+ ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
if err != nil {
return types.BlobInfo{}, err
}
if ok {
- return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+ return reusedInfo, nil
}
if isConfig {
@@ -151,29 +152,21 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with
-// the matching digest which can be reapplied using ReapplyBlob. Unlike
-// PutBlob, the digest can not be empty. If HasBlob returns true, the size of
-// the blob must also be returned. If the destination does not contain the
-// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
-// returns a non-nil error only on an unexpected failure.
-func (d *Destination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
+ return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
}
if blob, ok := d.blobs[info.Digest]; ok {
- return true, blob.Size, nil
+ return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
}
- return false, -1, nil
-}
-
-// ReapplyBlob informs the image destination that a blob for which HasBlob
-// previously returned true would have been passed to PutBlob if it had
-// returned false. Like HasBlob and unlike PutBlob, the digest can not be
-// empty. If the blob is a filesystem layer, this signifies that the changes
-// it describes need to be applied again when composing a filesystem tree.
-func (d *Destination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+ return false, types.BlobInfo{}, nil
}
func (d *Destination) createRepositoriesFile(rootLayerID string) error {
diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go
index 942893a81..d94ed9783 100644
--- a/vendor/github.com/containers/image/docker/tarfile/src.go
+++ b/vendor/github.com/containers/image/docker/tarfile/src.go
@@ -398,7 +398,9 @@ func (r uncompressedReadCloser) Close() error {
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided; Size may be -1 and MediaType is optional.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := s.ensureCachedDataIsPresent(); err != nil {
return nil, 0, err
}
diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go
index b639ab714..cee60f824 100644
--- a/vendor/github.com/containers/image/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/image/docker_schema2.go
@@ -11,6 +11,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -95,7 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor))
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), blobinfocache.NoCache)
if err != nil {
return nil, err
}
@@ -249,7 +250,9 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
if historyEntry.EmptyLayer {
if !haveGzippedEmptyLayer {
logrus.Debugf("Uploading empty layer during conversion to schema 1")
- info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, false)
+ // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
+ // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
+ info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, blobinfocache.NoCache, false)
if err != nil {
return nil, errors.Wrap(err, "Error uploading empty layer")
}
diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go
index 298db360d..6fe2a9a32 100644
--- a/vendor/github.com/containers/image/image/oci.go
+++ b/vendor/github.com/containers/image/image/oci.go
@@ -7,6 +7,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -60,7 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
}
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config))
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), blobinfocache.NoCache)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go
index 3c6b7dffa..3997ac2ee 100644
--- a/vendor/github.com/containers/image/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/oci/archive/oci_dest.go
@@ -77,20 +77,27 @@ func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
return d.unpackedDest.IgnoresEmbeddedDockerReference()
}
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
-func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
- return d.unpackedDest.PutBlob(ctx, stream, inputInfo, isConfig)
-}
-
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob
-func (d *ociArchiveImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
- return d.unpackedDest.HasBlob(ctx, info)
-}
-
-func (d *ociArchiveImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return d.unpackedDest.ReapplyBlob(ctx, info)
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
}
// PutManifest writes manifest to the destination
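The TryReusingBlob/PutBlob split is easiest to see from the caller's side. Below is a hedged sketch of a copy loop under assumed `src`, `dest`, `layer` and `cache` values (none of these names come from this diff):

```go
package example

import (
	"context"

	"github.com/containers/image/types"
)

// copyLayer sketches the caller side of the new contract; src, dest, layer and
// cache are all assumed to come from elsewhere.
func copyLayer(ctx context.Context, src types.ImageSource, dest types.ImageDestination,
	layer types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
	// Ask the destination to reuse the blob first; canSubstitute=true lets it
	// pick an equivalent blob (same uncompressed digest) known to the cache.
	reused, info, err := dest.TryReusingBlob(ctx, layer, cache, true)
	if err != nil {
		return types.BlobInfo{}, err
	}
	if reused {
		return info, nil // info carries at least Digest and Size.
	}
	// Otherwise stream it; both GetBlob and PutBlob may update the cache.
	stream, _, err := src.GetBlob(ctx, layer, cache)
	if err != nil {
		return types.BlobInfo{}, err
	}
	defer stream.Close()
	return dest.PutBlob(ctx, stream, layer, cache, false)
}
```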
diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go
index d04773c1f..084d818f7 100644
--- a/vendor/github.com/containers/image/oci/archive/oci_src.go
+++ b/vendor/github.com/containers/image/oci/archive/oci_src.go
@@ -76,9 +76,11 @@ func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest
return s.unpackedSrc.GetManifest(ctx, instanceDigest)
}
-// GetBlob returns a stream for the specified blob, and the blob's size.
-func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
- return s.unpackedSrc.GetBlob(ctx, info)
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ return s.unpackedSrc.GetBlob(ctx, info, cache)
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go
index 351632750..b5a6e08f8 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go
@@ -107,13 +107,15 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil.
}
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
if err != nil {
return types.BlobInfo{}, err
@@ -173,30 +175,29 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *ociImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
- return false, -1, nil
+ return false, types.BlobInfo{}, nil
}
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
- return true, finfo.Size(), nil
-}
-
-func (d *ociImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+ return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go
index 33115c00d..086a7040d 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_src.go
@@ -92,8 +92,10 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, mimeType, nil
}
-// GetBlob returns a stream for the specified blob, and the blob's size.
-func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
return s.getExternalBlob(ctx, info.URLs)
}
diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go
index dbd04f10b..0cce1e6c7 100644
--- a/vendor/github.com/containers/image/openshift/openshift.go
+++ b/vendor/github.com/containers/image/openshift/openshift.go
@@ -212,11 +212,13 @@ func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := s.ensureImageIsResolved(ctx); err != nil {
return nil, 0, err
}
- return s.docker.GetBlob(ctx, info)
+ return s.docker.GetBlob(ctx, info, cache)
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
@@ -379,23 +381,23 @@ func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
- return d.docker.PutBlob(ctx, stream, inputInfo, isConfig)
-}
-
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *openshiftImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
- return d.docker.HasBlob(ctx, info)
-}
-
-func (d *openshiftImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return d.docker.ReapplyBlob(ctx, info)
+func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go
index afff7dc1b..064898948 100644
--- a/vendor/github.com/containers/image/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/ostree/ostree_dest.go
@@ -132,7 +132,15 @@ func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil.
}
-func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
if err != nil {
return types.BlobInfo{}, err
@@ -322,12 +330,18 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
}
-func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
-
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if d.repo == nil {
repo, err := openRepo(d.ref.repo)
if err != nil {
- return false, 0, err
+ return false, types.BlobInfo{}, err
}
d.repo = repo
}
@@ -335,29 +349,25 @@ func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInf
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
- return found, -1, err
+ return found, types.BlobInfo{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
if err != nil || !found {
- return found, -1, err
+ return found, types.BlobInfo{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.size")
if err != nil || !found {
- return found, -1, err
+ return found, types.BlobInfo{}, err
}
size, err := strconv.ParseInt(data, 10, 64)
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
- return true, size, nil
-}
-
-func (d *ostreeImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+ return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go
index 1f325b2a7..e73cae198 100644
--- a/vendor/github.com/containers/image/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/ostree/ostree_src.go
@@ -255,8 +255,10 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
return getter.Get(path)
}
-// GetBlob returns a stream for the specified blob, and the blob's size.
-func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
blob := info.Digest.Hex()
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go
new file mode 100644
index 000000000..4ee809134
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go
@@ -0,0 +1,329 @@
+package blobinfocache
+
+import (
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/boltdb/bolt"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+ // we can simply start over with a different filename; update blobInfoCacheFilename.
+
+ // FIXME: For CRI-O, does this need to hide information between different users?
+
+ // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
+ uncompressedDigestBucket = []byte("uncompressedDigest")
+ // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
+ // (as a set of key=digest, value="" pairs)
+ digestByUncompressedBucket = []byte("digestByUncompressed")
+ // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
+ // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
+ knownLocationsBucket = []byte("knownLocations")
+)
+
+// Concurrency:
+// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
+// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
+
+// pathLock contains a lock for a specific BoltDB database path.
+type pathLock struct {
+ refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
+ mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
+}
+
+var (
+ // pathLocks contains a lock for each currently open file.
+ // This must be global so that independently created instances of boltDBCache exclude each other.
+ // The map is protected by pathLocksMutex.
+ // FIXME? Should this be based on device:inode numbers instead of paths?
+ pathLocks = map[string]*pathLock{}
+ pathLocksMutex = sync.Mutex{}
+)
+
+// lockPath obtains the pathLock for path.
+// The caller must call unlockPath eventually.
+func lockPath(path string) {
+ pl := func() *pathLock { // A scope for defer
+ pathLocksMutex.Lock()
+ defer pathLocksMutex.Unlock()
+ pl, ok := pathLocks[path]
+ if ok {
+ pl.refCount++
+ } else {
+ pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
+ pathLocks[path] = pl
+ }
+ return pl
+ }()
+ pl.mutex.Lock()
+}
+
+// unlockPath releases the pathLock for path.
+func unlockPath(path string) {
+ pathLocksMutex.Lock()
+ defer pathLocksMutex.Unlock()
+ pl, ok := pathLocks[path]
+ if !ok {
+ // Should this return an error instead? BlobInfoCache ultimately ignores errors…
+ panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
+ }
+ pl.mutex.Unlock()
+ pl.refCount--
+ if pl.refCount == 0 {
+ delete(pathLocks, path)
+ }
+}
+
+// boltDBCache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
+//
+// Note that we don’t keep the database open across operations, because that would lock the file and block any other
+// users; instead, we need to open/close it for every single write or lookup.
+type boltDBCache struct {
+ path string
+}
+
+// NewBoltDBCache returns a BlobInfoCache implementation which uses a BoltDB file at path.
+// Most users should call DefaultCache instead.
+func NewBoltDBCache(path string) types.BlobInfoCache {
+ return &boltDBCache{path: path}
+}
+
+// view runs the specified fn within a read-only transaction on the database.
+func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) {
+ // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
+ // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
+ // a read lock, blocking any future writes.
+ // Hence this preliminary check, which is RACY: Another process could remove the file
+ // between the Lstat call and opening the database.
+ if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
+ return err
+ }
+
+ lockPath(bdc.path)
+ defer unlockPath(bdc.path)
+ db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := db.Close(); retErr == nil && err != nil {
+ retErr = err
+ }
+ }()
+
+ return db.View(fn)
+}
+
+// update runs the specified fn within a read-write transaction on the database.
+func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) {
+ lockPath(bdc.path)
+ defer unlockPath(bdc.path)
+ db, err := bolt.Open(bdc.path, 0600, nil)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := db.Close(); retErr == nil && err != nil {
+ retErr = err
+ }
+ }()
+
+ return db.Update(fn)
+}
+
+// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
+func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
+ if b := tx.Bucket(uncompressedDigestBucket); b != nil {
+ if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
+ d, err := digest.Parse(string(uncompressedBytes))
+ if err == nil {
+ return d
+ }
+ // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ }
+ // Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
+ // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+ // when we already record a (compressed, uncompressed) pair.
+ if b := tx.Bucket(digestByUncompressedBucket); b != nil {
+ if b = b.Bucket([]byte(anyDigest.String())); b != nil {
+ c := b.Cursor()
+ if k, _ := c.First(); k != nil { // The bucket is non-empty
+ return anyDigest
+ }
+ }
+ }
+ return ""
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ var res digest.Digest
+ if err := bdc.view(func(tx *bolt.Tx) error {
+ res = bdc.uncompressedDigest(tx, anyDigest)
+ return nil
+ }); err != nil { // Including os.IsNotExist(err)
+ return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ return res
+}
+
+// RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+ _ = bdc.update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
+ if err != nil {
+ return err
+ }
+ key := []byte(anyDigest.String())
+ if previousBytes := b.Get(key); previousBytes != nil {
+ previous, err := digest.Parse(string(previousBytes))
+ if err != nil {
+ return err
+ }
+ if previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+ }
+ }
+ if err := b.Put(key, []byte(uncompressed.String())); err != nil {
+ return err
+ }
+
+ b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
+ if err != nil {
+ return err
+ }
+ if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
+ return err
+ }
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+ _ = bdc.update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
+ if err != nil {
+ return err
+ }
+ value, err := time.Now().MarshalBinary()
+ if err != nil {
+ return err
+ }
+ if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
+ return err
+ }
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// appendReplacementCandidates creates candidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
+func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []candidateWithTime {
+ b := scopeBucket.Bucket([]byte(digest.String()))
+ if b == nil {
+ return candidates
+ }
+ _ = b.ForEach(func(k, v []byte) error {
+ t := time.Time{}
+ if err := t.UnmarshalBinary(v); err != nil {
+ return err
+ }
+ candidates = append(candidates, candidateWithTime{
+ candidate: types.BICReplacementCandidate{
+ Digest: digest,
+ Location: types.BICLocationReference{Opaque: string(k)},
+ },
+ lastSeen: t,
+ })
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+ return candidates
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ res := []candidateWithTime{}
+ var uncompressedDigestValue digest.Digest // = ""
+ if err := bdc.view(func(tx *bolt.Tx) error {
+ scopeBucket := tx.Bucket(knownLocationsBucket)
+ if scopeBucket == nil {
+ return nil
+ }
+ scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
+ if scopeBucket == nil {
+ return nil
+ }
+ scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
+ if scopeBucket == nil {
+ return nil
+ }
+
+ res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
+ if canSubstitute {
+ if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
+ b := tx.Bucket(digestByUncompressedBucket)
+ if b != nil {
+ b = b.Bucket([]byte(uncompressedDigestValue.String()))
+ if b != nil {
+ if err := b.ForEach(func(k, _ []byte) error {
+ d, err := digest.Parse(string(k))
+ if err != nil {
+ return err
+ }
+ if d != primaryDigest && d != uncompressedDigestValue {
+ res = bdc.appendReplacementCandidates(res, scopeBucket, d)
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ }
+ }
+ if uncompressedDigestValue != primaryDigest {
+ res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
+ }
+ }
+ }
+ return nil
+ }); err != nil { // Including os.IsNotExist(err)
+ return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+
+ return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
+}
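A minimal sketch of driving the new BoltDB-backed cache directly; the file path and digest values below are made up, and each call opens and closes the database, serialized by the per-path lock described above:

```go
package example

import (
	"fmt"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/opencontainers/go-digest"
)

func recordPair() {
	// The path is illustrative; every operation opens and closes the file.
	cache := blobinfocache.NewBoltDBCache("/tmp/blob-info-cache-v1.boltdb")

	gz := digest.FromString("compressed variant")    // stand-ins for real digests
	plain := digest.FromString("uncompressed variant")

	cache.RecordDigestUncompressedPair(gz, plain)
	fmt.Println(cache.UncompressedDigest(gz)) // prints plain's value
}
```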
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
new file mode 100644
index 000000000..6da9f2805
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
@@ -0,0 +1,63 @@
+package blobinfocache
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/image/types"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // blobInfoCacheFilename is the file name used for blob info caches.
+ // If the format changes in an incompatible way, increase the version number.
+ blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
+ // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
+ systemBlobInfoCacheDir = "/var/lib/containers/cache"
+)
+
+// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
+// euid is used so that (sudo …) does not write root-owned files into the unprivileged user’s home directory.
+func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
+ if sys != nil && sys.BlobInfoCacheDir != "" {
+ return sys.BlobInfoCacheDir, nil
+ }
+
+ // FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
+ // and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
+ if euid == 0 {
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
+ }
+ return systemBlobInfoCacheDir, nil
+ }
+
+ // This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
+ dataDir := os.Getenv("XDG_DATA_HOME")
+ if dataDir == "" {
+ home := os.Getenv("HOME")
+ if home == "" {
+ return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
+ }
+ dataDir = filepath.Join(home, ".local", "share")
+ }
+ return filepath.Join(dataDir, "containers", "cache"), nil
+}
+
+// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
+func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
+ dir, err := blobInfoCacheDir(sys, os.Geteuid())
+ if err != nil {
+ logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
+ return NewMemoryCache()
+ }
+ path := filepath.Join(dir, blobInfoCacheFilename)
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", err)
+ return NewMemoryCache()
+ }
+
+ logrus.Debugf("Using blob info cache at %s", path)
+ return NewBoltDBCache(path)
+}
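A short illustration of the intended entry point, assuming only that `sys` may be nil (in which case the euid-based defaults above apply):

```go
package example

import (
	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
)

// openDefaultCache shows the typical call site for DefaultCache.
func openDefaultCache(sys *types.SystemContext) types.BlobInfoCache {
	// Root gets /var/lib/containers/cache; other users get
	// $XDG_DATA_HOME/containers/cache (or ~/.local/share/containers/cache),
	// with a silent fallback to a memory-only cache on failure.
	return blobinfocache.DefaultCache(sys)
}
```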
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/memory.go b/vendor/github.com/containers/image/pkg/blobinfocache/memory.go
new file mode 100644
index 000000000..1ce7dee13
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/memory.go
@@ -0,0 +1,123 @@
+package blobinfocache
+
+import (
+ "time"
+
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+// locationKey only exists to make lookup in knownLocations easier.
+type locationKey struct {
+ transport string
+ scope types.BICTransportScope
+ blobDigest digest.Digest
+}
+
+// memoryCache implements an in-memory-only BlobInfoCache
+type memoryCache struct {
+ uncompressedDigests map[digest.Digest]digest.Digest
+ digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
+ knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+}
+
+// NewMemoryCache returns a BlobInfoCache implementation which is in-memory only.
+// This is primarily intended for tests, but also used as a fallback if DefaultCache
+// can’t determine, or set up, the location for a persistent cache.
+// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache.
+func NewMemoryCache() types.BlobInfoCache {
+ return &memoryCache{
+ uncompressedDigests: map[digest.Digest]digest.Digest{},
+ digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
+ knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
+ }
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ if d, ok := mem.uncompressedDigests[anyDigest]; ok {
+ return d
+ }
+ // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
+ // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+ // when we already record a (compressed, uncompressed) pair.
+ if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
+ return anyDigest
+ }
+ return ""
+}
+
+// RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+ if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+ }
+ mem.uncompressedDigests[anyDigest] = uncompressed
+
+ anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
+ if !ok {
+ anyDigestSet = map[digest.Digest]struct{}{}
+ mem.digestsByUncompressed[uncompressed] = anyDigestSet
+ }
+ anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+ key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
+ locationScope, ok := mem.knownLocations[key]
+ if !ok {
+ locationScope = map[types.BICLocationReference]time.Time{}
+ mem.knownLocations[key] = locationScope
+ }
+ locationScope[location] = time.Now() // Possibly overwriting an older entry.
+}
+
+// appendReplacementCandidates creates candidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
+func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []candidateWithTime {
+ locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
+ for l, t := range locations {
+ candidates = append(candidates, candidateWithTime{
+ candidate: types.BICReplacementCandidate{
+ Digest: digest,
+ Location: l,
+ },
+ lastSeen: t,
+ })
+ }
+ return candidates
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ res := []candidateWithTime{}
+ res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
+ var uncompressedDigest digest.Digest // = ""
+ if canSubstitute {
+ if uncompressedDigest = mem.UncompressedDigest(primaryDigest); uncompressedDigest != "" {
+ otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
+ for d := range otherDigests {
+ if d != primaryDigest && d != uncompressedDigest {
+ res = mem.appendReplacementCandidates(res, transport, scope, d)
+ }
+ }
+ if uncompressedDigest != primaryDigest {
+ res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest)
+ }
+ }
+ }
+ return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+}
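Since NewMemoryCache is primarily intended for tests, here is a sketch of a test exercising the digest-pair logic above; the digest strings are placeholders:

```go
package blobinfocache_test

import (
	"testing"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/opencontainers/go-digest"
)

func TestUncompressedLookup(t *testing.T) {
	cache := blobinfocache.NewMemoryCache()
	gz := digest.FromString("gzip variant")
	plain := digest.FromString("plain variant")

	cache.RecordDigestUncompressedPair(gz, plain)
	if got := cache.UncompressedDigest(gz); got != plain {
		t.Fatalf("got %q, want %q", got, plain)
	}
	// plain was never recorded as a key, but because a compressed variant maps
	// to it, the cache infers that plain is itself uncompressed.
	if got := cache.UncompressedDigest(plain); got != plain {
		t.Fatalf("got %q, want %q", got, plain)
	}
}
```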
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/none.go b/vendor/github.com/containers/image/pkg/blobinfocache/none.go
new file mode 100644
index 000000000..5658d89ff
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/none.go
@@ -0,0 +1,47 @@
+package blobinfocache
+
+import (
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// noCache implements a dummy BlobInfoCache which records no data.
+type noCache struct {
+}
+
+// NoCache implements BlobInfoCache by not recording any data.
+//
+// This exists primarily for implementations of configGetter for Manifest.Inspect,
+// because configs only have one representation.
+// Any use of BlobInfoCache with blobs should usually use at least a short-lived cache.
+var NoCache types.BlobInfoCache = noCache{}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ return ""
+}
+
+// RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ return nil
+}
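A sketch of the configGetter-style use case named above, assuming `configInfo` comes from a parsed manifest; since a config has only one representation, there is nothing useful to cache:

```go
package example

import (
	"context"
	"io/ioutil"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
)

// readConfig fetches a config blob without recording any reuse data.
func readConfig(ctx context.Context, src types.ImageSource, configInfo types.BlobInfo) ([]byte, error) {
	stream, _, err := src.GetBlob(ctx, configInfo, blobinfocache.NoCache)
	if err != nil {
		return nil, err
	}
	defer stream.Close()
	return ioutil.ReadAll(stream)
}
```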
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go b/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go
new file mode 100644
index 000000000..02709aa1c
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go
@@ -0,0 +1,108 @@
+package blobinfocache
+
+import (
+ "sort"
+ "time"
+
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
+// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
+// This is a heuristic/guess, and could well use a different value.
+const replacementAttempts = 5
+
+// candidateWithTime is the input to types.BICReplacementCandidate prioritization.
+type candidateWithTime struct {
+ candidate types.BICReplacementCandidate // The replacement candidate
+ lastSeen time.Time // Time the candidate was last known to exist (either read or written)
+}
+
+// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
+// along with the specially-treated digest values for the implementation of sort.Interface.Less
+type candidateSortState struct {
+ cs []candidateWithTime // The entries to sort
+ primaryDigest digest.Digest // The digest the user actually asked for
+ uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
+}
+
+func (css *candidateSortState) Len() int {
+ return len(css.cs)
+}
+
+func (css *candidateSortState) Less(i, j int) bool {
+ xi := css.cs[i]
+ xj := css.cs[j]
+
+ // primaryDigest entries come first, more recent first.
+ // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
+ // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
+
+ // First, deal with the primaryDigest/uncompressedDigest cases:
+ if xi.candidate.Digest != xj.candidate.Digest {
+ // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
+ if xi.candidate.Digest == css.primaryDigest {
+ return true
+ }
+ if xj.candidate.Digest == css.primaryDigest {
+ return false
+ }
+ if css.uncompressedDigest != "" {
+ if xi.candidate.Digest == css.uncompressedDigest {
+ return false
+ }
+ if xj.candidate.Digest == css.uncompressedDigest {
+ return true
+ }
+ }
+ } else { // xi.candidate.Digest == xj.candidate.Digest
+ // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
+ if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) {
+ return xi.lastSeen.After(xj.lastSeen)
+ }
+ }
+
+ // Neither of the digests is primaryDigest/uncompressedDigest:
+ if !xi.lastSeen.Equal(xj.lastSeen) { // Order primarily by time
+ return xi.lastSeen.After(xj.lastSeen)
+ }
+ // Fall back to digest, if timestamps end up _exactly_ the same (how?!)
+ return xi.candidate.Digest < xj.candidate.Digest
+}
+
+func (css *candidateSortState) Swap(i, j int) {
+ css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
+}
+
+// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
+// number of entries to limit, only to make testing simpler.
+func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
+ // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
+ // compare equal.
+ sort.Sort(&candidateSortState{
+ cs: cs,
+ primaryDigest: primaryDigest,
+ uncompressedDigest: uncompressedDigest,
+ })
+
+ resLength := len(cs)
+ if resLength > maxCandidates {
+ resLength = maxCandidates
+ }
+ res := make([]types.BICReplacementCandidate, resLength)
+ for i := range res {
+ res[i] = cs[i].candidate
+ }
+ return res
+}
+
+// destructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
+// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
+// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
+//
+// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
+// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
+func destructivelyPrioritizeReplacementCandidates(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
+ return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
+}
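Because destructivelyPrioritizeReplacementCandidatesWithMax is unexported, a sanity check has to live in the same package. A sketch of such a test, with placeholder digests, showing that a primaryDigest candidate outranks a more recently seen one:

```go
package blobinfocache

import (
	"testing"
	"time"

	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
)

func TestPrimaryDigestSortsFirst(t *testing.T) {
	primary := digest.FromString("primary")
	other := digest.FromString("other")
	now := time.Now()
	cs := []candidateWithTime{
		{types.BICReplacementCandidate{Digest: other, Location: types.BICLocationReference{Opaque: "B"}}, now},
		{types.BICReplacementCandidate{Digest: primary, Location: types.BICLocationReference{Opaque: "A"}}, now.Add(-time.Hour)},
	}
	res := destructivelyPrioritizeReplacementCandidatesWithMax(cs, primary, "", 5)
	// The primaryDigest candidate wins even with an older lastSeen timestamp.
	if len(res) == 0 || res[0].Digest != primary {
		t.Fatalf("unexpected order: %v", res)
	}
}
```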
diff --git a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
index 067f512ad..afc7312d1 100644
--- a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
@@ -3,7 +3,7 @@ package sysregistriesv2
import (
"fmt"
"io/ioutil"
- "net/url"
+ "os"
"path/filepath"
"strings"
"sync"
@@ -82,8 +82,8 @@ func (e *InvalidRegistries) Error() string {
}
// parseURL parses the input string, performs some sanity checks and returns
-// the sanitized input string. An error is returned in case parsing fails or
-// or if URI scheme or user is set.
+// the sanitized input string. An error is returned if the input string is
+// empty or if it contains an "http{s,}://" prefix.
func parseURL(input string) (string, error) {
trimmed := strings.TrimRight(input, "/")
@@ -91,49 +91,11 @@ func parseURL(input string) (string, error) {
return "", &InvalidRegistries{s: "invalid URL: cannot be empty"}
}
- // Ultimately, we expect input of the form example.com[/namespace/…], a prefix
- // of a fully-expended reference (containers/image/docker/Reference.String()).
- // c/image/docker/Reference does not currently provide such a parser.
- // So, we use url.Parse("http://"+trimmed) below to ~verify the format, possibly
- // letting some invalid input in, trading that off for a simpler parser.
- //
- // url.Parse("http://"+trimmed) is, sadly, too permissive, notably for
- // trimmed == "http://example.com/…", url.Parse("http://http://example.com/…")
- // is accepted and parsed as
- // {Scheme: "http", Host: "http:", Path: "//example.com/…"}.
- //
- // So, first we do an explicit check for an unwanted scheme prefix:
-
- // This will parse trimmed=="http://example.com/…" with Scheme: "http". Perhaps surprisingly,
- // it also succeeds for the input we want to accept, in different ways:
- // "example.com" -> {Scheme:"", Opaque:"", Path:"example.com"}
- // "example.com/repo" -> {Scheme:"", Opaque:"", Path:"example.com/repo"}
- // "example.com:5000" -> {Scheme:"example.com", Opaque:"5000"}
- // "example.com:5000/repo" -> {Scheme:"example.com", Opaque:"5000/repo"}
- uri, err := url.Parse(trimmed)
- if err != nil {
- return "", &InvalidRegistries{s: fmt.Sprintf("invalid URL '%s': %v", input, err)}
- }
-
- // Check if a URI Scheme is set.
- // Note that URLs that do not start with a slash after the scheme are
- // interpreted as `scheme:opaque[?query][#fragment]`; see above for examples.
- if uri.Scheme != "" && uri.Opaque == "" {
+ if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
msg := fmt.Sprintf("invalid URL '%s': URI schemes are not supported", input)
return "", &InvalidRegistries{s: msg}
}
- uri, err = url.Parse("http://" + trimmed)
- if err != nil {
- msg := fmt.Sprintf("invalid URL '%s': sanitized URL did not parse: %v", input, err)
- return "", &InvalidRegistries{s: msg}
- }
-
- if uri.User != nil {
- msg := fmt.Sprintf("invalid URL '%s': user/password are not supported", trimmed)
- return "", &InvalidRegistries{s: msg}
- }
-
return trimmed, nil
}
@@ -279,7 +241,18 @@ var configMutex = sync.Mutex{}
// are synchronized via configMutex.
var configCache = make(map[string][]Registry)
+// InvalidateCache invalidates the registry cache. This function is meant to be
+// used by long-running processes that need to pick up changes made to the
+// registry config files after they were cached.
+func InvalidateCache() {
+ configMutex.Lock()
+ defer configMutex.Unlock()
+ configCache = make(map[string][]Registry)
+}
+
// GetRegistries loads and returns the registries specified in the config.
+// Note that the parsed content of registry config files is cached. To reload,
+// use `InvalidateCache` and then call `GetRegistries` again.
func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
configPath := getConfigPath(ctx)
@@ -293,6 +266,13 @@ func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
// load the config
config, err := loadRegistryConf(configPath)
if err != nil {
+ // Return an empty []Registry if we use the default config,
+ // which implies that the config path of the SystemContext
+ // isn't set. Note: if ctx.SystemRegistriesConfPath points to
+ // the default config, we will still return an error.
+ if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") {
+ return []Registry{}, nil
+ }
return nil, err
}
@@ -323,23 +303,33 @@ func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
// FindUnqualifiedSearchRegistries returns all registries that are configured
// for unqualified image search (i.e., with Registry.Search == true).
-func FindUnqualifiedSearchRegistries(registries []Registry) []Registry {
+func FindUnqualifiedSearchRegistries(ctx *types.SystemContext) ([]Registry, error) {
+ registries, err := GetRegistries(ctx)
+ if err != nil {
+ return nil, err
+ }
+
unqualified := []Registry{}
for _, reg := range registries {
if reg.Search {
unqualified = append(unqualified, reg)
}
}
- return unqualified
+ return unqualified, nil
}
// FindRegistry returns the Registry with the longest prefix for ref. If no
// Registry prefixes the image, nil is returned.
-func FindRegistry(ref string, registries []Registry) *Registry {
+func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
+ registries, err := GetRegistries(ctx)
+ if err != nil {
+ return nil, err
+ }
+
reg := Registry{}
prefixLen := 0
for _, r := range registries {
- if strings.HasPrefix(ref, r.Prefix) {
+ if strings.HasPrefix(ref, r.Prefix+"/") || ref == r.Prefix {
length := len(r.Prefix)
if length > prefixLen {
reg = r
@@ -348,9 +338,9 @@ func FindRegistry(ref string, registries []Registry) *Registry {
}
}
if prefixLen != 0 {
- return &reg
+ return &reg, nil
}
- return nil
+ return nil, nil
}
// Reads the global registry file from the filesystem. Returns a byte array.
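A sketch of the new call pattern for downstream callers: the lookup helpers now load (and cache) the config themselves instead of taking a pre-parsed []Registry. The ref string and printed field are illustrative:

```go
package example

import (
	"fmt"

	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

// lookupRegistry resolves the longest-prefix registry for ref, then forces a
// config reload, as a long-running process might.
func lookupRegistry(sys *types.SystemContext, ref string) error {
	reg, err := sysregistriesv2.FindRegistry(sys, ref)
	if err != nil {
		return err
	}
	if reg != nil {
		fmt.Println("longest matching prefix:", reg.Prefix)
	}
	// Long-running processes can pick up config file edits:
	sysregistriesv2.InvalidateCache()
	_, err = sysregistriesv2.GetRegistries(sys) // re-parses on this call
	return err
}
```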
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
index d1b010a76..bd6813119 100644
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -16,6 +16,7 @@ import (
"github.com/containers/image/image"
"github.com/containers/image/internal/tmpdir"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
@@ -99,8 +100,10 @@ func (s storageImageSource) Close() error {
return nil
}
-// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given.
-func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
if info.Digest == image.GzippedEmptyLayerDigest {
return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
}
@@ -317,9 +320,17 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
}
-// PutBlob stores a layer or data blob in our temporary directory, checking that any information
-// in the blobinfo matches the incoming data.
-func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ // Stores a layer or data blob in our temporary directory, checking that any information
+ // in the blobinfo matches the incoming data.
errorBlobInfo := types.BlobInfo{
Digest: "",
Size: -1,
@@ -370,6 +381,8 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
if blobSize < 0 {
blobSize = counter.Count
}
+ // This is safe because we have just computed both values ourselves.
+ cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
return types.BlobInfo{
Digest: blobDigest,
Size: blobSize,
@@ -377,59 +390,82 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be
-// reapplied using ReapplyBlob.
-//
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (s *storageImageDestination) HasBlob(ctx context.Context, blobinfo types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if blobinfo.Digest == "" {
- return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
if err := blobinfo.Digest.Validate(); err != nil {
- return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
}
+
// Check if we've already cached it in a file.
if size, ok := s.fileSizes[blobinfo.Digest]; ok {
- return true, size, nil
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: size,
+ MediaType: blobinfo.MediaType,
+ }, nil
}
+
// Check if we have a wasn't-compressed layer in storage that's based on that blob.
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
}
if len(layers) > 0 {
// Save this for completeness.
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, layers[0].UncompressedSize, nil
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: layers[0].UncompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
}
+
// Check if we have a was-compressed layer in storage that's based on that blob.
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
}
if len(layers) > 0 {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, layers[0].CompressedSize, nil
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: layers[0].CompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+
+ // Does the blob correspond to a known DiffID which we already have available?
+ // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
+ // uncompressed layer, and that can happen only if canSubstitute.
+ if canSubstitute {
+ if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
+ if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest)
+ }
+ if len(layers) > 0 {
+ s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
+ return true, types.BlobInfo{
+ Digest: uncompressedDigest,
+ Size: layers[0].UncompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+ }
}
- // Nope, we don't have it.
- return false, -1, nil
-}
-// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the
-// same one when it walks the list in the manifest.
-func (s *storageImageDestination) ReapplyBlob(ctx context.Context, blobinfo types.BlobInfo) (types.BlobInfo, error) {
- present, size, err := s.HasBlob(ctx, blobinfo)
- if !present {
- return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo)
- }
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo)
- }
- blobinfo.Size = size
- return blobinfo, nil
+ // Nope, we don't have it.
+ return false, types.BlobInfo{}, nil
}
// computeID computes a recommended image ID based on information we have so far. If
@@ -514,8 +550,12 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if !haveDiffID {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
// or to even check if we had it.
+ // Use blobinfocache.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+ // that relies on using a blob digest that has never been seen by the store had better call
+ // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
+ // so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
- has, _, err := s.HasBlob(ctx, blob.BlobInfo)
+ has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, blobinfocache.NoCache, false)
if err != nil {
return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
}
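On the caller side, the old HasBlob/ReapplyBlob pair collapses into a single TryReusingBlob call, with PutBlob invoked only on a miss and the same cache threaded through both. A hedged sketch of that pattern (copyBlob and its arguments are illustrative, not the library's copy.Image code):

package example

import (
    "context"
    "io"

    "github.com/containers/image/types"
)

func copyBlob(ctx context.Context, dest types.ImageDestination, src io.Reader,
    info types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
    // Ask the destination to reuse an existing blob first; canSubstitute=true
    // allows an equivalent (e.g. differently-compressed) blob to stand in.
    reused, reusedInfo, err := dest.TryReusingBlob(ctx, info, cache, true)
    if err != nil {
        return types.BlobInfo{}, err
    }
    if reused {
        return reusedInfo, nil // nothing to upload
    }
    // Miss: upload the stream; the destination may record what it learns in cache.
    return dest.PutBlob(ctx, src, info, cache, false)
}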
diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go
index 17af60b30..ee963b8d8 100644
--- a/vendor/github.com/containers/image/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/tarball/tarball_src.go
@@ -207,7 +207,10 @@ func (is *tarballImageSource) Close() error {
return nil
}
-func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo) (io.ReadCloser, int64, error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
// We should only be asked about things in the manifest. Maybe the configuration blob.
if blobinfo.Digest == is.configID {
return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
index 5d05b711a..dda332776 100644
--- a/vendor/github.com/containers/image/types/types.go
+++ b/vendor/github.com/containers/image/types/types.go
@@ -100,6 +100,82 @@ type BlobInfo struct {
MediaType string
}
+// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
+// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
+// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICTransportScope struct {
+ Opaque string
+}
+
+// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
+// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
+// can look it up using BlobInfoCache.CandidateLocations.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICLocationReference struct {
+ Opaque string
+}
+
+// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
+type BICReplacementCandidate struct {
+ Digest digest.Digest
+ Location BICLocationReference
+}
+
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
+//
+// It records two kinds of data:
+// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
+//
+// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+//
+// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+// compress/decompress blobs for their own purposes.
+//
+// - Known blob locations, managed by individual transports:
+// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+// recording transport-specific information that allows the transport to reuse the blob in the future;
+// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+// Each transport defines its own “scopes” within which blob reuse is possible (e.g. in the docker/distribution case, blobs
+// can be directly reused within a registry, or mounted across registries within a registry server.)
+//
+// None of the methods return an error indication: errors in reading from, or writing to, the cache should not be fatal;
+// users of the cache should just fall back to copying the blobs the usual way.
+type BlobInfoCache interface {
+ // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+ // May return anyDigest if it is known to be uncompressed.
+ // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+ UncompressedDigest(anyDigest digest.Digest) digest.Digest
+ // RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest.
+ // It’s allowed for anyDigest == uncompressed.
+ // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+ // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+ // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+ RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
+
+ // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+ // and can be reused given the opaque location data.
+ RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
+ // CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+ // within the specified (transport, scope) (if they still exist, which is not guaranteed).
+ //
+ // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+ // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+ // uncompressed digest.
+ CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
+}
+
// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
// This is primarily useful for copying images around; for examining their properties, Image (below)
// is usually more useful.
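Anything satisfying this interface can be plugged in; the sketch below is a deliberately minimal in-memory variant (no locking, no prioritization or candidate limits, unlike the real pkg/blobinfocache implementations) just to show the shape of the contract:

package example

import (
    "github.com/containers/image/types"
    digest "github.com/opencontainers/go-digest"
)

// memCache is a toy BlobInfoCache; the vendored pkg/blobinfocache package
// provides the real in-memory and BoltDB-backed implementations.
type memCache struct {
    uncompressed map[digest.Digest]digest.Digest
    locations    map[string][]types.BICReplacementCandidate
}

func newMemCache() *memCache {
    return &memCache{
        uncompressed: map[digest.Digest]digest.Digest{},
        locations:    map[string][]types.BICReplacementCandidate{},
    }
}

// key flattens (transport, scope, digest) into a map key.
func key(t types.ImageTransport, s types.BICTransportScope, d digest.Digest) string {
    return t.Name() + "|" + s.Opaque + "|" + d.String()
}

func (c *memCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
    return c.uncompressed[anyDigest]
}

func (c *memCache) RecordDigestUncompressedPair(anyDigest, uncompressed digest.Digest) {
    c.uncompressed[anyDigest] = uncompressed
}

func (c *memCache) RecordKnownLocation(t types.ImageTransport, s types.BICTransportScope, d digest.Digest, l types.BICLocationReference) {
    k := key(t, s, d)
    c.locations[k] = append(c.locations[k], types.BICReplacementCandidate{Digest: d, Location: l})
}

func (c *memCache) CandidateLocations(t types.ImageTransport, s types.BICTransportScope, d digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
    res := c.locations[key(t, s, d)]
    if canSubstitute {
        // Fold in variants that share the same uncompressed digest.
        if u := c.uncompressed[d]; u != "" && u != d {
            res = append(res, c.locations[key(t, s, u)]...)
        }
    }
    return res
}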
@@ -120,7 +196,8 @@ type ImageSource interface {
GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error)
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
- GetBlob(context.Context, BlobInfo) (io.ReadCloser, int64, error)
+ // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+ GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
@@ -148,8 +225,7 @@ const (
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
//
// There is a specific required order for some of the calls:
-// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
-// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest
+// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
//
@@ -183,17 +259,19 @@ type ImageDestination interface {
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
+ // May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
- PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, isConfig bool) (BlobInfo, error)
- // HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
- // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
- // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
- // it returns a non-nil error only on an unexpected failure.
- HasBlob(ctx context.Context, info BlobInfo) (bool, int64, error)
- // ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree.
- ReapplyBlob(ctx context.Context, info BlobInfo) (BlobInfo, error)
+ PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
+ // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+ // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+ // info.Digest must not be empty.
+ // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+ // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+ // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+ // May use and/or update cache.
+ TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
// PutManifest writes manifest to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
@@ -324,6 +402,30 @@ type DockerAuthConfig struct {
Password string
}
+// OptionalBool is a boolean with an additional undefined value, which is meant
+// to be used in the context of user input to distinguish between a
+// user-specified value and a default value.
+type OptionalBool byte
+
+const (
+ // OptionalBoolUndefined indicates that the OptionalBool hasn't been set.
+ OptionalBoolUndefined OptionalBool = iota
+ // OptionalBoolTrue represents the boolean true.
+ OptionalBoolTrue
+ // OptionalBoolFalse represents the boolean false.
+ OptionalBoolFalse
+)
+
+// NewOptionalBool converts the input bool into either OptionalBoolTrue or
+// OptionalBoolFalse. The function is meant to avoid boilerplate code for callers.
+func NewOptionalBool(b bool) OptionalBool {
+ o := OptionalBoolFalse
+ if b {
+ o = OptionalBoolTrue
+ }
+ return o
+}
+
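The extra undefined state matters when a flag should override per-registry defaults only if the user actually passed it; a hedged sketch (applyTLSFlag and its parameters stand in for real CLI plumbing):

package example

import "github.com/containers/image/types"

func applyTLSFlag(ctx *types.SystemContext, flagSet bool, flagValue bool) {
    if flagSet {
        // The user asked for something explicit; note the inversion:
        // --tls-verify=false means InsecureSkipTLSVerify = true.
        ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!flagValue)
    }
    // Otherwise leave it as OptionalBoolUndefined so per-registry
    // configuration (e.g. registries.conf) can decide.
}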
// SystemContext allows parameterizing access to implicitly-accessed resources,
// like configuration files in /etc and users' login state in their home directory.
// Various components can share the same field only if their semantics is exactly
@@ -351,6 +453,8 @@ type SystemContext struct {
ArchitectureChoice string
// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
OSChoice string
+ // If not "", overrides the system's default directory containing a blob info cache.
+ BlobInfoCacheDir string
// Additional tags when creating or copying a docker-archive.
DockerArchiveAdditionalTags []reference.NamedTagged
@@ -376,7 +480,7 @@ type SystemContext struct {
// Ignored if DockerCertPath is non-empty.
DockerPerHostCertDirPath string
// Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
- DockerInsecureSkipTLSVerify bool
+ DockerInsecureSkipTLSVerify OptionalBool
// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
DockerAuthConfig *DockerAuthConfig
// if not "", an User-Agent header is added to each request when contacting a registry.
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf
index 246c0096a..88537981a 100644
--- a/vendor/github.com/containers/image/vendor.conf
+++ b/vendor/github.com/containers/image/vendor.conf
@@ -34,7 +34,7 @@ github.com/xeipuuv/gojsonschema master
github.com/xeipuuv/gojsonreference master
github.com/xeipuuv/gojsonpointer master
github.com/tchap/go-patricia v2.2.6
-github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
+github.com/opencontainers/selinux 077c8b6d1c18456fb7c792bc0de52295a0d1900e
github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
@@ -43,3 +43,4 @@ github.com/syndtr/gocapability master
github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
github.com/ulikunitz/xz v0.5.4
+github.com/boltdb/bolt master
diff --git a/vendor/github.com/containers/storage/README.md b/vendor/github.com/containers/storage/README.md
index f68cc55c3..fef46a689 100644
--- a/vendor/github.com/containers/storage/README.md
+++ b/vendor/github.com/containers/storage/README.md
@@ -2,7 +2,7 @@
layers, container images, and containers. A `containers-storage` CLI wrapper
is also included for manual and scripting use.
-To build the CLI wrapper, use 'make build-binary'.
+To build the CLI wrapper, use 'make binary'.
Operations which use VMs expect to launch them using 'vagrant', defaulting to
using its 'libvirt' provider. The boxes used are also available for the
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index 0a125331d..beaf41f39 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -147,6 +147,13 @@ func (c *Container) ProcessLabel() string {
return ""
}
+// MountOpts returns the per-container mount options recorded in the
+// container's Flags, if any.
+func (c *Container) MountOpts() []string {
+ if mountOpts, ok := c.Flags["MountOpts"].([]string); ok {
+ return mountOpts
+ }
+ return nil
+}
+
func (r *containerStore) Containers() ([]Container, error) {
containers := make([]Container, len(r.containers))
for i := range r.containers {
@@ -293,6 +300,9 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID
}
+ if options.MountOpts != nil {
+ options.Flags["MountOpts"] = append([]string{}, options.MountOpts...)
+ }
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
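Create() snapshots the requested options into the container's Flags map, and MountOpts() reads them back at mount time. A hedged round-trip sketch against the Store API (names and option values are illustrative):

package example

import "github.com/containers/storage"

func createWithMountOpts(store storage.Store, image, name string) (string, error) {
    opts := &storage.ContainerOptions{
        // Persisted by the container store under Flags["MountOpts"].
        MountOpts: []string{"nodev", "noexec"},
    }
    container, err := store.CreateContainer("", []string{name}, image, "", "", opts)
    if err != nil {
        return "", err
    }
    // store.Mount(container.ID, ...) later hands these back to the
    // graph driver via MountOpts.Options (see the store.go hunk below).
    return container.ID, nil
}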
diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go
index 6e83808d4..aef6becfe 100644
--- a/vendor/github.com/containers/storage/containers_ffjson.go
+++ b/vendor/github.com/containers/storage/containers_ffjson.go
@@ -1,6 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: containers.go
-//
package storage
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
index f14ba24b9..ca69816be 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -405,7 +405,7 @@ func atomicRemove(source string) error {
case os.IsExist(err):
// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
if _, e := os.Stat(source); !os.IsNotExist(e) {
- return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up")
+ return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target)
}
default:
return errors.Wrapf(err, "error preparing atomic delete")
@@ -441,7 +441,7 @@ func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
// If a dir does not have a parent (no layers), do not try to mount
// just return the diff path to the data
if len(parents) > 0 {
- if err := a.mount(id, m, options.MountLabel, parents); err != nil {
+ if err := a.mount(id, m, parents, options); err != nil {
return "", err
}
}
@@ -585,7 +585,7 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
return layers, nil
}
-func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
+func (a *Driver) mount(id string, target string, layers []string, options graphdriver.MountOpts) error {
a.Lock()
defer a.Unlock()
@@ -596,7 +596,7 @@ func (a *Driver) mount(id string, target string, mountLabel string, layers []str
rw := a.getDiffPath(id)
- if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
+ if err := a.aufsMount(layers, rw, target, options); err != nil {
return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
}
return nil
@@ -643,7 +643,7 @@ func (a *Driver) Cleanup() error {
return mountpk.Unmount(a.root)
}
-func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {
+func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) {
defer func() {
if err != nil {
Unmount(target)
@@ -657,7 +657,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
if useDirperm() {
offset += len(",dirperm1")
}
- b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
+ b := make([]byte, unix.Getpagesize()-len(options.MountLabel)-offset) // room for xino & mountLabel
bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
index := 0
@@ -670,21 +670,25 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
}
opts := "dio,xino=/dev/shm/aufs.xino"
- if a.mountOptions != "" {
- opts += fmt.Sprintf(",%s", a.mountOptions)
+ mountOptions := a.mountOptions
+ if len(options.Options) > 0 {
+ mountOptions = strings.Join(options.Options, ",")
+ }
+ if mountOptions != "" {
+ opts += fmt.Sprintf(",%s", mountOptions)
}
if useDirperm() {
opts += ",dirperm1"
}
- data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
+ data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), options.MountLabel)
if err = mount("none", target, "aufs", 0, data); err != nil {
return
}
for ; index < len(ro); index++ {
layer := fmt.Sprintf(":%s=ro+wh", ro[index])
- data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
+ data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), options.MountLabel)
if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
return
}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
index adc34d209..567cda9d3 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -640,6 +640,9 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
if err != nil {
return "", err
}
+ if len(options.Options) > 0 {
+ return "", fmt.Errorf("btrfs driver does not support mount options")
+ }
if !st.IsDir() {
return "", fmt.Errorf("%s: not a directory", dir)
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy.go b/vendor/github.com/containers/storage/drivers/copy/copy.go
new file mode 100644
index 000000000..2617824c5
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/copy/copy.go
@@ -0,0 +1,277 @@
+// +build linux
+
+package copy
+
+/*
+#include <linux/fs.h>
+
+#ifndef FICLONE
+#define FICLONE _IOW(0x94, 9, int)
+#endif
+*/
+import "C"
+import (
+ "container/list"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "syscall"
+ "time"
+
+ "github.com/containers/storage/pkg/pools"
+ "github.com/containers/storage/pkg/system"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "golang.org/x/sys/unix"
+)
+
+// Mode indicates whether to use hardlink or copy content
+type Mode int
+
+const (
+ // Content creates a new file, and copies the content of the file
+ Content Mode = iota
+ // Hardlink creates a new hardlink to the existing file
+ Hardlink
+)
+
+func copyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+ srcFile, err := os.Open(srcPath)
+ if err != nil {
+ return err
+ }
+ defer srcFile.Close()
+
+ // If the destination file already exists, we shouldn't blow it away
+ dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode())
+ if err != nil {
+ return err
+ }
+ defer dstFile.Close()
+
+ if *copyWithFileClone {
+ // unix.Syscall returns a syscall.Errno; Errno(0) stored into the error
+ // interface would compare non-nil, so test the Errno value directly.
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
+ if errno == 0 {
+ return nil
+ }
+
+ *copyWithFileClone = false
+ if errno == unix.EXDEV {
+ *copyWithFileRange = false
+ }
+ }
+ if *copyWithFileRange {
+ err = doCopyWithFileRange(srcFile, dstFile, fileinfo)
+ // Trying the file_clone may not have caught the exdev case
+ // as the ioctl may not have been available (therefore EINVAL)
+ if err == unix.EXDEV || err == unix.ENOSYS {
+ *copyWithFileRange = false
+ } else {
+ return err
+ }
+ }
+ return legacyCopy(srcFile, dstFile)
+}
+
+func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error {
+ amountLeftToCopy := fileinfo.Size()
+
+ for amountLeftToCopy > 0 {
+ n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0)
+ if err != nil {
+ return err
+ }
+
+ amountLeftToCopy = amountLeftToCopy - int64(n)
+ }
+
+ return nil
+}
+
+func legacyCopy(srcFile io.Reader, dstFile io.Writer) error {
+ _, err := pools.Copy(dstFile, srcFile)
+
+ return err
+}
+
+func copyXattr(srcPath, dstPath, attr string) error {
+ data, err := system.Lgetxattr(srcPath, attr)
+ if err != nil {
+ return err
+ }
+ if data != nil {
+ if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type fileID struct {
+ dev uint64
+ ino uint64
+}
+
+type dirMtimeInfo struct {
+ dstPath *string
+ stat *syscall.Stat_t
+}
+
+// DirCopy copies or hardlinks the contents of one directory to another,
+// properly handling xattrs, and soft links
+//
+// Copying xattrs can be opted out of by passing false for copyXattrs.
+func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
+ copyWithFileRange := true
+ copyWithFileClone := true
+
+ // This is a map of source file inodes to dst file paths
+ copiedFiles := make(map[fileID]string)
+
+ dirsToSetMtimes := list.New()
+ err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(srcDir, srcPath)
+ if err != nil {
+ return err
+ }
+
+ dstPath := filepath.Join(dstDir, relPath)
+
+ stat, ok := f.Sys().(*syscall.Stat_t)
+ if !ok {
+ return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
+ }
+
+ isHardlink := false
+
+ switch f.Mode() & os.ModeType {
+ case 0: // Regular file
+ id := fileID{dev: stat.Dev, ino: stat.Ino}
+ if copyMode == Hardlink {
+ isHardlink = true
+ if err2 := os.Link(srcPath, dstPath); err2 != nil {
+ return err2
+ }
+ } else if hardLinkDstPath, ok := copiedFiles[id]; ok {
+ if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil {
+ return err2
+ }
+ } else {
+ if err2 := copyRegular(srcPath, dstPath, f, &copyWithFileRange, &copyWithFileClone); err2 != nil {
+ return err2
+ }
+ copiedFiles[id] = dstPath
+ }
+
+ case os.ModeDir:
+ if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ case os.ModeSymlink:
+ link, err := os.Readlink(srcPath)
+ if err != nil {
+ return err
+ }
+
+ if err := os.Symlink(link, dstPath); err != nil {
+ return err
+ }
+
+ case os.ModeNamedPipe:
+ fallthrough
+ case os.ModeSocket:
+ if err := unix.Mkfifo(dstPath, stat.Mode); err != nil {
+ return err
+ }
+
+ case os.ModeDevice:
+ if rsystem.RunningInUserNS() {
+ // cannot create a device if running in user namespace
+ return nil
+ }
+ if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
+ return err
+ }
+
+ default:
+ return fmt.Errorf("unknown file type for %s", srcPath)
+ }
+
+ // Everything below is copying metadata from src to dst. All this metadata
+ // already shares an inode for hardlinks.
+ if isHardlink {
+ return nil
+ }
+
+ if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
+ return err
+ }
+
+ if copyXattrs {
+ if err := doCopyXattrs(srcPath, dstPath); err != nil {
+ return err
+ }
+ }
+
+ isSymlink := f.Mode()&os.ModeSymlink != 0
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if !isSymlink {
+ if err := os.Chmod(dstPath, f.Mode()); err != nil {
+ return err
+ }
+ }
+
+ // system.Chtimes doesn't support a NOFOLLOW flag atm
+ // nolint: unconvert
+ if f.IsDir() {
+ dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat})
+ } else if !isSymlink {
+ aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+ mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
+ if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
+ return err
+ }
+ } else {
+ ts := []syscall.Timespec{stat.Atim, stat.Mtim}
+ if err := system.LUtimesNano(dstPath, ts); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() {
+ mtimeInfo := e.Value.(*dirMtimeInfo)
+ ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim}
+ if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func doCopyXattrs(srcPath, dstPath string) error {
+ if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
+ return err
+ }
+
+ // We need to copy this attribute if it appears in an overlay upper layer, as
+ // this function is used to copy those. It is set by overlay if a directory
+ // is removed and then re-created and should not inherit anything from the
+ // same dir in the lower dir.
+ return copyXattr(srcPath, dstPath, "trusted.overlay.opaque")
+}
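A minimal caller of the new copy path (the vfs driver below wires this up as dirCopy); cloneLayer is an illustrative name:

package example

import "github.com/containers/storage/drivers/copy"

// cloneLayer copies one directory tree to another. Content mode copies file
// data (trying FICLONE, then copy_file_range, then a plain copy); Hardlink
// would link regular files instead. The last argument opts into xattr copying.
func cloneLayer(src, dst string) error {
    return copy.DirCopy(src, dst, copy.Content, true)
}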
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
index cbf67b3eb..b6f22e90a 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -2364,7 +2364,7 @@ func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
}
// MountDevice mounts the device if not already mounted.
-func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
+func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.MountOpts) error {
info, err := devices.lookupDeviceWithLock(hash)
if err != nil {
return err
@@ -2396,8 +2396,17 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
options = joinMountOptions(options, "nouuid")
}
- options = joinMountOptions(options, devices.mountOptions)
- options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
+ mountOptions := devices.mountOptions
+ if len(moptions.Options) > 0 {
+ addNouuid := strings.Contains(mountOptions, "nouuid") // preserve nouuid when overriding the configured options
+ mountOptions = strings.Join(moptions.Options, ",")
+ if addNouuid {
+ mountOptions = fmt.Sprintf("nouuid,%s", mountOptions)
+ }
+ }
+
+ options = joinMountOptions(options, mountOptions)
+ options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel))
if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256)))
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
index 9fc082d7d..39a4fbe2c 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
@@ -9,8 +9,6 @@ import (
"path"
"strconv"
- "github.com/sirupsen/logrus"
-
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/devicemapper"
"github.com/containers/storage/pkg/idtools"
@@ -18,6 +16,7 @@ import (
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
+ "github.com/sirupsen/logrus"
)
func init() {
@@ -189,7 +188,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
}
// Mount the device
- if err := d.DeviceSet.MountDevice(id, mp, options.MountLabel); err != nil {
+ if err := d.DeviceSet.MountDevice(id, mp, options); err != nil {
d.ctr.Decrement(mp)
return "", err
}
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index 4569c7b59..476b55160 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -49,6 +49,7 @@ type MountOpts struct {
// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
UidMaps []idtools.IDMap
GidMaps []idtools.IDMap
+ Options []string
}
// InitFunc initializes the storage driver.
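Drivers that honor the new field give per-mount Options precedence over their configured defaults, as the aufs, devmapper, overlay, and zfs hunks in this patch do; a generic sketch of that precedence rule:

package example

import (
    "strings"

    graphdriver "github.com/containers/storage/drivers"
)

// effectiveMountOptions mirrors the pattern the drivers apply: per-mount
// Options, when present, replace the driver-wide configured defaults.
func effectiveMountOptions(driverDefault string, opts graphdriver.MountOpts) string {
    if len(opts.Options) > 0 {
        return strings.Join(opts.Options, ",")
    }
    return driverDefault
}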
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
index 19da7d101..ab2c41e58 100644
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -8,6 +8,7 @@ import (
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/sirupsen/logrus"
)
@@ -167,7 +168,9 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id string, applyMappings *idtools.IDMappin
}
defer driver.Put(id)
- options := &archive.TarOptions{}
+ options := &archive.TarOptions{
+ InUserNS: rsystem.RunningInUserNS(),
+ }
if applyMappings != nil {
options.UIDMaps = applyMappings.UIDs()
options.GIDMaps = applyMappings.GIDs()
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index d2cc65bca..df736c0a9 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -29,6 +29,7 @@ import (
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -340,6 +341,10 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
func (d *Driver) useNaiveDiff() bool {
useNaiveDiffLock.Do(func() {
+ if d.options.mountProgram != "" {
+ useNaiveDiffOnly = true
+ return
+ }
if err := doesSupportNativeDiff(d.home, d.options.mountOptions); err != nil {
logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err)
useNaiveDiffOnly = true
@@ -739,7 +744,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
workDir := path.Join(dir, "work")
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir)
- if d.options.mountOptions != "" {
+ if len(options.Options) > 0 {
+ opts = fmt.Sprintf("%s,%s", strings.Join(options.Options, ","), opts)
+ } else if d.options.mountOptions != "" {
opts = fmt.Sprintf("%s,%s", d.options.mountOptions, opts)
}
mountData := label.FormatMountLabel(opts, options.MountLabel)
@@ -841,6 +848,17 @@ func (d *Driver) isParent(id, parent string) bool {
return ld == parentDir
}
+func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat {
+ whiteoutFormat := archive.OverlayWhiteoutFormat
+ if d.options.mountProgram != "" {
+ // If we are using a mount program, we are most likely running
+ // as an unprivileged user that cannot use mknod, so fallback to the
+ // AUFS whiteout format.
+ whiteoutFormat = archive.AUFSWhiteoutFormat
+ }
+ return whiteoutFormat
+}
+
// ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent string, mountLabel string, diff io.Reader) (size int64, err error) {
if !d.isParent(id, parent) {
@@ -858,7 +876,8 @@ func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent str
if err := untar(diff, applyDir, &archive.TarOptions{
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
- WhiteoutFormat: archive.OverlayWhiteoutFormat,
+ WhiteoutFormat: d.getWhiteoutFormat(),
+ InUserNS: rsystem.RunningInUserNS(),
}); err != nil {
return 0, err
}
@@ -911,7 +930,7 @@ func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string,
Compression: archive.Uncompressed,
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
- WhiteoutFormat: archive.OverlayWhiteoutFormat,
+ WhiteoutFormat: d.getWhiteoutFormat(),
WhiteoutData: lowerDirs,
})
}
diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go
new file mode 100644
index 000000000..8137fcf67
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go
@@ -0,0 +1,7 @@
+package vfs
+
+import "github.com/containers/storage/drivers/copy"
+
+func dirCopy(srcDir, dstDir string) error {
+ return copy.DirCopy(srcDir, dstDir, copy.Content, false)
+}
diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go
new file mode 100644
index 000000000..8ac80ee1d
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go
@@ -0,0 +1,9 @@
+// +build !linux
+
+package vfs // import "github.com/containers/storage/drivers/vfs"
+
+import "github.com/containers/storage/pkg/chrootarchive"
+
+func dirCopy(srcDir, dstDir string) error {
+ return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir)
+}
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
index d10fb2607..f7f3c75ba 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -7,7 +7,6 @@ import (
"strings"
"github.com/containers/storage/drivers"
- "github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ostree"
"github.com/containers/storage/pkg/system"
@@ -15,8 +14,8 @@ import (
)
var (
- // CopyWithTar defines the copy method to use.
- CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar
+ // CopyDir defines the copy method to use.
+ CopyDir = dirCopy
)
func init() {
@@ -141,7 +140,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
if err != nil {
return fmt.Errorf("%s: %s", parent, err)
}
- if err := CopyWithTar(parentDir, dir); err != nil {
+ if err := dirCopy(parentDir, dir); err != nil {
return err
}
}
@@ -181,6 +180,9 @@ func (d *Driver) Remove(id string) error {
// Get returns the directory for the given id.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
dir := d.dir(id)
+ if len(options.Options) > 0 {
+ return "", fmt.Errorf("vfs driver does not support mount options")
+ }
if st, err := os.Stat(dir); err != nil {
return "", err
} else if !st.IsDir() {
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index 4ccf657dc..c6d86a4ab 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -367,6 +367,9 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, options.MountLabel)
var dir string
+ if len(options.Options) > 0 {
+ return "", fmt.Errorf("windows driver does not support mount options")
+ }
rID, err := d.resolveID(id)
if err != nil {
return "", err
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index cb4424f2d..c3ce6e869 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -52,7 +52,7 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available")
}
- file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600)
+ file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
if err != nil {
logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err)
return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err)
@@ -366,8 +366,13 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
return mountpoint, nil
}
+ mountOptions := d.options.mountOptions
+ if len(options.Options) > 0 {
+ mountOptions = strings.Join(options.Options, ",")
+ }
+
filesystem := d.zfsPath(id)
- opts := label.FormatMountLabel(d.options.mountOptions, options.MountLabel)
+ opts := label.FormatMountLabel(mountOptions, options.MountLabel)
logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, opts)
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 1275ab47c..0b532eb77 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -28,9 +28,8 @@ import (
)
const (
- tarSplitSuffix = ".tar-split.gz"
- incompleteFlag = "incomplete"
- compressionFlag = "diff-compression"
+ tarSplitSuffix = ".tar-split.gz"
+ incompleteFlag = "incomplete"
)
// A Layer is a record of a copy-on-write layer that's stored by the lower
@@ -542,8 +541,8 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
_, idInUse = r.byid[id]
}
}
- if _, idInUse := r.byid[id]; idInUse {
- return nil, -1, ErrDuplicateID
+ if duplicateLayer, idInUse := r.byid[id]; idInUse {
+ return duplicateLayer, -1, ErrDuplicateID
}
names = dedupeNames(names)
for _, name := range names {
@@ -841,8 +840,12 @@ func (r *layerStore) Delete(id string) error {
return ErrLayerUnknown
}
id = layer.ID
- if _, err := r.Unmount(id, true); err != nil {
- return err
+ // This check is needed for idempotency of delete where the layer could have been
+ // already unmounted (since c/storage gives you that API directly)
+ for layer.MountCount > 0 {
+ if _, err := r.Unmount(id, false); err != nil {
+ return err
+ }
}
err := r.driver.Remove(id)
if err == nil {
diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go
index 09b5d0f33..125b5d8c9 100644
--- a/vendor/github.com/containers/storage/layers_ffjson.go
+++ b/vendor/github.com/containers/storage/layers_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./layers.go
+// source: layers.go
package storage
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 4c4382625..8d6eaacf3 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -22,6 +22,7 @@ import (
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/promise"
"github.com/containers/storage/pkg/system"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/sirupsen/logrus"
)
@@ -1054,6 +1055,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
GIDMaps: tarMappings.GIDs(),
Compression: Uncompressed,
CopyPass: true,
+ InUserNS: rsystem.RunningInUserNS(),
}
archive, err := TarWithOptions(src, options)
if err != nil {
@@ -1068,6 +1070,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
+ InUserNS: rsystem.RunningInUserNS(),
}
return archiver.Untar(archive, dst, options)
}
@@ -1087,6 +1090,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
+ InUserNS: rsystem.RunningInUserNS(),
}
return archiver.Untar(archive, dst, options)
}
@@ -1186,6 +1190,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
UIDMaps: archiver.UntarIDMappings.UIDs(),
GIDMaps: archiver.UntarIDMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
+ InUserNS: rsystem.RunningInUserNS(),
}
err = archiver.Untar(r, filepath.Dir(dst), options)
if err != nil {
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index b9fa228e6..dde8d44d3 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
)
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
@@ -52,6 +53,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
}
if options == nil {
options = &archive.TarOptions{}
+ options.InUserNS = rsystem.RunningInUserNS()
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go
new file mode 100644
index 000000000..c56aa86a2
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go
@@ -0,0 +1,56 @@
+package idtools
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+func nonDigitsToWhitespace(r rune) rune {
+ if !strings.ContainsRune("0123456789", r) {
+ return ' '
+ }
+ return r
+}
+
+func parseTriple(spec []string) (container, host, size uint32, err error) {
+ cid, err := strconv.ParseUint(spec[0], 10, 32)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err)
+ }
+ hid, err := strconv.ParseUint(spec[1], 10, 32)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err)
+ }
+ sz, err := strconv.ParseUint(spec[2], 10, 32)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err)
+ }
+ return uint32(cid), uint32(hid), uint32(sz), nil
+}
+
+// ParseIDMap parses idmap triples from string.
+func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) {
+ for _, idMapSpec := range mapSpec {
+ idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec))
+ if len(idSpec)%3 != 0 {
+ return nil, fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
+ }
+ for i := range idSpec {
+ if i%3 != 0 {
+ continue
+ }
+ cid, hid, size, err := parseTriple(idSpec[i : i+3])
+ if err != nil {
+ return nil, fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
+ }
+ mapping := IDMap{
+ ContainerID: int(cid),
+ HostID: int(hid),
+ Size: int(size),
+ }
+ idmap = append(idmap, mapping)
+ }
+ }
+ return idmap, nil
+}
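This extracts the closures previously inlined in store.go's ReloadConfigurationFile (removed in the store.go hunk below); a hedged usage sketch:

package example

import (
    "fmt"

    "github.com/containers/storage/pkg/idtools"
)

func parseRemapUIDs() {
    // "0:1000:65536" maps container IDs 0..65535 to host IDs 1000..66535;
    // any non-digit characters act as field separators.
    mappings, err := idtools.ParseIDMap([]string{"0:1000:65536"}, "remap-uids")
    if err != nil {
        fmt.Println(err)
        return
    }
    for _, m := range mappings {
        fmt.Printf("container=%d host=%d size=%d\n", m.ContainerID, m.HostID, m.Size)
    }
}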
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 7eaa82910..e0dd1b92f 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -8,7 +8,6 @@ import (
"os"
"path/filepath"
"reflect"
- "strconv"
"strings"
"sync"
"time"
@@ -22,6 +21,7 @@ import (
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
digest "github.com/opencontainers/go-digest"
@@ -502,6 +502,7 @@ type ContainerOptions struct {
IDMappingOptions
LabelOpts []string
Flags map[string]interface{}
+ MountOpts []string
}
type store struct {
@@ -1069,7 +1070,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, read
}
mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, rc)
if err != nil {
- return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q")
+ return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q", parentLayer.ID)
}
if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
@@ -2145,21 +2146,20 @@ func (s *store) DeleteContainer(id string) error {
if err = rlstore.Delete(container.LayerID); err != nil {
return err
}
- if err = rcstore.Delete(id); err != nil {
- return err
- }
- middleDir := s.graphDriverName + "-containers"
- gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
- if err = os.RemoveAll(gcpath); err != nil {
- return err
- }
- rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
- if err = os.RemoveAll(rcpath); err != nil {
- return err
- }
- return nil
}
- return ErrNotALayer
+ if err = rcstore.Delete(id); err != nil {
+ return err
+ }
+ middleDir := s.graphDriverName + "-containers"
+ gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
+ if err = os.RemoveAll(gcpath); err != nil {
+ return err
+ }
+ rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
+ if err = os.RemoveAll(rcpath); err != nil {
+ return err
+ }
+ return nil
}
}
return ErrNotAContainer
@@ -2280,10 +2280,14 @@ func (s *store) Version() ([][2]string, error) {
func (s *store) Mount(id, mountLabel string) (string, error) {
container, err := s.Container(id)
- var uidMap, gidMap []idtools.IDMap
+ var (
+ uidMap, gidMap []idtools.IDMap
+ mountOpts []string
+ )
if err == nil {
uidMap, gidMap = container.UIDMap, container.GIDMap
id = container.LayerID
+ mountOpts = container.MountOpts()
}
rlstore, err := s.LayerStore()
if err != nil {
@@ -2299,6 +2303,7 @@ func (s *store) Mount(id, mountLabel string) (string, error) {
MountLabel: mountLabel,
UidMaps: uidMap,
GidMaps: gidMap,
+ Options: mountOpts,
}
return rlstore.Mount(id, options)
}
@@ -3203,56 +3208,19 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
storeOptions.UIDMap = mappings.UIDs()
storeOptions.GIDMap = mappings.GIDs()
}
- nonDigitsToWhitespace := func(r rune) rune {
- if strings.IndexRune("0123456789", r) == -1 {
- return ' '
- } else {
- return r
- }
- }
- parseTriple := func(spec []string) (container, host, size uint32, err error) {
- cid, err := strconv.ParseUint(spec[0], 10, 32)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err)
- }
- hid, err := strconv.ParseUint(spec[1], 10, 32)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err)
- }
- sz, err := strconv.ParseUint(spec[2], 10, 32)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err)
- }
- return uint32(cid), uint32(hid), uint32(sz), nil
+
+ uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
+ if err != nil {
+ fmt.Print(err)
+ } else {
+ storeOptions.UIDMap = append(storeOptions.UIDMap, uidmap...)
}
- parseIDMap := func(idMapSpec, mapSetting string) (idmap []idtools.IDMap) {
- if len(idMapSpec) > 0 {
- idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec))
- if len(idSpec)%3 != 0 {
- fmt.Printf("Error initializing ID mappings: %s setting is malformed.\n", mapSetting)
- return nil
- }
- for i := range idSpec {
- if i%3 != 0 {
- continue
- }
- cid, hid, size, err := parseTriple(idSpec[i : i+3])
- if err != nil {
- fmt.Printf("Error initializing ID mappings: %s setting is malformed.\n", mapSetting)
- return nil
- }
- mapping := idtools.IDMap{
- ContainerID: int(cid),
- HostID: int(hid),
- Size: int(size),
- }
- idmap = append(idmap, mapping)
- }
- }
- return idmap
+ gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
+ if err != nil {
+ fmt.Print(err)
+ } else {
+ storeOptions.GIDMap = append(storeOptions.GIDMap, gidmap...)
}
- storeOptions.UIDMap = append(storeOptions.UIDMap, parseIDMap(config.Storage.Options.RemapUIDs, "remap-uids")...)
- storeOptions.GIDMap = append(storeOptions.GIDMap, parseIDMap(config.Storage.Options.RemapGIDs, "remap-gids")...)
if os.Getenv("STORAGE_DRIVER") != "" {
storeOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER")
}
@@ -3271,3 +3239,23 @@ func init() {
ReloadConfigurationFile(defaultConfigFile, &DefaultStoreOptions)
}
+
+func GetDefaultMountOptions() ([]string, error) {
+ mountOpts := []string{
+ ".mountopt",
+ fmt.Sprintf("%s.mountopt", DefaultStoreOptions.GraphDriverName),
+ }
+ for _, option := range DefaultStoreOptions.GraphDriverOptions {
+ key, val, err := parsers.ParseKeyValueOpt(option)
+ if err != nil {
+ return nil, err
+ }
+ key = strings.ToLower(key)
+ for _, m := range mountOpts {
+ if m == key {
+ return strings.Split(val, ","), nil
+ }
+ }
+ }
+ return nil, nil
+}
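
A hedged sketch of what the new GetDefaultMountOptions helper returns for a typical overlay setup (the graph-driver option string is invented for illustration):

    package main

    import (
        "fmt"

        "github.com/containers/storage"
    )

    func main() {
        // Pretend storage.conf carried: mountopt = "nodev,metacopy=on"
        storage.DefaultStoreOptions.GraphDriverName = "overlay"
        storage.DefaultStoreOptions.GraphDriverOptions = []string{"overlay.mountopt=nodev,metacopy=on"}

        opts, err := storage.GetDefaultMountOptions()
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(opts) // [nodev metacopy=on]
    }
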
diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf
index 2276d5531..fa52584d7 100644
--- a/vendor/github.com/containers/storage/vendor.conf
+++ b/vendor/github.com/containers/storage/vendor.conf
@@ -2,13 +2,14 @@ github.com/BurntSushi/toml master
github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
-github.com/docker/engine-api 4290f40c056686fcaa5c9caf02eac1dde9315adf
+github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/opencontainers/go-digest master
github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
github.com/opencontainers/selinux 36a9bc45a08c85f2c52bd9eb32e20267876773bd
+github.com/ostreedev/ostree-go master
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
github.com/pkg/errors master
github.com/pmezard/go-difflib v1.0.0
@@ -20,4 +21,5 @@ github.com/tchap/go-patricia v2.2.6
github.com/vbatts/tar-split v0.10.2
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
-github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
+gotest.tools master
+github.com/google/go-cmp master
diff --git a/vendor/github.com/google/shlex/COPYING b/vendor/github.com/google/shlex/COPYING
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/google/shlex/COPYING
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/shlex/README b/vendor/github.com/google/shlex/README
new file mode 100644
index 000000000..c86bcc066
--- /dev/null
+++ b/vendor/github.com/google/shlex/README
@@ -0,0 +1,2 @@
+go-shlex is a simple lexer for go that supports shell-style quoting,
+commenting, and escaping.
diff --git a/vendor/github.com/google/shlex/shlex.go b/vendor/github.com/google/shlex/shlex.go
new file mode 100644
index 000000000..d98308bce
--- /dev/null
+++ b/vendor/github.com/google/shlex/shlex.go
@@ -0,0 +1,416 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package shlex implements a simple lexer which splits input into tokens using
+shell-style rules for quoting and commenting.
+
+The basic use case uses the default ASCII lexer to split a string into sub-strings:
+
+ shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
+
+To process a stream of strings:
+
+ l := NewLexer(os.Stdin)
+	for token, err := l.Next(); err == nil; token, err = l.Next() {
+ // process token
+ }
+
+To access the raw token stream (which includes tokens for comments):
+
+ t := NewTokenizer(os.Stdin)
+	for token, err := t.Next(); err == nil; token, err = t.Next() {
+ // process token
+ }
+
+*/
+package shlex
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// TokenType is a top-level token classification: A word, space, comment, unknown.
+type TokenType int
+
+// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
+type runeTokenClass int
+
+// the internal state used by the lexer state machine
+type lexerState int
+
+// Token is a (type, value) pair representing a lexical token.
+type Token struct {
+ tokenType TokenType
+ value string
+}
+
+// Equal reports whether tokens a and b are equal.
+// Two tokens are equal if both their types and values are equal. A nil token can
+// never be equal to another token.
+func (a *Token) Equal(b *Token) bool {
+ if a == nil || b == nil {
+ return false
+ }
+ if a.tokenType != b.tokenType {
+ return false
+ }
+ return a.value == b.value
+}
+
+// Named classes of UTF-8 runes
+const (
+ spaceRunes = " \t\r\n"
+ escapingQuoteRunes = `"`
+ nonEscapingQuoteRunes = "'"
+ escapeRunes = `\`
+ commentRunes = "#"
+)
+
+// Classes of rune token
+const (
+ unknownRuneClass runeTokenClass = iota
+ spaceRuneClass
+ escapingQuoteRuneClass
+ nonEscapingQuoteRuneClass
+ escapeRuneClass
+ commentRuneClass
+ eofRuneClass
+)
+
+// Classes of lexical token
+const (
+ UnknownToken TokenType = iota
+ WordToken
+ SpaceToken
+ CommentToken
+)
+
+// Lexer state machine states
+const (
+ startState lexerState = iota // no runes have been seen
+ inWordState // processing regular runes in a word
+ escapingState // we have just consumed an escape rune; the next rune is literal
+ escapingQuotedState // we have just consumed an escape rune within a quoted string
+ quotingEscapingState // we are within a quoted string that supports escaping ("...")
+ quotingState // we are within a string that does not support escaping ('...')
+	commentState                      // we are within a comment (everything following an unquoted or unescaped #)
+)
+
+// tokenClassifier is used for classifying rune characters.
+type tokenClassifier map[rune]runeTokenClass
+
+func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
+ for _, runeChar := range runes {
+ typeMap[runeChar] = tokenType
+ }
+}
+
+// newDefaultClassifier creates a new classifier for ASCII characters.
+func newDefaultClassifier() tokenClassifier {
+ t := tokenClassifier{}
+ t.addRuneClass(spaceRunes, spaceRuneClass)
+ t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
+ t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
+ t.addRuneClass(escapeRunes, escapeRuneClass)
+ t.addRuneClass(commentRunes, commentRuneClass)
+ return t
+}
+
+// ClassifyRune classifies a rune.
+func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
+ return t[runeVal]
+}
+
+// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
+type Lexer Tokenizer
+
+// NewLexer creates a new lexer from an input stream.
+func NewLexer(r io.Reader) *Lexer {
+
+ return (*Lexer)(NewTokenizer(r))
+}
+
+// Next returns the next word, or an error. If there are no more words,
+// the error will be io.EOF.
+func (l *Lexer) Next() (string, error) {
+ for {
+ token, err := (*Tokenizer)(l).Next()
+ if err != nil {
+ return "", err
+ }
+ switch token.tokenType {
+ case WordToken:
+ return token.value, nil
+ case CommentToken:
+ // skip comments
+ default:
+ return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
+ }
+ }
+}
+
+// Tokenizer turns an input stream into a sequence of typed tokens
+type Tokenizer struct {
+ input bufio.Reader
+ classifier tokenClassifier
+}
+
+// NewTokenizer creates a new tokenizer from an input stream.
+func NewTokenizer(r io.Reader) *Tokenizer {
+ input := bufio.NewReader(r)
+ classifier := newDefaultClassifier()
+ return &Tokenizer{
+ input: *input,
+ classifier: classifier}
+}
+
+// scanStream scans the stream for the next token using the internal state machine.
+// It will panic if it encounters a rune which it does not know how to handle.
+func (t *Tokenizer) scanStream() (*Token, error) {
+ state := startState
+ var tokenType TokenType
+ var value []rune
+ var nextRune rune
+ var nextRuneType runeTokenClass
+ var err error
+
+ for {
+ nextRune, _, err = t.input.ReadRune()
+ nextRuneType = t.classifier.ClassifyRune(nextRune)
+
+ if err == io.EOF {
+ nextRuneType = eofRuneClass
+ err = nil
+ } else if err != nil {
+ return nil, err
+ }
+
+ switch state {
+ case startState: // no runes read yet
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ return nil, io.EOF
+ }
+ case spaceRuneClass:
+ {
+ }
+ case escapingQuoteRuneClass:
+ {
+ tokenType = WordToken
+ state = quotingEscapingState
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ tokenType = WordToken
+ state = quotingState
+ }
+ case escapeRuneClass:
+ {
+ tokenType = WordToken
+ state = escapingState
+ }
+ case commentRuneClass:
+ {
+ tokenType = CommentToken
+ state = commentState
+ }
+ default:
+ {
+ tokenType = WordToken
+ value = append(value, nextRune)
+ state = inWordState
+ }
+ }
+ }
+ case inWordState: // in a regular word
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case spaceRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case escapingQuoteRuneClass:
+ {
+ state = quotingEscapingState
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ state = quotingState
+ }
+ case escapeRuneClass:
+ {
+ state = escapingState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case escapingState: // the rune after an escape character
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found after escape character")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ default:
+ {
+ state = inWordState
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case escapingQuotedState: // the next rune after an escape character, in double quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found after escape character")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ default:
+ {
+ state = quotingEscapingState
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case quotingEscapingState: // in escaping double quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found when expecting closing quote")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case escapingQuoteRuneClass:
+ {
+ state = inWordState
+ }
+ case escapeRuneClass:
+ {
+ state = escapingQuotedState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case quotingState: // in non-escaping single quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found when expecting closing quote")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ state = inWordState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case commentState: // in a comment
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case spaceRuneClass:
+ {
+ if nextRune == '\n' {
+ state = startState
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ } else {
+ value = append(value, nextRune)
+ }
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ default:
+ {
+ return nil, fmt.Errorf("Unexpected state: %v", state)
+ }
+ }
+ }
+}
+
+// Next returns the next token in the stream.
+func (t *Tokenizer) Next() (*Token, error) {
+ return t.scanStream()
+}
+
+// Split partitions a string into a slice of strings.
+func Split(s string) ([]string, error) {
+ l := NewLexer(strings.NewReader(s))
+ subStrings := make([]string, 0)
+ for {
+ word, err := l.Next()
+ if err != nil {
+ if err == io.EOF {
+ return subStrings, nil
+ }
+ return subStrings, err
+ }
+ subStrings = append(subStrings, word)
+ }
+}
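
A runnable sketch of the two entry points the new package exposes: Split for one-shot parsing and Lexer for streaming (the input strings are invented):

    package main

    import (
        "fmt"
        "io"
        "strings"

        "github.com/google/shlex"
    )

    func main() {
        // Comments are dropped, quotes group words, escapes are honored.
        words, err := shlex.Split(`cp "my file.txt" /tmp # a comment`)
        if err != nil {
            panic(err)
        }
        fmt.Println(words) // [cp my file.txt /tmp]

        // Streaming form: Next returns io.EOF once input is exhausted.
        l := shlex.NewLexer(strings.NewReader("one 'two three' four"))
        for {
            word, err := l.Next()
            if err == io.EOF {
                break
            }
            if err != nil {
                panic(err)
            }
            fmt.Println(word)
        }
    }
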
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
index 2a31cd3c5..bb27ac936 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
@@ -9,7 +9,7 @@ func InitLabels(options []string) (string, string, error) {
return "", "", nil
}
-func GetROMountLabel() string {
+func ROMountLabel() string {
return ""
}
@@ -25,7 +25,19 @@ func SetProcessLabel(processLabel string) error {
return nil
}
-func GetFileLabel(path string) (string, error) {
+func ProcessLabel() (string, error) {
+ return "", nil
+}
+
+func SetSocketLabel(processLabel string) error {
+ return nil
+}
+
+func SocketLabel() (string, error) {
+ return "", nil
+}
+
+func FileLabel(path string) (string, error) {
return "", nil
}
@@ -41,7 +53,7 @@ func Relabel(path string, fileLabel string, shared bool) error {
return nil
}
-func GetPidLabel(pid int) (string, error) {
+func PidLabel(pid int) (string, error) {
return "", nil
}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
index 63c4edd05..de214b2d5 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
@@ -95,6 +95,17 @@ func SetProcessLabel(processLabel string) error {
return selinux.SetExecLabel(processLabel)
}
+// SetSocketLabel takes a process label and tells the kernel to assign the
+// label to the next socket that gets created
+func SetSocketLabel(processLabel string) error {
+ return selinux.SetSocketLabel(processLabel)
+}
+
+// SocketLabel retrieves the current default socket label setting
+func SocketLabel() (string, error) {
+ return selinux.SocketLabel()
+}
+
// ProcessLabel returns the process label that the kernel will assign
// to the next program executed by the current process. If "" is returned
// this indicates that the default labeling will happen for the process.
@@ -102,7 +113,7 @@ func ProcessLabel() (string, error) {
return selinux.ExecLabel()
}
-// GetFileLabel returns the label for specified path
+// FileLabel returns the label for specified path
func FileLabel(path string) (string, error) {
return selinux.FileLabel(path)
}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
index 2cd54eac1..7832f7497 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
@@ -385,6 +385,17 @@ func SetExecLabel(label string) error {
return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), label)
}
+// SetSocketLabel takes a process label and tells the kernel to assign the
+// label to the next socket that gets created
+func SetSocketLabel(label string) error {
+ return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/sockcreate", syscall.Gettid()), label)
+}
+
+// SocketLabel retrieves the current socket label setting
+func SocketLabel() (string, error) {
+ return readCon(fmt.Sprintf("/proc/self/task/%d/attr/sockcreate", syscall.Gettid()))
+}
+
// Get returns the Context as a string
func (c Context) Get() string {
if c["level"] != "" {
@@ -687,7 +698,11 @@ func Chcon(fpath string, label string, recurse bool) error {
return err
}
callback := func(p string, info os.FileInfo, err error) error {
- return SetFileLabel(p, label)
+ e := SetFileLabel(p, label)
+ if os.IsNotExist(e) {
+ return nil
+ }
+ return e
}
if recurse {
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
index 5abf8a362..99efa155a 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
@@ -96,6 +96,19 @@ func SetExecLabel(label string) error {
return nil
}
+// SetSocketLabel takes a process label and tells the kernel to assign the
+// label to the next socket that gets created
+func SetSocketLabel(label string) error {
+ return nil
+}
+
+// SocketLabel retrieves the current socket label setting
+func SocketLabel() (string, error) {
+ return "", nil
+}
+
// Get returns the Context as a string
func (c Context) Get() string {
return ""
diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go
index 1c1afb119..d37965df6 100644
--- a/vendor/github.com/openshift/imagebuilder/builder.go
+++ b/vendor/github.com/openshift/imagebuilder/builder.go
@@ -172,8 +172,11 @@ type Stage struct {
Node *parser.Node
}
-func NewStages(node *parser.Node, b *Builder) Stages {
+func NewStages(node *parser.Node, b *Builder) (Stages, error) {
var stages Stages
+ if err := b.extractHeadingArgsFromNode(node); err != nil {
+ return stages, err
+ }
for i, root := range SplitBy(node, command.From) {
name, _ := extractNameFromNode(root.Children[0])
if len(name) == 0 {
@@ -189,7 +192,36 @@ func NewStages(node *parser.Node, b *Builder) Stages {
Node: root,
})
}
- return stages
+ return stages, nil
+}
+
+func (b *Builder) extractHeadingArgsFromNode(node *parser.Node) error {
+ var args []*parser.Node
+ var children []*parser.Node
+ extract := true
+ for _, child := range node.Children {
+ if extract && child.Value == command.Arg {
+ args = append(args, child)
+ } else {
+ if child.Value == command.From {
+ extract = false
+ }
+ children = append(children, child)
+ }
+ }
+
+ for _, c := range args {
+ step := b.Step()
+ if err := step.Resolve(c); err != nil {
+ return err
+ }
+ if err := b.Run(step, NoopExecutor, false); err != nil {
+ return err
+ }
+ }
+
+ node.Children = children
+ return nil
}
func extractNameFromNode(node *parser.Node) (string, bool) {
@@ -345,6 +377,9 @@ var ErrNoFROM = fmt.Errorf("no FROM statement found")
// is set to the first From found, or left unchanged if already
// set.
func (b *Builder) From(node *parser.Node) (string, error) {
+ if err := b.extractHeadingArgsFromNode(node); err != nil {
+ return "", err
+ }
children := SplitChildren(node, command.From)
switch {
case len(children) == 0:
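
A hedged sketch of the behavior this enables: an ARG placed before the first FROM is now resolved before stages are split, instead of being dropped. ParseDockerfile and NewBuilder are assumed from the vendored imagebuilder API; the Dockerfile text is invented:

    package main

    import (
        "fmt"
        "strings"

        "github.com/openshift/imagebuilder"
    )

    func main() {
        df := "ARG BASE=alpine:3.8\nFROM ${BASE}\nRUN echo hello\n"
        node, err := imagebuilder.ParseDockerfile(strings.NewReader(df))
        if err != nil {
            panic(err)
        }
        // NewStages now returns an error instead of silently mis-handling
        // heading ARGs that fail to resolve.
        stages, err := imagebuilder.NewStages(node, imagebuilder.NewBuilder(nil))
        if err != nil {
            panic(err)
        }
        fmt.Println(len(stages)) // 1
    }
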
diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go
index 068d5cc6f..f6510c2fd 100644
--- a/vendor/github.com/openshift/imagebuilder/dispatchers.go
+++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go
@@ -27,11 +27,6 @@ var (
obRgex = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`)
)
-// dispatch with no layer / parsing. This is effectively not a command.
-func nullDispatch(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
- return nil
-}
-
// ENV foo bar
//
// Sets the environment variable foo to bar, also makes interpolation
@@ -181,6 +176,17 @@ func from(b *Builder, args []string, attributes map[string]bool, flagArgs []stri
}
name := args[0]
+
+	// Support ARG before FROM
+ argStrs := []string{}
+ for n, v := range b.Args {
+ argStrs = append(argStrs, n+"="+v)
+ }
+ var err error
+ if name, err = ProcessWord(name, argStrs); err != nil {
+ return err
+ }
+
// Windows cannot support a container with no base image.
if name == NoBaseImageSpecifier {
if runtime.GOOS == "windows" {
@@ -438,6 +444,7 @@ func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs
healthcheck := docker.HealthConfig{}
flags := flag.NewFlagSet("", flag.ContinueOnError)
+ flags.String("start-period", "", "")
flags.String("interval", "", "")
flags.String("timeout", "", "")
flRetries := flags.String("retries", "", "")
@@ -462,6 +469,12 @@ func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs
return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ)
}
+ period, err := parseOptInterval(flags.Lookup("start-period"))
+ if err != nil {
+ return err
+ }
+ healthcheck.StartPeriod = period
+
interval, err := parseOptInterval(flags.Lookup("interval"))
if err != nil {
return err
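
The from() change resolves ${...} references in the image name against previously declared ARGs via ProcessWord; a minimal sketch, assuming ProcessWord keeps the (word, env) shape shown in the hunk:

    package main

    import (
        "fmt"

        "github.com/openshift/imagebuilder"
    )

    func main() {
        // b.Args would carry {"BASE": "alpine:3.8"} after a heading ARG,
        // flattened into "name=value" strings as in from() above.
        argStrs := []string{"BASE=alpine:3.8"}
        name, err := imagebuilder.ProcessWord("${BASE}", argStrs)
        if err != nil {
            panic(err)
        }
        fmt.Println(name) // alpine:3.8
    }
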
diff --git a/vendor/github.com/openshift/imagebuilder/evaluator.go b/vendor/github.com/openshift/imagebuilder/evaluator.go
index 83263127e..e1cd5d6d6 100644
--- a/vendor/github.com/openshift/imagebuilder/evaluator.go
+++ b/vendor/github.com/openshift/imagebuilder/evaluator.go
@@ -122,8 +122,7 @@ func (b *Step) Resolve(ast *parser.Node) error {
envs := b.Env
for ast.Next != nil {
ast = ast.Next
- var str string
- str = ast.Value
+ str := ast.Value
if replaceEnvAllowed[cmd] {
var err error
var words []string
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go
index d3a8ae5fd..24822b2b7 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go
@@ -18,76 +18,102 @@ import (
// #include "builtin.go.h"
import "C"
+// Repo represents a local ostree repository
type Repo struct {
- //*glib.GObject
ptr unsafe.Pointer
}
-// Converts an ostree repo struct to its C equivalent
+// isInitialized checks if the repo has been initialized
+func (r *Repo) isInitialized() bool {
+ if r == nil || r.ptr == nil {
+ return false
+ }
+ return true
+}
+
+// native converts an ostree repo struct to its C equivalent
func (r *Repo) native() *C.OstreeRepo {
- //return (*C.OstreeRepo)(r.Ptr())
+ if !r.isInitialized() {
+ return nil
+ }
return (*C.OstreeRepo)(r.ptr)
}
-// Takes a C ostree repo and converts it to a Go struct
-func repoFromNative(p *C.OstreeRepo) *Repo {
- if p == nil {
+// repoFromNative takes a C ostree repo and converts it to a Go struct
+func repoFromNative(or *C.OstreeRepo) *Repo {
+ if or == nil {
return nil
}
- //o := (*glib.GObject)(unsafe.Pointer(p))
- //r := &Repo{o}
- r := &Repo{unsafe.Pointer(p)}
+ r := &Repo{unsafe.Pointer(or)}
return r
}
-// Checks if the repo has been initialized
-func (r *Repo) isInitialized() bool {
- if r.ptr != nil {
- return true
+// OpenRepo attempts to open the repo at the given path
+func OpenRepo(path string) (*Repo, error) {
+ if path == "" {
+ return nil, errors.New("empty path")
}
- return false
-}
-// Attempts to open the repo at the given path
-func OpenRepo(path string) (*Repo, error) {
- var cerr *C.GError = nil
cpath := C.CString(path)
- pathc := C.g_file_new_for_path(cpath)
- defer C.g_object_unref(C.gpointer(pathc))
- crepo := C.ostree_repo_new(pathc)
+ defer C.free(unsafe.Pointer(cpath))
+ repoPath := C.g_file_new_for_path(cpath)
+ defer C.g_object_unref(C.gpointer(repoPath))
+ crepo := C.ostree_repo_new(repoPath)
repo := repoFromNative(crepo)
+
+ var cerr *C.GError
r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, nil, &cerr)))
if !r {
return nil, generateError(cerr)
}
+
return repo, nil
}
-// Enable support for tombstone commits, which allow the repo to distinguish between
-// commits that were intentionally deleted and commits that were removed accidentally
-func enableTombstoneCommits(repo *Repo) error {
- var tombstoneCommits bool
- var config *C.GKeyFile = C.ostree_repo_get_config(repo.native())
- var cerr *C.GError
+// enableTombstoneCommits enables support for tombstone commits.
+//
+// This allows distinguishing between intentional deletions and accidental removals
+// of commits.
+func (r *Repo) enableTombstoneCommits() error {
+ if !r.isInitialized() {
+ return errors.New("repo not initialized")
+ }
- tombstoneCommits = glib.GoBool(glib.GBoolean(C.g_key_file_get_boolean(config, (*C.gchar)(C.CString("core")), (*C.gchar)(C.CString("tombstone-commits")), nil)))
+ config := C.ostree_repo_get_config(r.native())
+ groupC := C.CString("core")
+ defer C.free(unsafe.Pointer(groupC))
+ keyC := C.CString("tombstone-commits")
+ defer C.free(unsafe.Pointer(keyC))
+ valueC := C.g_key_file_get_boolean(config, (*C.gchar)(groupC), (*C.gchar)(keyC), nil)
+ tombstoneCommits := glib.GoBool(glib.GBoolean(valueC))
- //tombstoneCommits is false only if it really is false or if it is set to FALSE in the config file
+ // tombstoneCommits is false only if it really is false or if it is set to FALSE in the config file
if !tombstoneCommits {
- C.g_key_file_set_boolean(config, (*C.gchar)(C.CString("core")), (*C.gchar)(C.CString("tombstone-commits")), C.TRUE)
- if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_config(repo.native(), config, &cerr))) {
+ var cerr *C.GError
+ C.g_key_file_set_boolean(config, (*C.gchar)(groupC), (*C.gchar)(keyC), C.TRUE)
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_config(r.native(), config, &cerr))) {
return generateError(cerr)
}
}
return nil
}
+// generateError wraps a GLib error into a Go one.
func generateError(err *C.GError) error {
+ if err == nil {
+ return errors.New("nil GError")
+ }
+
goErr := glib.ConvertGError(glib.ToGError(unsafe.Pointer(err)))
_, file, line, ok := runtime.Caller(1)
if ok {
- return errors.New(fmt.Sprintf("%s:%d - %s", file, line, goErr))
- } else {
- return goErr
+ return fmt.Errorf("%s:%d - %s", file, line, goErr)
}
+ return goErr
+}
+
+// isOk converts a C boolean return value (gboolean/gint) into a Go bool.
+// 0 is false/error, everything else is true/ok.
+func isOk(v C.int) bool {
+ return glib.GoBool(glib.GBoolean(v))
}
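
A short sketch of the reworked OpenRepo entry point (the repo path is invented; building requires the ostree-1 C library via cgo):

    package main

    import (
        "fmt"

        "github.com/ostreedev/ostree-go/pkg/otbuiltin"
    )

    func main() {
        // OpenRepo now rejects empty paths up front and frees its C strings.
        repo, err := otbuiltin.OpenRepo("/srv/ostree/repo")
        if err != nil {
            panic(err) // either "empty path" or a wrapped GError
        }
        fmt.Println(repo != nil) // true
    }
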
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h
index 734de9821..76171554d 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h
@@ -33,24 +33,12 @@ _ostree_repo_file(GFile *file)
return OSTREE_REPO_FILE (file);
}
-static guint
-_gpointer_to_uint (gpointer ptr)
-{
- return GPOINTER_TO_UINT (ptr);
-}
-
static gpointer
_guint_to_pointer (guint u)
{
return GUINT_TO_POINTER (u);
}
-static void
-_g_clear_object (volatile GObject **object_ptr)
-{
- g_clear_object(object_ptr);
-}
-
static const GVariantType*
_g_variant_type (char *type)
{
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go
index 55b51bfbd..04ada1792 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go
@@ -1,7 +1,7 @@
package otbuiltin
import (
- "strings"
+ "errors"
"unsafe"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
@@ -14,34 +14,42 @@ import (
// #include "builtin.go.h"
import "C"
-// Global variable for options
-var checkoutOpts checkoutOptions
-
-// Contains all of the options for checking commits out of
-// an ostree repo
+// checkoutOptions defines all of the options for checking commits
+// out of an ostree repo
+//
+// Note: while this is private, fields are public and part of the API.
type checkoutOptions struct {
- UserMode bool // Do not change file ownership or initialize extended attributes
- Union bool // Keep existing directories and unchanged files, overwriting existing filesystem
- AllowNoent bool // Do nothing if the specified filepath does not exist
- DisableCache bool // Do not update or use the internal repository uncompressed object caceh
- Whiteouts bool // Process 'whiteout' (docker style) entries
- RequireHardlinks bool // Do not fall back to full copies if hard linking fails
- Subpath string // Checkout sub-directory path
- FromFile string // Process many checkouts from the given file
+	// UserMode defines whether to check out a repo in `bare-user` mode
+	UserMode bool
+	// Union specifies whether to overwrite existing filesystem entries
+	Union bool
+	// AllowNoent defines whether to skip filepaths that do not exist
+	AllowNoent bool
+	// DisableCache defines whether to disable the internal repository uncompressed object cache
+	DisableCache bool
+	// Whiteouts defines whether to process 'whiteout' (docker style) entries
+	Whiteouts bool
+	// RequireHardlinks defines whether to fail, rather than fall back to full copies, if hard linking fails
+	RequireHardlinks bool
+	// Subpath specifies a sub-directory to use for checkout
+	Subpath string
+ // FromFile specifies an optional file containing many checkouts to process
+ FromFile string
}
-// Instantiates and returns a checkoutOptions struct with default values set
+// NewCheckoutOptions instantiates and returns a checkoutOptions struct with default values set
func NewCheckoutOptions() checkoutOptions {
return checkoutOptions{}
}
-// Checks out a commit with the given ref from a repository at the location of repo path to to the destination. Returns an error if the checkout could not be processed
-func Checkout(repoPath, destination, commit string, opts checkoutOptions) error {
- checkoutOpts = opts
-
+// Checkout checks out commit `commitRef` from a repository at `repoPath`,
+// writing it to `destination`. Returns an error if the checkout could not be processed.
+func Checkout(repoPath, destination, commitRef string, opts checkoutOptions) error {
var cancellable *glib.GCancellable
- ccommit := C.CString(commit)
+
+ ccommit := C.CString(commitRef)
defer C.free(unsafe.Pointer(ccommit))
+
var gerr = glib.NewGError()
cerr := (*C.GError)(gerr.Ptr())
defer C.free(unsafe.Pointer(cerr))
@@ -53,50 +61,48 @@ func Checkout(repoPath, destination, commit string, opts checkoutOptions) error
return generateError(cerr)
}
- if strings.Compare(checkoutOpts.FromFile, "") != 0 {
- err := processManyCheckouts(crepo, destination, cancellable)
- if err != nil {
- return err
- }
- } else {
- var resolvedCommit *C.char
- defer C.free(unsafe.Pointer(resolvedCommit))
- if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(crepo, ccommit, C.FALSE, &resolvedCommit, &cerr))) {
- return generateError(cerr)
- }
- err := processOneCheckout(crepo, resolvedCommit, checkoutOpts.Subpath, destination, cancellable)
- if err != nil {
- return err
- }
+ // Multiple checkouts to process
+ if opts.FromFile != "" {
+ return processManyCheckouts(crepo, destination, cancellable)
}
- return nil
+
+ // Simple single checkout
+ var resolvedCommit *C.char
+ defer C.free(unsafe.Pointer(resolvedCommit))
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(crepo, ccommit, C.FALSE, &resolvedCommit, &cerr))) {
+ return generateError(cerr)
+ }
+
+ return processOneCheckout(crepo, resolvedCommit, destination, opts, cancellable)
}
-// Processes one checkout from the repo
-func processOneCheckout(crepo *C.OstreeRepo, resolvedCommit *C.char, subpath, destination string, cancellable *glib.GCancellable) error {
+// processOneCheckout processes one checkout from the repo
+func processOneCheckout(crepo *C.OstreeRepo, resolvedCommit *C.char, destination string, opts checkoutOptions, cancellable *glib.GCancellable) error {
cdest := C.CString(destination)
defer C.free(unsafe.Pointer(cdest))
+
var gerr = glib.NewGError()
cerr := (*C.GError)(gerr.Ptr())
defer C.free(unsafe.Pointer(cerr))
- var repoCheckoutAtOptions C.OstreeRepoCheckoutAtOptions
- if checkoutOpts.UserMode {
+ // Process options into bitflags
+ var repoCheckoutAtOptions C.OstreeRepoCheckoutAtOptions
+ if opts.UserMode {
repoCheckoutAtOptions.mode = C.OSTREE_REPO_CHECKOUT_MODE_USER
}
- if checkoutOpts.Union {
+ if opts.Union {
repoCheckoutAtOptions.overwrite_mode = C.OSTREE_REPO_CHECKOUT_OVERWRITE_UNION_FILES
}
- checkedOut := glib.GoBool(glib.GBoolean(C.ostree_repo_checkout_at(crepo, &repoCheckoutAtOptions, C._at_fdcwd(), cdest, resolvedCommit, nil, &cerr)))
- if !checkedOut {
+ // Checkout commit to destination
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_checkout_at(crepo, &repoCheckoutAtOptions, C._at_fdcwd(), cdest, resolvedCommit, nil, &cerr))) {
return generateError(cerr)
}
return nil
}
-// process many checkouts
+// processManyCheckouts processes many checkouts in a single batch
func processManyCheckouts(crepo *C.OstreeRepo, target string, cancellable *glib.GCancellable) error {
- return nil
+ return errors.New("batch checkouts processing: not implemented")
}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
index 9550f802c..ccaff7a10 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
@@ -59,11 +59,11 @@ func NewCommitOptions() commitOptions {
}
type OstreeRepoTransactionStats struct {
- metadata_objects_total int32
+ metadata_objects_total int32
metadata_objects_written int32
- content_objects_total int32
- content_objects_written int32
- content_bytes_written uint64
+ content_objects_total int32
+ content_objects_written int32
+ content_bytes_written uint64
}
func (repo *Repo) PrepareTransaction() (bool, error) {
@@ -125,6 +125,7 @@ func (repo *Repo) RegenerateSummary() error {
// Commits a directory, specified by commitPath, to an ostree repo as a given branch
func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string, error) {
+ // TODO(lucab): `options` is global un-synchronized mutable state, get rid of it.
options = opts
var err error
@@ -140,7 +141,7 @@ func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string,
var cerr *C.GError
defer C.free(unsafe.Pointer(cerr))
var metadata *C.GVariant = nil
- defer func(){
+ defer func() {
if metadata != nil {
defer C.g_variant_unref(metadata)
}
@@ -196,7 +197,7 @@ func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string,
}
if options.AddDetachedMetadataString != nil {
- _, err := parseKeyValueStrings(options.AddDetachedMetadataString)
+ _, err = parseKeyValueStrings(options.AddDetachedMetadataString)
if err != nil {
goto out
}
@@ -476,7 +477,7 @@ func handleStatOverrideLine(line string, table *glib.GHashTable) error {
// Handle an individual line from a Skiplist file
func handleSkipListline(line string, table *glib.GHashTable) error {
- C.g_hash_table_add((*C.GHashTable)(table.Ptr()), C.gpointer( C.g_strdup((*C.gchar)(C.CString(line)))))
+ C.g_hash_table_add((*C.GHashTable)(table.Ptr()), C.gpointer(C.g_strdup((*C.gchar)(C.CString(line)))))
return nil
}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go
index c1ca2dc7e..6ee6671b4 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go
@@ -1,11 +1,8 @@
package otbuiltin
import (
- "errors"
"strings"
"unsafe"
-
- glib "github.com/ostreedev/ostree-go/pkg/glibobject"
)
// #cgo pkg-config: ostree-1
@@ -15,43 +12,37 @@ import (
// #include "builtin.go.h"
import "C"
-// Declare variables for options
-var initOpts initOptions
-
-// Contains all of the options for initializing an ostree repo
+// initOptions contains all of the options for initializing an ostree repo
+//
+// Note: while this is private, exported fields are public and part of the API.
type initOptions struct {
- Mode string // either bare, archive-z2, or bare-user
-
- repoMode C.OstreeRepoMode
+ // Mode defines repository mode: either bare, archive-z2, or bare-user
+ Mode string
}
-// Instantiates and returns an initOptions struct with default values set
+// NewInitOptions instantiates and returns an initOptions struct with default values set
func NewInitOptions() initOptions {
- io := initOptions{}
- io.Mode = "bare"
- io.repoMode = C.OSTREE_REPO_MODE_BARE
- return io
+ return initOptions{
+ Mode: "bare",
+ }
}
-// Initializes a new ostree repository at the given path. Returns true
+// Init initializes a new ostree repository at the given path. Returns true
// if the repo exists at the location, regardless of whether it was initialized
// by the function or if it already existed. Returns an error if the repo could
// not be initialized
func Init(path string, options initOptions) (bool, error) {
- initOpts = options
- err := parseMode()
+ repoMode, err := parseRepoMode(options.Mode)
if err != nil {
return false, err
}
// Create a repo struct from the path
- var cerr *C.GError
- defer C.free(unsafe.Pointer(cerr))
cpath := C.CString(path)
defer C.free(unsafe.Pointer(cpath))
pathc := C.g_file_new_for_path(cpath)
defer C.g_object_unref(C.gpointer(pathc))
- crepo := C.ostree_repo_new(pathc)
+ repo := C.ostree_repo_new(pathc)
// If the repo exists in the filesystem, return an error but set exists to true
/* var exists C.gboolean = 0
@@ -63,28 +54,31 @@ func Init(path string, options initOptions) (bool, error) {
return false, generateError(cerr)
}*/
- cerr = nil
- created := glib.GoBool(glib.GBoolean(C.ostree_repo_create(crepo, initOpts.repoMode, nil, &cerr)))
- if !created {
- errString := generateError(cerr).Error()
- if strings.Contains(errString, "File exists") {
- return true, generateError(cerr)
+ var cErr *C.GError
+ defer C.free(unsafe.Pointer(cErr))
+ if r := C.ostree_repo_create(repo, repoMode, nil, &cErr); !isOk(r) {
+ err := generateError(cErr)
+ if strings.Contains(err.Error(), "File exists") {
+ return true, err
}
- return false, generateError(cerr)
+ return false, err
}
return true, nil
}
-// Converts the mode string to a C.OSTREE_REPO_MODE enum value
-func parseMode() error {
- if strings.EqualFold(initOpts.Mode, "bare") {
- initOpts.repoMode = C.OSTREE_REPO_MODE_BARE
- } else if strings.EqualFold(initOpts.Mode, "bare-user") {
- initOpts.repoMode = C.OSTREE_REPO_MODE_BARE_USER
- } else if strings.EqualFold(initOpts.Mode, "archive-z2") {
- initOpts.repoMode = C.OSTREE_REPO_MODE_ARCHIVE_Z2
- } else {
- return errors.New("Invalid option for mode")
+// parseRepoMode converts a mode string to a C.OstreeRepoMode enum value
+func parseRepoMode(modeLabel string) (C.OstreeRepoMode, error) {
+ var cErr *C.GError
+ defer C.free(unsafe.Pointer(cErr))
+
+ cModeLabel := C.CString(modeLabel)
+ defer C.free(unsafe.Pointer(cModeLabel))
+
+ var retMode C.OstreeRepoMode
+ if r := C.ostree_repo_mode_from_string(cModeLabel, &retMode, &cErr); !isOk(r) {
+ // NOTE(lucab): zero-value for this C enum has no special/invalid meaning.
+ return C.OSTREE_REPO_MODE_BARE, generateError(cErr)
}
- return nil
+
+ return retMode, nil
}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go
index 2ceea0925..d57498215 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go
@@ -2,7 +2,6 @@ package otbuiltin
import (
"fmt"
- "strings"
"time"
"unsafe"
@@ -16,13 +15,7 @@ import (
// #include "builtin.go.h"
import "C"
-// Declare variables for options
-var logOpts logOptions
-
-// Set the format of the strings in the log
-const formatString = "2006-01-02 03:04;05 -0700"
-
-// Struct for the various pieces of data in a log entry
+// LogEntry holds the various pieces of data in a log entry
type LogEntry struct {
Checksum []byte
Variant []byte
@@ -39,24 +32,25 @@ func (l LogEntry) String() string {
return fmt.Sprintf("%s\n%s\n\n", l.Checksum, l.Variant)
}
-type OstreeDumpFlags uint
+type ostreeDumpFlags uint
const (
- OSTREE_DUMP_NONE OstreeDumpFlags = 0
- OSTREE_DUMP_RAW OstreeDumpFlags = 1 << iota
+ ostreeDumpNone ostreeDumpFlags = 0
+ ostreeDumpRaw ostreeDumpFlags = 1 << iota
)
-// Contains all of the options for initializing an ostree repo
+// logOptions contains all of the options for displaying the log of an ostree repo
type logOptions struct {
- Raw bool // Show raw variant data
+ // Raw determines whether to show raw variant data
+ Raw bool
}
-//Instantiates and returns a logOptions struct with default values set
+// NewLogOptions instantiates and returns a logOptions struct with default values set
func NewLogOptions() logOptions {
return logOptions{}
}
-// Show the logs of a branch starting with a given commit or ref. Returns a
+// Log shows the logs of a branch starting with a given commit or ref. Returns a
// slice of log entries on success and an error otherwise
func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) {
// attempt to open the repository
@@ -69,12 +63,12 @@ func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) {
defer C.free(unsafe.Pointer(cbranch))
var checksum *C.char
defer C.free(unsafe.Pointer(checksum))
- var flags OstreeDumpFlags = OSTREE_DUMP_NONE
var cerr *C.GError
defer C.free(unsafe.Pointer(cerr))
- if logOpts.Raw {
- flags |= OSTREE_DUMP_RAW
+ flags := ostreeDumpNone
+ if options.Raw {
+ flags |= ostreeDumpRaw
}
if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.FALSE, &checksum, &cerr))) {
@@ -84,84 +78,86 @@ func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) {
return logCommit(repo, checksum, false, flags)
}
-func logCommit(repo *Repo, checksum *C.char, isRecursive bool, flags OstreeDumpFlags) ([]LogEntry, error) {
+func logCommit(repo *Repo, checksum *C.char, isRecursive bool, flags ostreeDumpFlags) ([]LogEntry, error) {
var variant *C.GVariant
- var parent *C.char
- defer C.free(unsafe.Pointer(parent))
var gerr = glib.NewGError()
var cerr = (*C.GError)(gerr.Ptr())
defer C.free(unsafe.Pointer(cerr))
- entries := make([]LogEntry, 0, 1)
- var err error
if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, &variant, &cerr))) {
if isRecursive && glib.GoBool(glib.GBoolean(C.g_error_matches(cerr, C.g_io_error_quark(), C.G_IO_ERROR_NOT_FOUND))) {
return nil, nil
}
- return entries, generateError(cerr)
+ return nil, generateError(cerr)
}
- nextLogEntry := dumpLogObject(C.OSTREE_OBJECT_TYPE_COMMIT, checksum, variant, flags)
-
- // get the parent of this commit
- parent = (*C.char)(C.ostree_commit_get_parent(variant))
+ // Get the parent of this commit
+ parent := (*C.char)(C.ostree_commit_get_parent(variant))
defer C.free(unsafe.Pointer(parent))
+
+ entries := make([]LogEntry, 0, 1)
if parent != nil {
+ var err error
entries, err = logCommit(repo, parent, true, flags)
if err != nil {
return nil, err
}
}
- entries = append(entries, *nextLogEntry)
+
+ nextLogEntry := dumpLogObject(C.OSTREE_OBJECT_TYPE_COMMIT, checksum, variant, flags)
+ entries = append(entries, nextLogEntry)
+
return entries, nil
}
-func dumpLogObject(objectType C.OstreeObjectType, checksum *C.char, variant *C.GVariant, flags OstreeDumpFlags) *LogEntry {
- objLog := new(LogEntry)
- objLog.Checksum = []byte(C.GoString(checksum))
+func dumpLogObject(objectType C.OstreeObjectType, checksum *C.char, variant *C.GVariant, flags ostreeDumpFlags) LogEntry {
+ csum := []byte(C.GoString(checksum))
- if (flags & OSTREE_DUMP_RAW) != 0 {
- dumpVariant(objLog, variant)
- return objLog
+ if (flags & ostreeDumpRaw) != 0 {
+ return dumpVariant(variant, csum)
}
switch objectType {
case C.OSTREE_OBJECT_TYPE_COMMIT:
- dumpCommit(objLog, variant, flags)
- return objLog
+ return dumpCommit(variant, flags, csum)
default:
- return objLog
+ return LogEntry{
+ Checksum: csum,
+ }
}
}
-func dumpVariant(log *LogEntry, variant *C.GVariant) {
- var byteswappedVariant *C.GVariant
-
+func dumpVariant(variant *C.GVariant, csum []byte) LogEntry {
+ var logVariant []byte
if C.G_BYTE_ORDER != C.G_BIG_ENDIAN {
- byteswappedVariant = C.g_variant_byteswap(variant)
- log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE))))
+ byteswappedVariant := C.g_variant_byteswap(variant)
+ logVariant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE))))
} else {
- log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE))))
+ logVariant = []byte(C.GoString((*C.char)(C.g_variant_print(variant, C.TRUE))))
+ }
+
+ return LogEntry{
+ Checksum: csum,
+ Variant: logVariant,
}
}
-func dumpCommit(log *LogEntry, variant *C.GVariant, flags OstreeDumpFlags) {
- var subject, body *C.char
+func dumpCommit(variant *C.GVariant, flags ostreeDumpFlags, csum []byte) LogEntry {
+ var subject *C.char
defer C.free(unsafe.Pointer(subject))
+ var body *C.char
defer C.free(unsafe.Pointer(body))
- var timestamp C.guint64
+ var timeBigE C.guint64
- C._g_variant_get_commit_dump(variant, C.CString("(a{sv}aya(say)&s&stayay)"), &subject, &body, &timestamp)
+ C._g_variant_get_commit_dump(variant, C.CString("(a{sv}aya(say)&s&stayay)"), &subject, &body, &timeBigE)
- // Timestamp is now a Unix formatted timestamp as a guint64
- timestamp = C._guint64_from_be(timestamp)
- log.Timestamp = time.Unix((int64)(timestamp), 0)
-
- if strings.Compare(C.GoString(subject), "") != 0 {
- log.Subject = C.GoString(subject)
- }
+	// Translate to a host-endian epoch and convert it to a Go timestamp
+ timeHostE := C._guint64_from_be(timeBigE)
+ timestamp := time.Unix((int64)(timeHostE), 0)
- if strings.Compare(C.GoString(body), "") != 0 {
- log.Body = C.GoString(body)
+ return LogEntry{
+ Timestamp: timestamp,
+ Subject: C.GoString(subject),
+ Body: C.GoString(body),
}
}
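Again for orientation, a hedged sketch of the public entry point after this refactor: options now flow through the Log call explicitly rather than being read from the removed package-level logOpts, and entries come back oldest-first because logCommit appends each commit only after recursing into its parent. The path and branch below are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
)

func main() {
	opts := otbuiltin.NewLogOptions()
	opts.Raw = false // true dumps raw variant data (byteswapped on little-endian hosts)

	// Log resolves the branch ref to a checksum, then walks parent commits.
	entries, err := otbuiltin.Log("/tmp/example-repo", "example-branch", opts)
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		fmt.Print(entry) // LogEntry implements fmt.Stringer
	}
}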
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go
index 8dfa40a55..532522fc5 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go
@@ -145,7 +145,7 @@ func deleteCommit(repo *Repo, commitToDelete string, cancellable *glib.GCancella
}
}
- if err := enableTombstoneCommits(repo); err != nil {
+ if err := repo.enableTombstoneCommits(); err != nil {
return err
}
@@ -169,7 +169,7 @@ func pruneCommitsKeepYoungerThanDate(repo *Repo, date time.Time, cancellable *gl
var cerr = (*C.GError)(gerr.Ptr())
defer C.free(unsafe.Pointer(cerr))
- if err := enableTombstoneCommits(repo); err != nil {
+ if err := repo.enableTombstoneCommits(); err != nil {
return err
}
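The prune.go hunks are purely mechanical: the package-level helper enableTombstoneCommits(repo) becomes a method on the repo type, so the call sites above read as repo.enableTombstoneCommits(). A toy, self-contained sketch of the same move; the type and field here are invented for illustration and are not the upstream implementation:

package main

import "fmt"

// toyRepo stands in for the package's Repo type purely to show the refactor shape.
type toyRepo struct {
	tombstoneCommits bool
}

// Previously this logic would live in a free function taking *toyRepo as an
// explicit argument; as a method, the receiver is implicit at each call site.
func (r *toyRepo) enableTombstoneCommits() error {
	r.tombstoneCommits = true
	return nil
}

func main() {
	r := &toyRepo{}
	if err := r.enableTombstoneCommits(); err != nil {
		fmt.Println("enable failed:", err)
		return
	}
	fmt.Println("tombstone commits enabled:", r.tombstoneCommits)
}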
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go
+++ /dev/null