Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/containers/buildah/README.md | 1
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 1
-rw-r--r--  vendor/github.com/containers/buildah/chroot/run.go | 45
-rw-r--r--  vendor/github.com/containers/buildah/common.go | 28
-rw-r--r--  vendor/github.com/containers/buildah/delete.go | 3
-rw-r--r--  vendor/github.com/containers/buildah/image.go | 1
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go | 128
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go | 140
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/util.go | 25
-rw-r--r--  vendor/github.com/containers/buildah/new.go | 81
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go | 8
-rw-r--r--  vendor/github.com/containers/buildah/pull.go | 5
-rw-r--r--  vendor/github.com/containers/buildah/run.go | 195
-rw-r--r--  vendor/github.com/containers/buildah/unshare/unshare.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go | 61
-rw-r--r--  vendor/github.com/containers/buildah/vendor.conf | 10
-rw-r--r--  vendor/github.com/containers/storage/README.md | 2
-rw-r--r--  vendor/github.com/containers/storage/containers.go | 10
-rw-r--r--  vendor/github.com/containers/storage/containers_ffjson.go | 1
-rw-r--r--  vendor/github.com/containers/storage/drivers/aufs/aufs.go | 24
-rw-r--r--  vendor/github.com/containers/storage/drivers/btrfs/btrfs.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/deviceset.go | 15
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/driver.go | 5
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver.go | 1
-rw-r--r--  vendor/github.com/containers/storage/drivers/fsdiff.go | 5
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 25
-rw-r--r--  vendor/github.com/containers/storage/drivers/vfs/driver.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/windows/windows.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs.go | 9
-rw-r--r--  vendor/github.com/containers/storage/layers.go | 17
-rw-r--r--  vendor/github.com/containers/storage/layers_ffjson.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/example_changes.go | 97
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/parser.go | 56
-rw-r--r--  vendor/github.com/containers/storage/store.go | 118
-rw-r--r--  vendor/github.com/containers/storage/vendor.conf | 4
-rw-r--r--  vendor/github.com/google/shlex/COPYING | 202
-rw-r--r--  vendor/github.com/google/shlex/README | 2
-rw-r--r--  vendor/github.com/google/shlex/shlex.go | 416
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go | 6
-rw-r--r--  vendor/github.com/openshift/imagebuilder/builder.go | 39
-rw-r--r--  vendor/github.com/openshift/imagebuilder/dispatchers.go | 23
-rw-r--r--  vendor/github.com/openshift/imagebuilder/evaluator.go | 3
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go | 94
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h | 12
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go | 96
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go | 15
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go | 72
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go | 110
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go | 4
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go | 1
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go | 0
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go | 0
91 files changed, 1609 insertions, 646 deletions
diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md
index 6a79e524b..2b539bba8 100644
--- a/vendor/github.com/containers/buildah/README.md
+++ b/vendor/github.com/containers/buildah/README.md
@@ -107,6 +107,7 @@ $ sudo ./lighttpd.sh
| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. |
| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. |
| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. |
+| [buildah-pull(1)](/docs/buildah-pull.md) | Pull an image from the specified location. |
| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. |
| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. |
| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. |
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 9994d6cd0..1a642ed3d 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -224,6 +224,7 @@ func GetBuildInfo(b *Builder) BuilderInfo {
ContainerID: b.ContainerID,
MountPoint: b.MountPoint,
ProcessLabel: b.ProcessLabel,
+ MountLabel: b.MountLabel,
ImageAnnotations: b.ImageAnnotations,
ImageCreatedBy: b.ImageCreatedBy,
OCIv1: b.OCIv1,
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index 51e2d2bd4..6a1400e61 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -955,6 +955,20 @@ func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error {
return nil
}
+func makeReadOnly(mntpoint string, flags uintptr) error {
+ var fs unix.Statfs_t
+ // Make sure it's read-only.
+ if err := unix.Statfs(mntpoint, &fs); err != nil {
+ return errors.Wrapf(err, "error checking if directory %q was bound read-only", mntpoint)
+ }
+ if fs.Flags&unix.ST_RDONLY == 0 {
+ if err := unix.Mount(mntpoint, mntpoint, "bind", flags|unix.MS_REMOUNT, ""); err != nil {
+ return errors.Wrapf(err, "error remounting %s in mount namespace read-only", mntpoint)
+ }
+ }
+ return nil
+}
+
// setupChrootBindMounts actually bind mounts things under the rootfs, and returns a
// callback that will clean up its work.
func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func() error, err error) {
@@ -976,7 +990,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
bindFlags := commonFlags | unix.MS_NODEV
devFlags := commonFlags | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY
procFlags := devFlags | unix.MS_NODEV
- sysFlags := devFlags | unix.MS_NODEV | unix.MS_RDONLY
+ sysFlags := devFlags | unix.MS_NODEV
// Bind /dev read-only.
subDev := filepath.Join(spec.Root.Path, "/dev")
@@ -1030,13 +1044,22 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
return undoBinds, errors.Wrapf(err, "error bind mounting /sys from host into mount namespace")
}
}
- // Make sure it's read-only.
- if err = unix.Statfs(subSys, &fs); err != nil {
- return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subSys)
+ if err := makeReadOnly(subSys, sysFlags); err != nil {
+ return undoBinds, err
}
- if fs.Flags&unix.ST_RDONLY == 0 {
- if err := unix.Mount(subSys, subSys, "bind", sysFlags|unix.MS_REMOUNT, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error remounting /sys in mount namespace read-only")
+
+ mnts, _ := mount.GetMounts()
+ for _, m := range mnts {
+ if !strings.HasPrefix(m.Mountpoint, "/sys/") &&
+ m.Mountpoint != "/sys" {
+ continue
+ }
+ subSys := filepath.Join(spec.Root.Path, m.Mountpoint)
+ if err := unix.Mount(m.Mountpoint, subSys, "bind", sysFlags, ""); err != nil {
+ return undoBinds, errors.Wrapf(err, "error bind mounting /sys from host into mount namespace")
+ }
+ if err := makeReadOnly(subSys, sysFlags); err != nil {
+ return undoBinds, err
}
}
logrus.Debugf("bind mounted %q to %q", "/sys", filepath.Join(spec.Root.Path, "/sys"))
@@ -1044,10 +1067,6 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// Add /sys/fs/selinux to the set of masked paths, to ensure that we don't have processes
// attempting to interact with labeling, when they aren't allowed to do so.
spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/selinux")
- // Add /sys/fs/cgroup to the set of masked paths, to ensure that we don't have processes
- // attempting to mess with cgroup configuration, when they aren't allowed to do so.
- spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup")
-
// Bind mount in everything we've been asked to mount.
for _, m := range spec.Mounts {
// Skip anything that we just mounted.
@@ -1143,11 +1162,11 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
logrus.Debugf("mounted a tmpfs to %q", target)
}
if err = unix.Statfs(target, &fs); err != nil {
- return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subSys)
+ return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target)
}
if uintptr(fs.Flags)&expectedFlags != expectedFlags {
if err := unix.Mount(target, target, "bind", requestFlags|unix.MS_REMOUNT, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags")
+ return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags", target)
}
}
}
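
The makeReadOnly helper factored out above captures a common Linux idiom: statfs the mountpoint and, if ST_RDONLY is not already set, bind-remount it in place with MS_REMOUNT. A minimal standalone sketch of that idiom, separate from the patch and assuming golang.org/x/sys/unix, root privileges, and a real mountpoint argument:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

// ensureReadOnly bind-remounts mntpoint read-only if it is not already.
func ensureReadOnly(mntpoint string, flags uintptr) error {
	var fs unix.Statfs_t
	if err := unix.Statfs(mntpoint, &fs); err != nil {
		return fmt.Errorf("statfs %q: %v", mntpoint, err)
	}
	if fs.Flags&unix.ST_RDONLY != 0 {
		return nil // already read-only
	}
	// MS_REMOUNT on a bind mount only changes the per-mountpoint flags.
	return unix.Mount(mntpoint, mntpoint, "bind", flags|unix.MS_REMOUNT, "")
}

func main() {
	if len(os.Args) < 2 {
		log.Fatal("usage: roremount <mountpoint>")
	}
	flags := uintptr(unix.MS_BIND | unix.MS_RDONLY | unix.MS_NOSUID | unix.MS_NODEV)
	if err := ensureReadOnly(os.Args[1], flags); err != nil {
		log.Fatal(err)
	}
}

Because the remount only touches per-mountpoint flags, the same trick can be applied to each /sys submount bound in by the new loop in this hunk.
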
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
index 56a901925..be59215df 100644
--- a/vendor/github.com/containers/buildah/common.go
+++ b/vendor/github.com/containers/buildah/common.go
@@ -2,12 +2,14 @@ package buildah
import (
"io"
-
- "github.com/sirupsen/logrus"
+ "os"
+ "path/filepath"
cp "github.com/containers/image/copy"
"github.com/containers/image/transports"
"github.com/containers/image/types"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/sirupsen/logrus"
)
const (
@@ -17,10 +19,20 @@ const (
DOCKER = "docker"
)
+// userRegistriesFile is the path to the per user registry configuration file.
+var userRegistriesFile = filepath.Join(os.Getenv("HOME"), ".config/containers/registries.conf")
+
func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference, sourceSystemContext *types.SystemContext, destinationReference types.ImageReference, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options {
sourceCtx := &types.SystemContext{}
if sourceSystemContext != nil {
*sourceCtx = *sourceSystemContext
+ } else {
+ if rootless.IsRootless() {
+ if _, err := os.Stat(userRegistriesFile); err == nil {
+ sourceCtx.SystemRegistriesConfPath = userRegistriesFile
+ }
+
+ }
}
sourceInsecure, err := isReferenceInsecure(sourceReference, sourceCtx)
if err != nil {
@@ -33,6 +45,12 @@ func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference
destinationCtx := &types.SystemContext{}
if destinationSystemContext != nil {
*destinationCtx = *destinationSystemContext
+ } else {
+ if rootless.IsRootless() {
+ if _, err := os.Stat(userRegistriesFile); err == nil {
+ destinationCtx.SystemRegistriesConfPath = userRegistriesFile
+ }
+ }
}
destinationInsecure, err := isReferenceInsecure(destinationReference, destinationCtx)
if err != nil {
@@ -58,5 +76,11 @@ func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string)
if signaturePolicyPath != "" {
sc.SignaturePolicyPath = signaturePolicyPath
}
+ if sc.SystemRegistriesConfPath == "" && rootless.IsRootless() {
+ if _, err := os.Stat(userRegistriesFile); err == nil {
+ sc.SystemRegistriesConfPath = userRegistriesFile
+ }
+
+ }
return sc
}
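
The three blocks added above all make the same decision: when running rootless and no explicit SystemContext or registries path was supplied, prefer $HOME/.config/containers/registries.conf if it exists. A rough standalone sketch of that check, using os.Geteuid() as a simplified stand-in for libpod's rootless.IsRootless():

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// userRegistriesConf returns the per-user registries.conf path if the
// process is not running as root and the file exists, otherwise "".
func userRegistriesConf() string {
	if os.Geteuid() == 0 { // simplified rootless check
		return ""
	}
	path := filepath.Join(os.Getenv("HOME"), ".config/containers/registries.conf")
	if _, err := os.Stat(path); err != nil {
		return ""
	}
	return path
}

func main() {
	if p := userRegistriesConf(); p != "" {
		fmt.Println("using per-user registries config:", p)
	} else {
		fmt.Println("falling back to the system registries config")
	}
}
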
diff --git a/vendor/github.com/containers/buildah/delete.go b/vendor/github.com/containers/buildah/delete.go
index 25f76cf74..e3bddba20 100644
--- a/vendor/github.com/containers/buildah/delete.go
+++ b/vendor/github.com/containers/buildah/delete.go
@@ -1,7 +1,6 @@
package buildah
import (
- "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
@@ -14,5 +13,5 @@ func (b *Builder) Delete() error {
b.MountPoint = ""
b.Container = ""
b.ContainerID = ""
- return label.ReleaseLabel(b.ProcessLabel)
+ return nil
}
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 31aff9eea..c0bf90ddd 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -107,7 +107,6 @@ func expectedDockerDiffIDs(image docker.V2Image) int {
// compression that we'll be applying.
func (i *containerImageRef) computeLayerMIMEType(what string) (omediaType, dmediaType string, err error) {
omediaType = v1.MediaTypeImageLayer
- //TODO: Convert to manifest.DockerV2Schema2LayerUncompressedMediaType once available
dmediaType = docker.V2S2MediaTypeUncompressedLayer
if i.compression != archive.Uncompressed {
switch i.compression {
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 41d85cbc6..701241683 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -222,7 +222,7 @@ type Executor struct {
forceRmIntermediateCtrs bool
containerIDs []string // Stores the IDs of the successful intermediate containers used during layer build
imageMap map[string]string // Used to map images that we create to handle the AS construct.
-
+ copyFrom string // Used to keep track of the --from flag from COPY and ADD
}
// withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME.
@@ -563,39 +563,39 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
registry: options.Registry,
transport: options.Transport,
ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
- quiet: options.Quiet,
- runtime: options.Runtime,
- runtimeArgs: options.RuntimeArgs,
- transientMounts: options.TransientMounts,
- compression: options.Compression,
- output: options.Output,
- outputFormat: options.OutputFormat,
- additionalTags: options.AdditionalTags,
- signaturePolicyPath: options.SignaturePolicyPath,
- systemContext: options.SystemContext,
- volumeCache: make(map[string]string),
- volumeCacheInfo: make(map[string]os.FileInfo),
- log: options.Log,
- in: options.In,
- out: options.Out,
- err: options.Err,
- reportWriter: options.ReportWriter,
- isolation: options.Isolation,
- namespaceOptions: options.NamespaceOptions,
- configureNetwork: options.ConfigureNetwork,
- cniPluginPath: options.CNIPluginPath,
- cniConfigDir: options.CNIConfigDir,
- idmappingOptions: options.IDMappingOptions,
- commonBuildOptions: options.CommonBuildOpts,
- defaultMountsFilePath: options.DefaultMountsFilePath,
- iidfile: options.IIDFile,
- squash: options.Squash,
- labels: append([]string{}, options.Labels...),
- annotations: append([]string{}, options.Annotations...),
- layers: options.Layers,
- noCache: options.NoCache,
- removeIntermediateCtrs: options.RemoveIntermediateCtrs,
- forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
+ quiet: options.Quiet,
+ runtime: options.Runtime,
+ runtimeArgs: options.RuntimeArgs,
+ transientMounts: options.TransientMounts,
+ compression: options.Compression,
+ output: options.Output,
+ outputFormat: options.OutputFormat,
+ additionalTags: options.AdditionalTags,
+ signaturePolicyPath: options.SignaturePolicyPath,
+ systemContext: options.SystemContext,
+ volumeCache: make(map[string]string),
+ volumeCacheInfo: make(map[string]os.FileInfo),
+ log: options.Log,
+ in: options.In,
+ out: options.Out,
+ err: options.Err,
+ reportWriter: options.ReportWriter,
+ isolation: options.Isolation,
+ namespaceOptions: options.NamespaceOptions,
+ configureNetwork: options.ConfigureNetwork,
+ cniPluginPath: options.CNIPluginPath,
+ cniConfigDir: options.CNIConfigDir,
+ idmappingOptions: options.IDMappingOptions,
+ commonBuildOptions: options.CommonBuildOpts,
+ defaultMountsFilePath: options.DefaultMountsFilePath,
+ iidfile: options.IIDFile,
+ squash: options.Squash,
+ labels: append([]string{}, options.Labels...),
+ annotations: append([]string{}, options.Annotations...),
+ layers: options.Layers,
+ noCache: options.NoCache,
+ removeIntermediateCtrs: options.RemoveIntermediateCtrs,
+ forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
}
if exec.err == nil {
exec.err = os.Stderr
@@ -764,7 +764,7 @@ func (b *Executor) resolveNameToImageRef() (types.ImageReference, error) {
if err != nil {
candidates, _, err := util.ResolveName(b.output, "", b.systemContext, b.store)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing target image name %q: %v", b.output)
+ return nil, errors.Wrapf(err, "error parsing target image name %q", b.output)
}
if len(candidates) == 0 {
return nil, errors.Errorf("error parsing target image name %q", b.output)
@@ -826,6 +826,18 @@ func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error
err error
imgID string
)
+
+ b.copyFrom = ""
+ // Check if --from exists in the step command of COPY or ADD
+ // If it exists, set b.copyfrom to that value
+ for _, n := range step.Flags {
+ if strings.Contains(n, "--from") && (step.Command == "copy" || step.Command == "add") {
+ arr := strings.Split(n, "=")
+ b.copyFrom = b.named[arr[1]].mountPoint
+ break
+ }
+ }
+
// checkForLayers will be true if b.layers is true and a cached intermediate image is found.
// checkForLayers is set to false when either there is no cached image or a break occurs where
// the instructions in the Dockerfile change from a previous build.
@@ -848,6 +860,7 @@ func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error
if err := b.copyExistingImage(ctx, cacheID); err != nil {
return err
}
+ b.containerIDs = append(b.containerIDs, b.builder.ContainerID)
break
}
@@ -1009,6 +1022,11 @@ func (b *Executor) getFilesToCopy(node *parser.Node) ([]string, error) {
currNode = currNode.Next
continue
}
+ if b.copyFrom != "" {
+ src = append(src, filepath.Join(b.copyFrom, currNode.Value))
+ currNode = currNode.Next
+ continue
+ }
matches, err := filepath.Glob(filepath.Join(b.contextDir, currNode.Value))
if err != nil {
return nil, errors.Wrapf(err, "error finding match for pattern %q", currNode.Value)
@@ -1044,17 +1062,19 @@ func (b *Executor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) (
}
continue
}
- // For local files, walk the file tree and check the time stamps.
- timeIsGreater := false
- err := filepath.Walk(item, func(path string, info os.FileInfo, err error) error {
- if info.ModTime().After(*historyTime) {
- timeIsGreater = true
- return nil
- }
- return nil
- })
+ // Walks the file tree for local files and uses chroot to ensure we don't escape out of the allowed path
+ // when resolving any symlinks.
+ // Change the time format to ensure we don't run into a parsing error when converting again from string
+ // to time.Time. It is a known Go issue that the conversions cause errors sometimes, so specifying a particular
+ // time format here when converting to a string.
+ // If the COPY has --from in the command, change the rootdir to mountpoint of the container it is copying from
+ rootdir := b.contextDir
+ if b.copyFrom != "" {
+ rootdir = b.copyFrom
+ }
+ timeIsGreater, err := resolveModifiedTime(rootdir, item, historyTime.Format(time.RFC3339Nano))
if err != nil {
- return false, errors.Wrapf(err, "error walking file tree %q", item)
+ return false, errors.Wrapf(err, "error resolving symlinks and comparing modified times: %q", item)
}
if timeIsGreater {
return false, nil
@@ -1289,15 +1309,24 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
} else {
// If the Dockerfile isn't found try prepending the
// context directory to it.
- if _, err := os.Stat(dfile); os.IsNotExist(err) {
+ dinfo, err := os.Stat(dfile)
+ if os.IsNotExist(err) {
dfile = filepath.Join(options.ContextDirectory, dfile)
}
+ dinfo, err = os.Stat(dfile)
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "error reading info about %q", dfile)
+ }
+ // If given a directory, add '/Dockerfile' to it.
+ if dinfo.Mode().IsDir() {
+ dfile = filepath.Join(dfile, "Dockerfile")
+ }
logrus.Debugf("reading local Dockerfile %q", dfile)
contents, err := os.Open(dfile)
if err != nil {
return "", nil, errors.Wrapf(err, "error reading %q", dfile)
}
- dinfo, err := contents.Stat()
+ dinfo, err = contents.Stat()
if err != nil {
contents.Close()
return "", nil, errors.Wrapf(err, "error reading info about %q", dfile)
@@ -1336,7 +1365,10 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
return "", nil, errors.Wrapf(err, "error creating build executor")
}
b := imagebuilder.NewBuilder(options.Args)
- stages := imagebuilder.NewStages(mainNode, b)
+ stages, err := imagebuilder.NewStages(mainNode, b)
+ if err != nil {
+ return "", nil, errors.Wrap(err, "error reading multiple stages")
+ }
return exec.Build(ctx, stages)
}
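
The BuildDockerfiles change above tightens how a local Dockerfile path is resolved: try it as given, fall back to joining it with the context directory, and treat a directory argument as containing a file named "Dockerfile". A small sketch of that lookup order, with the surrounding executor plumbing left out:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveDockerfile mirrors the lookup order in the hunk above: the path
// as given, then relative to the context directory, then a directory is
// taken to contain a file named "Dockerfile".
func resolveDockerfile(contextDir, dfile string) (string, error) {
	if _, err := os.Stat(dfile); os.IsNotExist(err) {
		dfile = filepath.Join(contextDir, dfile)
	}
	info, err := os.Stat(dfile)
	if err != nil {
		return "", fmt.Errorf("reading info about %q: %v", dfile, err)
	}
	if info.Mode().IsDir() {
		dfile = filepath.Join(dfile, "Dockerfile")
	}
	return dfile, nil
}

func main() {
	path, err := resolveDockerfile(".", "Dockerfile")
	fmt.Println(path, err)
}
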
diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
index 20e396f1f..edb5837db 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
@@ -6,6 +6,7 @@ import (
"os"
"path/filepath"
"strings"
+ "time"
"github.com/containers/storage/pkg/reexec"
"github.com/pkg/errors"
@@ -14,13 +15,18 @@ import (
const (
symlinkChrootedCommand = "chrootsymlinks-resolve"
+ symlinkModifiedTime = "modtimesymlinks-resolve"
maxSymlinksResolved = 40
)
func init() {
reexec.Register(symlinkChrootedCommand, resolveChrootedSymlinks)
+ reexec.Register(symlinkModifiedTime, resolveSymlinkTimeModified)
}
+// main() for grandparent subprocess. Its main job is to shuttle stdio back
+// and forth, managing a pseudo-terminal if we want one, for our child, the
+// parent subprocess.
func resolveChrootedSymlinks() {
status := 0
flag.Parse()
@@ -39,7 +45,7 @@ func resolveChrootedSymlinks() {
}
// Our second parameter is the path name to evaluate for symbolic links
- symLink, err := getSymbolicLink(flag.Arg(0), flag.Arg(1))
+ symLink, err := getSymbolicLink(flag.Arg(1))
if err != nil {
fmt.Fprintf(os.Stderr, "error getting symbolic links: %v\n", err)
os.Exit(1)
@@ -51,7 +57,8 @@ func resolveChrootedSymlinks() {
os.Exit(status)
}
-// ResolveSymlink resolves any symlink in filename in the context of rootdir.
+// ResolveSymLink (in the grandparent process) resolves any symlink in filename
+// in the context of rootdir.
func ResolveSymLink(rootdir, filename string) (string, error) {
// The child process expects a chroot and one path that
// will be consulted relative to the chroot directory and evaluated
@@ -62,32 +69,128 @@ func ResolveSymLink(rootdir, filename string) (string, error) {
return "", errors.Wrapf(err, string(output))
}
- // Hand back the resolved symlink, will be "" if a symlink is not found
+ // Hand back the resolved symlink, will be filename if a symlink is not found
return string(output), nil
}
+// main() for grandparent subprocess. Its main job is to shuttle stdio back
+// and forth, managing a pseudo-terminal if we want one, for our child, the
+// parent subprocess.
+func resolveSymlinkTimeModified() {
+ status := 0
+ flag.Parse()
+ if len(flag.Args()) < 1 {
+ os.Exit(1)
+ }
+ // Our first parameter is the directory to chroot into.
+ if err := unix.Chdir(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chdir(): %v\n", err)
+ os.Exit(1)
+ }
+ if err := unix.Chroot(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chroot(): %v\n", err)
+ os.Exit(1)
+ }
+
+ // Our second parameter is the path name to evaluate for symbolic links.
+ // Our third parameter is the time the cached intermediate image was created.
+ // We check whether the modified time of the filepath we provide is after the time the cached image was created.
+ timeIsGreater, err := modTimeIsGreater(flag.Arg(0), flag.Arg(1), flag.Arg(2))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error checking if modified time of resolved symbolic link is greater: %v\n", err)
+ os.Exit(1)
+ }
+ if _, err := os.Stdout.WriteString(fmt.Sprintf("%v", timeIsGreater)); err != nil {
+ fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err)
+ os.Exit(1)
+ }
+ os.Exit(status)
+}
+
+// resolveModifiedTime (in the grandparent process) checks filename for any symlinks,
+// resolves it and compares the modified time of the file with historyTime, which is
+// the creation time of the cached image. It returns true if filename was modified after
+// historyTime, otherwise returns false.
+func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) {
+ // The child process expects a chroot and one path that
+ // will be consulted relative to the chroot directory and evaluated
+ // for any symbolic links present.
+ cmd := reexec.Command(symlinkModifiedTime, rootdir, filename, historyTime)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return false, errors.Wrapf(err, string(output))
+ }
+ // Hand back true/false depending on whether the file was modified after the cached image was created.
+ return string(output) == "true", nil
+}
+
+// modTimeIsGreater goes through the files added/copied in using the Dockerfile and
+// checks the time stamp (follows symlinks) with the time stamp of when the cached
+// image was created. It compares the two and returns true if the file was modified
+// after the cached image was created, otherwise it returns false.
+func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) {
+ var timeIsGreater bool
+
+ // Convert historyTime from string to time.Time for comparison
+ histTime, err := time.Parse(time.RFC3339Nano, historyTime)
+ if err != nil {
+ return false, errors.Wrapf(err, "error converting string to time.Time %q", historyTime)
+ }
+ // Walk the file tree and check the time stamps.
+ // Since we are chroot in rootdir, only want the path of the actual filename, i.e path - rootdir.
+ // +1 to account for the extra "/" (e.g rootdir=/home/user/mydir, path=/home/user/mydir/myfile.json)
+ err = filepath.Walk(path[len(rootdir)+1:], func(path string, info os.FileInfo, err error) error {
+ // If using cached images, it is possible for files that are being copied to come from
+ // previous build stages. But if using cached images, then the copied file won't exist
+ // since a container won't have been created for the previous build stage and info will be nil.
+ // In that case just return nil and continue on with using the cached image for the whole build process.
+ if info == nil {
+ return nil
+ }
+ modTime := info.ModTime()
+ if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+ // Evaluate any symlink that occurs to get updated modified information
+ resolvedPath, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ return errors.Wrapf(err, "error evaluating symlink %q", path)
+ }
+ fileInfo, err := os.Stat(resolvedPath)
+ if err != nil {
+ return errors.Wrapf(err, "error getting file info %q", resolvedPath)
+ }
+ modTime = fileInfo.ModTime()
+ }
+ if modTime.After(histTime) {
+ timeIsGreater = true
+ return nil
+ }
+ return nil
+ })
+ if err != nil {
+ return false, errors.Wrapf(err, "error walking file tree %q", path)
+ }
+ return timeIsGreater, err
+}
+
// getSymbolic link goes through each part of the path and continues resolving symlinks as they appear.
// Returns what the whole target path for what "path" resolves to.
-func getSymbolicLink(rootdir, path string) (string, error) {
+func getSymbolicLink(path string) (string, error) {
var (
symPath string
symLinksResolved int
)
-
- // Splitting path as we need to resolve each parth of the path at a time
+ // Splitting path as we need to resolve each part of the path at a time
splitPath := strings.Split(path, "/")
if splitPath[0] == "" {
splitPath = splitPath[1:]
symPath = "/"
}
-
for _, p := range splitPath {
// If we have resolved 40 symlinks, that means something is terribly wrong
// will return an error and exit
if symLinksResolved >= maxSymlinksResolved {
return "", errors.Errorf("have resolved %q symlinks, something is terribly wrong!", maxSymlinksResolved)
}
-
symPath = filepath.Join(symPath, p)
isSymlink, resolvedPath, err := hasSymlink(symPath)
if err != nil {
@@ -119,16 +222,21 @@ func getSymbolicLink(rootdir, path string) (string, error) {
// otherwise it returns false and path
func hasSymlink(path string) (bool, string, error) {
info, err := os.Lstat(path)
- if os.IsNotExist(err) {
- if err = os.MkdirAll(path, 0755); err != nil {
- return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path)
- }
- info, err = os.Lstat(path)
- if err != nil {
- return false, "", errors.Wrapf(err, "error running lstat on %q", path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ if err = os.MkdirAll(path, 0755); err != nil {
+ return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path)
+ }
+ info, err = os.Lstat(path)
+ if err != nil {
+ return false, "", errors.Wrapf(err, "error running lstat on %q", path)
+ }
+ } else {
+ return false, path, errors.Wrapf(err, "error get stat of path %q", path)
}
}
- // Return false and path as path is not a symlink
+
+ // Return false and path as path if not a symlink
if info.Mode()&os.ModeSymlink != os.ModeSymlink {
return false, path, nil
}
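
resolveModifiedTime and modTimeIsGreater above boil down to: walk the copied tree, follow any symlink to its target, and report whether any modification time is newer than the cached image's creation time (which is passed around as an RFC3339Nano string so it survives the reexec boundary). A sketch of just that comparison, without the chroot and reexec plumbing:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// modifiedAfter reports whether any file under root (following symlinks)
// was modified after the given cutoff time.
func modifiedAfter(root string, cutoff time.Time) (bool, error) {
	newer := false
	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil || info == nil {
			return err
		}
		modTime := info.ModTime()
		if info.Mode()&os.ModeSymlink != 0 {
			// Use the target's timestamp, not the link's.
			if resolved, err := filepath.EvalSymlinks(path); err == nil {
				if fi, err := os.Stat(resolved); err == nil {
					modTime = fi.ModTime()
				}
			}
		}
		if modTime.After(cutoff) {
			newer = true
		}
		return nil
	})
	return newer, err
}

func main() {
	cutoff, _ := time.Parse(time.RFC3339Nano, "2018-10-01T00:00:00.000000000Z")
	fmt.Println(modifiedAfter(".", cutoff))
}
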
diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go
index 35dc5438a..4f5301b73 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/util.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/util.go
@@ -111,3 +111,28 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
func InitReexec() bool {
return buildah.InitReexec()
}
+
+// ReposToMap parses the specified repotags and returns a map with repositories
+// as keys and the corresponding arrays of tags as values.
+func ReposToMap(repotags []string) map[string][]string {
+ // map format is repo -> tag
+ repos := make(map[string][]string)
+ for _, repo := range repotags {
+ var repository, tag string
+ if strings.Contains(repo, ":") {
+ li := strings.LastIndex(repo, ":")
+ repository = repo[0:li]
+ tag = repo[li+1:]
+ } else if len(repo) > 0 {
+ repository = repo
+ tag = "<none>"
+ } else {
+ logrus.Warnf("Found image with empty name")
+ }
+ repos[repository] = append(repos[repository], tag)
+ }
+ if len(repos) == 0 {
+ repos["<none>"] = []string{"<none>"}
+ }
+ return repos
+}
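
ReposToMap splits each repository:tag reference on the last colon, so a registry port (for example localhost:5000/img:dev) stays inside the repository part. A short sketch of that split on plain strings:

package main

import (
	"fmt"
	"strings"
)

// splitRepoTag splits a repo:tag reference on the last colon, leaving a
// registry port in the repository part intact.
func splitRepoTag(repotag string) (repo, tag string) {
	if i := strings.LastIndex(repotag, ":"); i >= 0 {
		return repotag[:i], repotag[i+1:]
	}
	return repotag, "<none>"
}

func main() {
	for _, r := range []string{"docker.io/library/alpine:3.8", "localhost:5000/img:dev", "plainrepo"} {
		repo, tag := splitRepoTag(r)
		fmt.Printf("%-30s -> repo=%q tag=%q\n", r, repo, tag)
	}
}

Like the original, this assumes an untagged reference contains no colon at all; repotags coming from storage always carry a tag, so that case does not arise there.
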
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 8b0e774ba..13bebf420 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -3,6 +3,7 @@ package buildah
import (
"context"
"fmt"
+ "math/rand"
"strings"
"github.com/containers/buildah/util"
@@ -12,7 +13,6 @@ import (
"github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
"github.com/containers/storage"
- "github.com/opencontainers/selinux/go-selinux/label"
"github.com/openshift/imagebuilder"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -23,11 +23,6 @@ const (
// as "no image".
BaseImageFakeName = imagebuilder.NoBaseImageSpecifier
- // DefaultTransport is a prefix that we apply to an image name if we
- // can't find one in the local Store, in order to generate a source
- // reference for the image that we can then copy to the local Store.
- DefaultTransport = "docker://"
-
// minimumTruncatedIDLength is the minimum length of an identifier that
// we'll accept as possibly being a truncated image ID.
minimumTruncatedIDLength = 3
@@ -150,7 +145,7 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
}
logrus.Debugf("error parsing image name %q as given, trying with transport %q: %v", image, options.Transport, err)
transport := options.Transport
- if transport != DefaultTransport {
+ if transport != util.DefaultTransport {
transport = transport + ":"
}
srcRef2, err := alltransports.ParseImageName(transport + image)
@@ -232,6 +227,27 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
}
}
+func containerNameExist(name string, containers []storage.Container) bool {
+ for _, container := range containers {
+ for _, cname := range container.Names {
+ if cname == name {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func findUnusedContainer(name string, containers []storage.Container) string {
+ suffix := 1
+ tmpName := name
+ for containerNameExist(tmpName, containers) {
+ tmpName = fmt.Sprintf("%s-%d", name, suffix)
+ suffix++
+ }
+ return tmpName
+}
+
func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
var ref types.ImageReference
var img *storage.Image
@@ -241,7 +257,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
options.FromImage = ""
}
if options.Transport == "" {
- options.Transport = DefaultTransport
+ options.Transport = util.DefaultTransport
}
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
@@ -277,23 +293,33 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
name = imageNamePrefix(image) + "-" + name
}
}
+ var container *storage.Container
+ tmpName := name
+ if options.Container == "" {
+ containers, err := store.Containers()
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to check for container names")
+ }
+ tmpName = findUnusedContainer(tmpName, containers)
+ }
- coptions := storage.ContainerOptions{}
- coptions.IDMappingOptions = newContainerIDMappingOptions(options.IDMappingOptions)
-
- container, err := store.CreateContainer("", []string{name}, imageID, "", "", &coptions)
- suffix := 1
- for err != nil && errors.Cause(err) == storage.ErrDuplicateName && options.Container == "" {
- suffix++
- tmpName := fmt.Sprintf("%s-%d", name, suffix)
- if container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions); err == nil {
+ conflict := 100
+ for true {
+ coptions := storage.ContainerOptions{
+ LabelOpts: options.CommonBuildOpts.LabelOpts,
+ IDMappingOptions: newContainerIDMappingOptions(options.IDMappingOptions),
+ }
+ container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions)
+ if err == nil {
name = tmpName
+ break
}
+ if errors.Cause(err) != storage.ErrDuplicateName || options.Container != "" {
+ return nil, errors.Wrapf(err, "error creating container")
+ }
+ tmpName = fmt.Sprintf("%s-%d", name, rand.Int()%conflict)
+ conflict = conflict * 10
}
- if err != nil {
- return nil, errors.Wrapf(err, "error creating container")
- }
-
defer func() {
if err != nil {
if err2 := store.DeleteContainer(container.ID); err2 != nil {
@@ -302,13 +328,6 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
}
}()
- if err = ReserveSELinuxLabels(store, container.ID); err != nil {
- return nil, err
- }
- processLabel, mountLabel, err := label.InitLabels(options.CommonBuildOpts.LabelOpts)
- if err != nil {
- return nil, err
- }
uidmap, gidmap := convertStorageIDMaps(container.UIDMap, container.GIDMap)
defaultNamespaceOptions, err := DefaultNamespaceOptions()
@@ -328,8 +347,8 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
ContainerID: container.ID,
ImageAnnotations: map[string]string{},
ImageCreatedBy: "",
- ProcessLabel: processLabel,
- MountLabel: mountLabel,
+ ProcessLabel: container.ProcessLabel(),
+ MountLabel: container.MountLabel(),
DefaultMountsFilePath: options.DefaultMountsFilePath,
Isolation: options.Isolation,
NamespaceOptions: namespaceOptions,
@@ -351,7 +370,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
}
if options.Mount {
- _, err = builder.Mount(mountLabel)
+ _, err = builder.Mount(container.MountLabel())
if err != nil {
return nil, errors.Wrapf(err, "error mounting build container %q", builder.ContainerID)
}
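
The replacement logic in newBuilder first probes existing container names for a free one (findUnusedContainer) and only then lets CreateContainer arbitrate any remaining race by retrying with a random suffix. The probing half is a plain linear scan; a sketch against an in-memory name list standing in for store.Containers():

package main

import "fmt"

func nameExists(name string, existing []string) bool {
	for _, n := range existing {
		if n == name {
			return true
		}
	}
	return false
}

// firstUnusedName appends "-1", "-2", ... until the name is free.
func firstUnusedName(name string, existing []string) string {
	candidate := name
	for suffix := 1; nameExists(candidate, existing); suffix++ {
		candidate = fmt.Sprintf("%s-%d", name, suffix)
	}
	return candidate
}

func main() {
	existing := []string{"alpine-working-container", "alpine-working-container-1"}
	fmt.Println(firstUnusedName("alpine-working-container", existing))
	// prints: alpine-working-container-2
}
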
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index b54663f5d..03b340294 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -71,6 +71,10 @@ var (
LayerFlags = []cli.Flag{
cli.BoolFlag{
+ Name: "force-rm",
+ Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.",
+ },
+ cli.BoolFlag{
Name: "layers",
Usage: fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. (default %t)", UseLayers()),
},
@@ -115,10 +119,6 @@ var (
Name: "file, f",
Usage: "`pathname or URL` of a Dockerfile",
},
- cli.BoolFlag{
- Name: "force-rm",
- Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.",
- },
cli.StringFlag{
Name: "format",
Usage: "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.",
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index 52269541a..1a51edb0e 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -146,11 +146,11 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio
srcRef, err := alltransports.ParseImageName(spec)
if err != nil {
if options.Transport == "" {
- options.Transport = DefaultTransport
+ options.Transport = util.DefaultTransport
}
logrus.Debugf("error parsing image name %q, trying with transport %q: %v", spec, options.Transport, err)
transport := options.Transport
- if transport != DefaultTransport {
+ if transport != util.DefaultTransport {
transport = transport + ":"
}
spec = transport + spec
@@ -201,6 +201,7 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio
logrus.Debugf("copying %q to %q", spec, destName)
if _, err := cp.Image(ctx, policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter, srcRef, sc, destRef, nil, "")); err != nil {
+ logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", spec, destName, err)
return nil, err
}
return destRef, nil
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index 718ef4e36..5d2cd6a32 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -451,7 +451,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
copyWithTar := b.copyWithTar(nil, nil)
- builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes)
+ builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID))
if err != nil {
return err
}
@@ -493,15 +493,21 @@ func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts
return mounts, nil
}
-func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string) ([]specs.Mount, error) {
+func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
var mounts []specs.Mount
+ hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID}
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
for _, volume := range builtinVolumes {
subdir := digest.Canonical.FromString(volume).Hex()
volumePath := filepath.Join(containerDir, "buildah-volumes", subdir)
+ srcPath := filepath.Join(mountPoint, volume)
+ initializeVolume := false
// If we need to, initialize the volume path's initial contents.
- if _, err := os.Stat(volumePath); err != nil && os.IsNotExist(err) {
+ if _, err := os.Stat(volumePath); err != nil {
+ if !os.IsNotExist(err) {
+ return nil, errors.Wrapf(err, "failed to stat %q for volume %q", volumePath, volume)
+ }
logrus.Debugf("setting up built-in volume at %q", volumePath)
if err = os.MkdirAll(volumePath, 0755); err != nil {
return nil, errors.Wrapf(err, "error creating directory %q for volume %q", volumePath, volume)
@@ -509,11 +515,21 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWit
if err = label.Relabel(volumePath, mountLabel, false); err != nil {
return nil, errors.Wrapf(err, "error relabeling directory %q for volume %q", volumePath, volume)
}
- srcPath := filepath.Join(mountPoint, volume)
- stat, err := os.Stat(srcPath)
- if err != nil {
+ initializeVolume = true
+ }
+ stat, err := os.Stat(srcPath)
+ if err != nil {
+ if !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume)
}
+ if err = idtools.MkdirAllAndChownNew(srcPath, 0755, hostOwner); err != nil {
+ return nil, errors.Wrapf(err, "error creating directory %q for volume %q", srcPath, volume)
+ }
+ if stat, err = os.Stat(srcPath); err != nil {
+ return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume)
+ }
+ }
+ if initializeVolume {
if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil {
return nil, errors.Wrapf(err, "failed to chmod %q for volume %q", volumePath, volume)
}
@@ -1044,24 +1060,31 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}
- hostFile, err := b.addNetworkConfig(path, "/etc/hosts", rootIDPair)
- if err != nil {
- return err
- }
- resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair)
- if err != nil {
- return err
- }
+ bindFiles := make(map[string]string)
+ namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...)
+ volumes := b.Volumes()
- if err := addHostsToFile(b.CommonBuildOpts.AddHost, hostFile); err != nil {
- return err
+ if !contains(volumes, "/etc/hosts") {
+ hostFile, err := b.addNetworkConfig(path, "/etc/hosts", rootIDPair)
+ if err != nil {
+ return err
+ }
+ bindFiles["/etc/hosts"] = hostFile
+
+ if err := addHostsToFile(b.CommonBuildOpts.AddHost, hostFile); err != nil {
+ return err
+ }
}
- bindFiles := map[string]string{
- "/etc/hosts": hostFile,
- "/etc/resolv.conf": resolvFile,
+ if !contains(volumes, "/etc/resolv.conf") {
+ resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair)
+ if err != nil {
+ return err
+ }
+ bindFiles["/etc/resolv.conf"] = resolvFile
}
- err = b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, b.Volumes(), b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, append(b.NamespaceOptions, options.NamespaceOptions...))
+
+ err = b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions)
if err != nil {
return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID)
}
@@ -1081,41 +1104,35 @@ func (b *Builder) Run(command []string, options RunOptions) error {
switch isolation {
case IsolationOCI:
- // The default is --rootless=auto, which makes troubleshooting a bit harder.
- // rootlessFlag := []string{"--rootless=false"}
- // for _, arg := range options.Args {
- // if strings.HasPrefix(arg, "--rootless") {
- // rootlessFlag = nil
- // }
- // }
- // options.Args = append(options.Args, rootlessFlag...)
var moreCreateArgs []string
if options.NoPivot {
moreCreateArgs = []string{"--no-pivot"}
} else {
moreCreateArgs = nil
}
- err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path))
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path))
case IsolationChroot:
err = chroot.RunUsingChroot(spec, path, options.Stdin, options.Stdout, options.Stderr)
case IsolationOCIRootless:
if err := setupRootlessSpecChanges(spec, path, rootUID, rootGID); err != nil {
return err
}
- rootlessFlag := []string{"--rootless=true"}
- for _, arg := range options.Args {
- if strings.HasPrefix(arg, "--rootless") {
- rootlessFlag = nil
- }
- }
- options.Args = append(options.Args, rootlessFlag...)
- err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, []string{"--no-new-keyring"}, spec, mountPoint, path, Package+"-"+filepath.Base(path))
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, []string{"--no-new-keyring"}, spec, mountPoint, path, Package+"-"+filepath.Base(path))
default:
err = errors.Errorf("don't know how to run this command")
}
return err
}
+func contains(volumes []string, v string) bool {
+ for _, i := range volumes {
+ if i == v {
+ return true
+ }
+ }
+ return false
+}
+
func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions) error {
switch isolation {
case IsolationOCIRootless:
@@ -1123,10 +1140,22 @@ func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions)
logrus.Debugf("Forcing use of an IPC namespace.")
}
options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.IPCNamespace)})
- if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host {
- logrus.Debugf("Disabling network namespace.")
+ _, err := exec.LookPath("slirp4netns")
+ hostNetworking := err != nil
+ networkNamespacePath := ""
+ if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil {
+ hostNetworking = ns.Host
+ networkNamespacePath = ns.Path
+ if !hostNetworking && networkNamespacePath != "" && !filepath.IsAbs(networkNamespacePath) {
+ logrus.Debugf("Disabling network namespace configuration.")
+ networkNamespacePath = ""
+ }
}
- options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.NetworkNamespace), Host: true})
+ options.NamespaceOptions.AddOrReplace(NamespaceOption{
+ Name: string(specs.NetworkNamespace),
+ Host: hostNetworking,
+ Path: networkNamespacePath,
+ })
if ns := options.NamespaceOptions.Find(string(specs.PIDNamespace)); ns == nil || ns.Host {
logrus.Debugf("Forcing use of a PID namespace.")
}
@@ -1227,9 +1256,10 @@ type runUsingRuntimeSubprocOptions struct {
ConfigureNetworks []string
MoreCreateArgs []string
ContainerName string
+ Isolation Isolation
}
-func (b *Builder) runUsingRuntimeSubproc(options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) {
+func (b *Builder) runUsingRuntimeSubproc(isolation Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) {
var confwg sync.WaitGroup
config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{
Options: options,
@@ -1240,6 +1270,7 @@ func (b *Builder) runUsingRuntimeSubproc(options RunOptions, configureNetwork bo
ConfigureNetworks: configureNetworks,
MoreCreateArgs: moreCreateArgs,
ContainerName: containerName,
+ Isolation: isolation,
})
if conferr != nil {
return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand)
@@ -1318,7 +1349,7 @@ func runUsingRuntimeMain() {
os.Exit(1)
}
// Run the container, start to finish.
- status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, options.Spec, options.RootPath, options.BundlePath, options.ContainerName)
+ status, err := runUsingRuntime(options.Isolation, options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, options.Spec, options.RootPath, options.BundlePath, options.ContainerName)
if err != nil {
fmt.Fprintf(os.Stderr, "error running container: %v\n", err)
os.Exit(1)
@@ -1333,7 +1364,7 @@ func runUsingRuntimeMain() {
os.Exit(1)
}
-func runUsingRuntime(options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) {
+func runUsingRuntime(isolation Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) {
// Lock the caller to a single OS-level thread.
runtime.LockOSThread()
@@ -1490,7 +1521,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, configureNetwork
}()
if configureNetwork {
- teardown, err := runConfigureNetwork(options, configureNetworks, pid, containerName, spec.Process.Args)
+ teardown, err := runConfigureNetwork(isolation, options, configureNetworks, pid, containerName, spec.Process.Args)
if teardown != nil {
defer teardown()
}
@@ -1623,9 +1654,81 @@ func runCollectOutput(fds, closeBeforeReadingFds []int) string {
}
return b.String()
}
+func setupRootlessNetwork(pid int) (teardown func(), err error) {
+ slirp4netns, err := exec.LookPath("slirp4netns")
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot find slirp4netns")
+ }
+
+ rootlessSlirpSyncR, rootlessSlirpSyncW, err := os.Pipe()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot create slirp4netns sync pipe")
+ }
+ defer rootlessSlirpSyncR.Close()
+
+ // Be sure there are no fds inherited to slirp4netns except the sync pipe
+ files, err := ioutil.ReadDir("/proc/self/fd")
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot list open fds")
+ }
+ for _, f := range files {
+ fd, err := strconv.Atoi(f.Name())
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot parse fd")
+ }
+ if fd == int(rootlessSlirpSyncW.Fd()) {
+ continue
+ }
+ unix.CloseOnExec(fd)
+ }
+
+ cmd := exec.Command(slirp4netns, "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0")
+ cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil
+ cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW}
+
+ err = cmd.Start()
+ rootlessSlirpSyncW.Close()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot start slirp4netns")
+ }
-func runConfigureNetwork(options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) {
+ b := make([]byte, 1)
+ for {
+ if err := rootlessSlirpSyncR.SetDeadline(time.Now().Add(1 * time.Second)); err != nil {
+ return nil, errors.Wrapf(err, "error setting slirp4netns pipe timeout")
+ }
+ if _, err := rootlessSlirpSyncR.Read(b); err == nil {
+ break
+ } else {
+ if os.IsTimeout(err) {
+ // Check if the process is still running.
+ var status syscall.WaitStatus
+ _, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to read slirp4netns process status")
+ }
+ if status.Exited() || status.Signaled() {
+ return nil, errors.New("slirp4netns failed")
+ }
+
+ continue
+ }
+ return nil, errors.Wrapf(err, "failed to read from slirp4netns sync pipe")
+ }
+ }
+
+ return func() {
+ cmd.Process.Kill()
+ cmd.Wait()
+ }, nil
+}
+
+func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) {
var netconf, undo []*libcni.NetworkConfigList
+
+ if isolation == IsolationOCIRootless {
+ return setupRootlessNetwork(pid)
+ }
// Scan for CNI configuration files.
confdir := options.CNIConfigDir
files, err := libcni.ConfFiles(confdir, []string{".conf"})
@@ -1956,7 +2059,7 @@ func runAcceptTerminal(consoleListener *net.UnixListener, terminalSize *specs.Bo
for i := range scm {
fds, err := unix.ParseUnixRights(&scm[i])
if err != nil {
- return -1, errors.Wrapf(err, "error parsing unix rights control message: %v")
+ return -1, errors.Wrapf(err, "error parsing unix rights control message: %v", &scm[i])
}
logrus.Debugf("fds: %v", fds)
if len(fds) == 0 {
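
setupRootlessNetwork above starts slirp4netns with an inherited pipe ("-r 3") and then polls the read end with a one-second deadline until the child writes a byte to signal that the tap device is ready. The readiness handshake on its own looks roughly like the following sketch, with /bin/sh standing in for slirp4netns:

package main

import (
	"log"
	"os"
	"os/exec"
	"time"
)

func main() {
	// Parent keeps the read end; the child inherits the write end as fd 3.
	syncR, syncW, err := os.Pipe()
	if err != nil {
		log.Fatal(err)
	}
	defer syncR.Close()

	cmd := exec.Command("/bin/sh", "-c", "sleep 1; printf r >&3")
	cmd.ExtraFiles = []*os.File{syncW} // becomes fd 3 in the child

	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	syncW.Close() // drop the parent's copy so a dead child shows up as EOF

	buf := make([]byte, 1)
	for {
		if err := syncR.SetReadDeadline(time.Now().Add(200 * time.Millisecond)); err != nil {
			log.Fatal(err)
		}
		if _, err := syncR.Read(buf); err == nil {
			break // child signalled readiness
		} else if !os.IsTimeout(err) {
			log.Fatalf("sync pipe: %v", err) // EOF here means the child exited early
		}
		// timed out: the real code checks the child's wait status here, then keeps waiting
	}
	log.Println("child is ready")
	cmd.Wait()
}
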
diff --git a/vendor/github.com/containers/buildah/unshare/unshare.go b/vendor/github.com/containers/buildah/unshare/unshare.go
index fbe623660..2a970b8d6 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare.go
+++ b/vendor/github.com/containers/buildah/unshare/unshare.go
@@ -55,6 +55,10 @@ func (c *Cmd) Start() error {
}
c.Env = append(c.Env, fmt.Sprintf("_Buildah-unshare=%d", c.UnshareFlags))
+ // Please the libpod "rootless" package to find the expected env variables.
+ c.Env = append(c.Env, "_LIBPOD_USERNS_CONFIGURED=done")
+ c.Env = append(c.Env, fmt.Sprintf("_LIBPOD_ROOTLESS_UID=%d", os.Geteuid()))
+
// Create the pipe for reading the child's PID.
pidRead, pidWrite, err := os.Pipe()
if err != nil {
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 3a415a7f3..b2451b78b 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -7,10 +7,8 @@ import (
"net/url"
"os"
"path"
- "path/filepath"
"strconv"
"strings"
- "syscall"
"github.com/containers/image/directory"
dockerarchive "github.com/containers/image/docker/archive"
@@ -31,6 +29,10 @@ import (
const (
minimumTruncatedIDLength = 3
+ // DefaultTransport is a prefix that we apply to an image name if we
+ // can't find one in the local Store, in order to generate a source
+ // reference for the image that we can then copy to the local Store.
+ DefaultTransport = "docker://"
)
var (
@@ -89,6 +91,7 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
}
}
+ name = strings.TrimPrefix(name, DefaultTransport)
// If the image name already included a domain component, we're done.
named, err := reference.ParseNormalizedNamed(name)
if err != nil {
@@ -450,60 +453,6 @@ func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap,
return uid, gid, nil
}
-// UnsharedRootPath returns a location under ($XDG_DATA_HOME/containers/storage,
-// or $HOME/.local/share/containers/storage, or
-// (the user's home directory)/.local/share/containers/storage, or an error.
-func UnsharedRootPath(homedir string) (string, error) {
- // If $XDG_DATA_HOME is defined...
- if envDataHome, haveDataHome := os.LookupEnv("XDG_DATA_HOME"); haveDataHome {
- return filepath.Join(envDataHome, "containers", "storage"), nil
- }
- // If $XDG_DATA_HOME is not defined, but $HOME is defined...
- if envHomedir, haveHomedir := os.LookupEnv("HOME"); haveHomedir {
- // Default to the user's $HOME/.local/share/containers/storage subdirectory.
- return filepath.Join(envHomedir, ".local", "share", "containers", "storage"), nil
- }
- // If we know where our home directory is...
- if homedir != "" {
- // Default to the user's homedir/.local/share/containers/storage subdirectory.
- return filepath.Join(homedir, ".local", "share", "containers", "storage"), nil
- }
- return "", errors.New("unable to determine a --root location: neither $XDG_DATA_HOME nor $HOME is set")
-}
-
-// UnsharedRunrootPath returns $XDG_RUNTIME_DIR/run, /var/run/user/(the user's UID)/run, or an error.
-func UnsharedRunrootPath(uid string) (string, error) {
- // If $XDG_RUNTIME_DIR is defined...
- if envRuntimeDir, haveRuntimeDir := os.LookupEnv("XDG_RUNTIME_DIR"); haveRuntimeDir {
- return filepath.Join(envRuntimeDir, "run"), nil
- }
- var runtimeDir string
- // If $XDG_RUNTIME_DIR is not defined, but we know our UID...
- if uid != "" {
- tmpDir := filepath.Join("/var/run/user", uid)
- os.MkdirAll(tmpDir, 0700)
- st, err := os.Stat(tmpDir)
- if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Getuid() && st.Mode().Perm() == 0700 {
- runtimeDir = tmpDir
- }
- }
- if runtimeDir == "" {
- home := os.Getenv("HOME")
- if home == "" {
- return "", errors.New("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
- }
- resolvedHome, err := filepath.EvalSymlinks(home)
- if err != nil {
- return "", errors.Wrapf(err, "cannot resolve %s", home)
- }
- runtimeDir = filepath.Join(resolvedHome, "rundir")
- }
- if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
- return "", errors.New("could not set XDG_RUNTIME_DIR")
- }
- return runtimeDir, nil
-}
-
// GetPolicyContext sets up, initializes and returns a new context for the specified policy
func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error) {
policy, err := signature.DefaultPolicy(ctx)
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index d79412afc..185cde449 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -3,9 +3,9 @@ github.com/blang/semver master
github.com/BurntSushi/toml master
github.com/containerd/continuity master
github.com/containernetworking/cni v0.7.0-alpha1
-github.com/containers/image 5e5b67d6b1cf43cc349128ec3ed7d5283a6cc0d1
-github.com/containers/libpod 2afadeec6696fefac468a49c8ba24b0bc275aa75
-github.com/containers/storage 41294c85d97bef688e18f710402895dbecde3308
+github.com/containers/image de7be82ee3c7fb676bf6cfdc9090be7cc28f404c
+github.com/containers/libpod fe4f09493f41f675d24c969d1b60d1a6a45ddb9e
+github.com/containers/storage 3161726d1db0d0d4e86a9667dd476f09b997f497
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
@@ -36,9 +36,9 @@ github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc master
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runtime-tools master
-github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
+github.com/opencontainers/selinux master
github.com/openshift/imagebuilder master
-github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
+github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b
github.com/pborman/uuid master
github.com/pkg/errors master
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
diff --git a/vendor/github.com/containers/storage/README.md b/vendor/github.com/containers/storage/README.md
index f68cc55c3..fef46a689 100644
--- a/vendor/github.com/containers/storage/README.md
+++ b/vendor/github.com/containers/storage/README.md
@@ -2,7 +2,7 @@
layers, container images, and containers. A `containers-storage` CLI wrapper
is also included for manual and scripting use.
-To build the CLI wrapper, use 'make build-binary'.
+To build the CLI wrapper, use 'make binary'.
Operations which use VMs expect to launch them using 'vagrant', defaulting to
using its 'libvirt' provider. The boxes used are also available for the
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index 0a125331d..beaf41f39 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -147,6 +147,13 @@ func (c *Container) ProcessLabel() string {
return ""
}
+func (c *Container) MountOpts() []string {
+ if mountOpts, ok := c.Flags["MountOpts"].([]string); ok {
+ return mountOpts
+ }
+ return nil
+}
+
func (r *containerStore) Containers() ([]Container, error) {
containers := make([]Container, len(r.containers))
for i := range r.containers {
@@ -293,6 +300,9 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID
}
+ if options.MountOpts != nil {
+ options.Flags["MountOpts"] = append([]string{}, options.MountOpts...)
+ }
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
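
Illustrative sketch (not part of the vendored patch): how the new MountOpts accessor and the Create path round-trip per-container mount options through the generic Flags map. The Container type below is a stand-in, not the real c/storage struct.

    package storagesketch

    // Container is a stand-in reduced to the field the accessor relies on.
    type Container struct {
        Flags map[string]interface{}
    }

    // MountOpts mirrors the accessor added above: it returns the stored
    // options when the flag is present with the expected type, nil otherwise.
    func (c *Container) MountOpts() []string {
        if mountOpts, ok := c.Flags["MountOpts"].([]string); ok {
            return mountOpts
        }
        return nil
    }

    // recordMountOpts stores a defensive copy, as the Create path above does,
    // so later mutations of the caller's slice do not leak into the store.
    func recordMountOpts(c *Container, opts []string) {
        if c.Flags == nil {
            c.Flags = map[string]interface{}{}
        }
        if opts != nil {
            c.Flags["MountOpts"] = append([]string{}, opts...)
        }
    }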
diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go
index 6e83808d4..aef6becfe 100644
--- a/vendor/github.com/containers/storage/containers_ffjson.go
+++ b/vendor/github.com/containers/storage/containers_ffjson.go
@@ -1,6 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: containers.go
-//
package storage
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
index f14ba24b9..ca69816be 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -405,7 +405,7 @@ func atomicRemove(source string) error {
case os.IsExist(err):
// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
if _, e := os.Stat(source); !os.IsNotExist(e) {
- return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up")
+ return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target)
}
default:
return errors.Wrapf(err, "error preparing atomic delete")
@@ -441,7 +441,7 @@ func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
// If a dir does not have a parent (no layers), do not try to mount
// just return the diff path to the data
if len(parents) > 0 {
- if err := a.mount(id, m, options.MountLabel, parents); err != nil {
+ if err := a.mount(id, m, parents, options); err != nil {
return "", err
}
}
@@ -585,7 +585,7 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
return layers, nil
}
-func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
+func (a *Driver) mount(id string, target string, layers []string, options graphdriver.MountOpts) error {
a.Lock()
defer a.Unlock()
@@ -596,7 +596,7 @@ func (a *Driver) mount(id string, target string, mountLabel string, layers []str
rw := a.getDiffPath(id)
- if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
+ if err := a.aufsMount(layers, rw, target, options); err != nil {
return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
}
return nil
@@ -643,7 +643,7 @@ func (a *Driver) Cleanup() error {
return mountpk.Unmount(a.root)
}
-func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {
+func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) {
defer func() {
if err != nil {
Unmount(target)
@@ -657,7 +657,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
if useDirperm() {
offset += len(",dirperm1")
}
- b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
+ b := make([]byte, unix.Getpagesize()-len(options.MountLabel)-offset) // room for xino & mountLabel
bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
index := 0
@@ -670,21 +670,25 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
}
opts := "dio,xino=/dev/shm/aufs.xino"
- if a.mountOptions != "" {
- opts += fmt.Sprintf(",%s", a.mountOptions)
+ mountOptions := a.mountOptions
+ if len(options.Options) > 0 {
+ mountOptions = strings.Join(options.Options, ",")
+ }
+ if mountOptions != "" {
+ opts += fmt.Sprintf(",%s", mountOptions)
}
if useDirperm() {
opts += ",dirperm1"
}
- data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
+ data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), options.MountLabel)
if err = mount("none", target, "aufs", 0, data); err != nil {
return
}
for ; index < len(ro); index++ {
layer := fmt.Sprintf(":%s=ro+wh", ro[index])
- data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
+ data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), options.MountLabel)
if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
return
}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
index adc34d209..567cda9d3 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -640,6 +640,9 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
if err != nil {
return "", err
}
+ if len(options.Options) > 0 {
+ return "", fmt.Errorf("btrfs driver does not support mount options")
+ }
if !st.IsDir() {
return "", fmt.Errorf("%s: not a directory", dir)
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
index cbf67b3eb..2801dfdc5 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -2364,7 +2364,7 @@ func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
}
// MountDevice mounts the device if not already mounted.
-func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
+func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.MountOpts) error {
info, err := devices.lookupDeviceWithLock(hash)
if err != nil {
return err
@@ -2396,8 +2396,17 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
options = joinMountOptions(options, "nouuid")
}
- options = joinMountOptions(options, devices.mountOptions)
- options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
+ mountOptions := devices.mountOptions
+ if len(moptions.Options) > 0 {
+ addNouuid := strings.Contains(mountOptions, "nouuid")
+ mountOptions = strings.Join(moptions.Options, ",")
+ if addNouuid {
+ mountOptions = fmt.Sprintf("nouuid,%s", mountOptions)
+ }
+ }
+
+ options = joinMountOptions(options, mountOptions)
+ options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel))
if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256)))
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
index 9fc082d7d..39a4fbe2c 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
@@ -9,8 +9,6 @@ import (
"path"
"strconv"
- "github.com/sirupsen/logrus"
-
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/devicemapper"
"github.com/containers/storage/pkg/idtools"
@@ -18,6 +16,7 @@ import (
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
+ "github.com/sirupsen/logrus"
)
func init() {
@@ -189,7 +188,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
}
// Mount the device
- if err := d.DeviceSet.MountDevice(id, mp, options.MountLabel); err != nil {
+ if err := d.DeviceSet.MountDevice(id, mp, options); err != nil {
d.ctr.Decrement(mp)
return "", err
}
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index 4569c7b59..476b55160 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -49,6 +49,7 @@ type MountOpts struct {
// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
UidMaps []idtools.IDMap
GidMaps []idtools.IDMap
+ Options []string
}
// InitFunc initializes the storage driver.
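
Illustrative sketch of a caller populating the new Options field (assumes the vendored graphdriver import path and the Driver.Get signature shown in this diff; the option strings themselves are only examples):

    package driversketch

    import (
        graphdriver "github.com/containers/storage/drivers"
    )

    // mountLayerWithOptions shows how per-mount options ride alongside the
    // SELinux mount label in the extended MountOpts struct.
    func mountLayerWithOptions(driver graphdriver.Driver, layerID, mountLabel string) (string, error) {
        opts := graphdriver.MountOpts{
            MountLabel: mountLabel,
            Options:    []string{"nodev", "noexec"},
        }
        return driver.Get(layerID, opts)
    }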
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
index 19da7d101..ab2c41e58 100644
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -8,6 +8,7 @@ import (
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/sirupsen/logrus"
)
@@ -167,7 +168,9 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id string, applyMappings *idtools.IDMappin
}
defer driver.Put(id)
- options := &archive.TarOptions{}
+ options := &archive.TarOptions{
+ InUserNS: rsystem.RunningInUserNS(),
+ }
if applyMappings != nil {
options.UIDMaps = applyMappings.UIDs()
options.GIDMaps = applyMappings.GIDs()
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index d2cc65bca..df736c0a9 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -29,6 +29,7 @@ import (
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -340,6 +341,10 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
func (d *Driver) useNaiveDiff() bool {
useNaiveDiffLock.Do(func() {
+ if d.options.mountProgram != "" {
+ useNaiveDiffOnly = true
+ return
+ }
if err := doesSupportNativeDiff(d.home, d.options.mountOptions); err != nil {
logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err)
useNaiveDiffOnly = true
@@ -739,7 +744,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
workDir := path.Join(dir, "work")
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir)
- if d.options.mountOptions != "" {
+ if len(options.Options) > 0 {
+ opts = fmt.Sprintf("%s,%s", strings.Join(options.Options, ","), opts)
+ } else if d.options.mountOptions != "" {
opts = fmt.Sprintf("%s,%s", d.options.mountOptions, opts)
}
mountData := label.FormatMountLabel(opts, options.MountLabel)
@@ -841,6 +848,17 @@ func (d *Driver) isParent(id, parent string) bool {
return ld == parentDir
}
+func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat {
+ whiteoutFormat := archive.OverlayWhiteoutFormat
+ if d.options.mountProgram != "" {
+ // If we are using a mount program, we are most likely running
+ // as an unprivileged user that cannot use mknod, so fall back to the
+ // AUFS whiteout format.
+ whiteoutFormat = archive.AUFSWhiteoutFormat
+ }
+ return whiteoutFormat
+}
+
// ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent string, mountLabel string, diff io.Reader) (size int64, err error) {
if !d.isParent(id, parent) {
@@ -858,7 +876,8 @@ func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent str
if err := untar(diff, applyDir, &archive.TarOptions{
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
- WhiteoutFormat: archive.OverlayWhiteoutFormat,
+ WhiteoutFormat: d.getWhiteoutFormat(),
+ InUserNS: rsystem.RunningInUserNS(),
}); err != nil {
return 0, err
}
@@ -911,7 +930,7 @@ func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string,
Compression: archive.Uncompressed,
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
- WhiteoutFormat: archive.OverlayWhiteoutFormat,
+ WhiteoutFormat: d.getWhiteoutFormat(),
WhiteoutData: lowerDirs,
})
}
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
index d10fb2607..e3a67a69b 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -181,6 +181,9 @@ func (d *Driver) Remove(id string) error {
// Get returns the directory for the given id.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
dir := d.dir(id)
+ if len(options.Options) > 0 {
+ return "", fmt.Errorf("vfs driver does not support mount options")
+ }
if st, err := os.Stat(dir); err != nil {
return "", err
} else if !st.IsDir() {
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index 4ccf657dc..c6d86a4ab 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -367,6 +367,9 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, options.MountLabel)
var dir string
+ if len(options.Options) > 0 {
+ return "", fmt.Errorf("windows driver does not support mount options")
+ }
rID, err := d.resolveID(id)
if err != nil {
return "", err
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index cb4424f2d..c3ce6e869 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -52,7 +52,7 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available")
}
- file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600)
+ file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
if err != nil {
logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err)
return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err)
@@ -366,8 +366,13 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
return mountpoint, nil
}
+ mountOptions := d.options.mountOptions
+ if len(options.Options) > 0 {
+ mountOptions = strings.Join(options.Options, ",")
+ }
+
filesystem := d.zfsPath(id)
- opts := label.FormatMountLabel(d.options.mountOptions, options.MountLabel)
+ opts := label.FormatMountLabel(mountOptions, options.MountLabel)
logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, opts)
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 1275ab47c..0b532eb77 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -28,9 +28,8 @@ import (
)
const (
- tarSplitSuffix = ".tar-split.gz"
- incompleteFlag = "incomplete"
- compressionFlag = "diff-compression"
+ tarSplitSuffix = ".tar-split.gz"
+ incompleteFlag = "incomplete"
)
// A Layer is a record of a copy-on-write layer that's stored by the lower
@@ -542,8 +541,8 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
_, idInUse = r.byid[id]
}
}
- if _, idInUse := r.byid[id]; idInUse {
- return nil, -1, ErrDuplicateID
+ if duplicateLayer, idInUse := r.byid[id]; idInUse {
+ return duplicateLayer, -1, ErrDuplicateID
}
names = dedupeNames(names)
for _, name := range names {
@@ -841,8 +840,12 @@ func (r *layerStore) Delete(id string) error {
return ErrLayerUnknown
}
id = layer.ID
- if _, err := r.Unmount(id, true); err != nil {
- return err
+ // This check is needed for idempotency of delete: the layer could already
+ // have been unmounted, since c/storage exposes the Unmount API directly.
+ for layer.MountCount > 0 {
+ if _, err := r.Unmount(id, false); err != nil {
+ return err
+ }
}
err := r.driver.Remove(id)
if err == nil {
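
Illustrative sketch of the unmount-until-unmounted loop that makes Delete idempotent above, reduced to stand-in types (the real layerStore tracks the count in layer.MountCount rather than through a method):

    package layersketch

    import "fmt"

    // unmounter is the minimal surface the loop needs: an Unmount that
    // decrements a mount count, and a way to read the current count.
    type unmounter interface {
        Unmount(id string, force bool) (bool, error)
        MountCount(id string) int
    }

    // unmountAll keeps unmounting until the layer's mount count reaches zero,
    // so a later delete does not fail just because callers already unmounted.
    func unmountAll(s unmounter, id string) error {
        for s.MountCount(id) > 0 {
            if _, err := s.Unmount(id, false); err != nil {
                return fmt.Errorf("unmounting layer %s: %v", id, err)
            }
        }
        return nil
    }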
diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go
index 09b5d0f33..125b5d8c9 100644
--- a/vendor/github.com/containers/storage/layers_ffjson.go
+++ b/vendor/github.com/containers/storage/layers_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./layers.go
+// source: layers.go
package storage
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 4c4382625..8d6eaacf3 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -22,6 +22,7 @@ import (
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/promise"
"github.com/containers/storage/pkg/system"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/sirupsen/logrus"
)
@@ -1054,6 +1055,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
GIDMaps: tarMappings.GIDs(),
Compression: Uncompressed,
CopyPass: true,
+ InUserNS: rsystem.RunningInUserNS(),
}
archive, err := TarWithOptions(src, options)
if err != nil {
@@ -1068,6 +1070,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
+ InUserNS: rsystem.RunningInUserNS(),
}
return archiver.Untar(archive, dst, options)
}
@@ -1087,6 +1090,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
+ InUserNS: rsystem.RunningInUserNS(),
}
return archiver.Untar(archive, dst, options)
}
@@ -1186,6 +1190,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
UIDMaps: archiver.UntarIDMappings.UIDs(),
GIDMaps: archiver.UntarIDMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
+ InUserNS: rsystem.RunningInUserNS(),
}
err = archiver.Untar(r, filepath.Dir(dst), options)
if err != nil {
diff --git a/vendor/github.com/containers/storage/pkg/archive/example_changes.go b/vendor/github.com/containers/storage/pkg/archive/example_changes.go
deleted file mode 100644
index 70f9c5564..000000000
--- a/vendor/github.com/containers/storage/pkg/archive/example_changes.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// +build ignore
-
-// Simple tool to create an archive stream from an old and new directory
-//
-// By default it will stream the comparison of two temporary directories with junk files
-package main
-
-import (
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path"
-
- "github.com/containers/storage/pkg/archive"
- "github.com/sirupsen/logrus"
-)
-
-var (
- flDebug = flag.Bool("D", false, "debugging output")
- flNewDir = flag.String("newdir", "", "")
- flOldDir = flag.String("olddir", "", "")
- log = logrus.New()
-)
-
-func main() {
- flag.Usage = func() {
- fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
- fmt.Printf("%s [OPTIONS]\n", os.Args[0])
- flag.PrintDefaults()
- }
- flag.Parse()
- log.Out = os.Stderr
- if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
- logrus.SetLevel(logrus.DebugLevel)
- }
- var newDir, oldDir string
-
- if len(*flNewDir) == 0 {
- var err error
- newDir, err = ioutil.TempDir("", "storage-test-newDir")
- if err != nil {
- log.Fatal(err)
- }
- defer os.RemoveAll(newDir)
- if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
- log.Fatal(err)
- }
- } else {
- newDir = *flNewDir
- }
-
- if len(*flOldDir) == 0 {
- oldDir, err := ioutil.TempDir("", "storage-test-oldDir")
- if err != nil {
- log.Fatal(err)
- }
- defer os.RemoveAll(oldDir)
- } else {
- oldDir = *flOldDir
- }
-
- changes, err := archive.ChangesDirs(newDir, oldDir)
- if err != nil {
- log.Fatal(err)
- }
-
- a, err := archive.ExportChanges(newDir, changes)
- if err != nil {
- log.Fatal(err)
- }
- defer a.Close()
-
- i, err := io.Copy(os.Stdout, a)
- if err != nil && err != io.EOF {
- log.Fatal(err)
- }
- fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
-}
-
-func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
- fileData := []byte("fooo")
- for n := 0; n < numberOfFiles; n++ {
- fileName := fmt.Sprintf("file-%d", n)
- if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
- return 0, err
- }
- if makeLinks {
- if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
- return 0, err
- }
- }
- }
- totalSize := numberOfFiles * len(fileData)
- return totalSize, nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index b9fa228e6..dde8d44d3 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
)
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
@@ -52,6 +53,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
}
if options == nil {
options = &archive.TarOptions{}
+ options.InUserNS = rsystem.RunningInUserNS()
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go
new file mode 100644
index 000000000..c56aa86a2
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go
@@ -0,0 +1,56 @@
+package idtools
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+func nonDigitsToWhitespace(r rune) rune {
+ if !strings.ContainsRune("0123456789", r) {
+ return ' '
+ }
+ return r
+}
+
+func parseTriple(spec []string) (container, host, size uint32, err error) {
+ cid, err := strconv.ParseUint(spec[0], 10, 32)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err)
+ }
+ hid, err := strconv.ParseUint(spec[1], 10, 32)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err)
+ }
+ sz, err := strconv.ParseUint(spec[2], 10, 32)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err)
+ }
+ return uint32(cid), uint32(hid), uint32(sz), nil
+}
+
+// ParseIDMap parses idmap triples from string.
+func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) {
+ for _, idMapSpec := range mapSpec {
+ idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec))
+ if len(idSpec)%3 != 0 {
+ return nil, fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
+ }
+ for i := range idSpec {
+ if i%3 != 0 {
+ continue
+ }
+ cid, hid, size, err := parseTriple(idSpec[i : i+3])
+ if err != nil {
+ return nil, fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
+ }
+ mapping := IDMap{
+ ContainerID: int(cid),
+ HostID: int(hid),
+ Size: int(size),
+ }
+ idmap = append(idmap, mapping)
+ }
+ }
+ return idmap, nil
+}
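
Illustrative sketch of the new idtools.ParseIDMap in use; the spec string is an example, and any non-digit characters act as separators between the container ID, host ID, and range size:

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/idtools"
    )

    func main() {
        // "0:100000:65536" maps container IDs 0-65535 to host IDs 100000-165535.
        maps, err := idtools.ParseIDMap([]string{"0:100000:65536"}, "remap-uids")
        if err != nil {
            fmt.Println(err)
            return
        }
        for _, m := range maps {
            fmt.Printf("container %d -> host %d (size %d)\n", m.ContainerID, m.HostID, m.Size)
        }
    }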
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 7eaa82910..e0dd1b92f 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -8,7 +8,6 @@ import (
"os"
"path/filepath"
"reflect"
- "strconv"
"strings"
"sync"
"time"
@@ -22,6 +21,7 @@ import (
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
digest "github.com/opencontainers/go-digest"
@@ -502,6 +502,7 @@ type ContainerOptions struct {
IDMappingOptions
LabelOpts []string
Flags map[string]interface{}
+ MountOpts []string
}
type store struct {
@@ -1069,7 +1070,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, read
}
mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, rc)
if err != nil {
- return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q")
+ return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q", parentLayer.ID)
}
if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
@@ -2145,21 +2146,20 @@ func (s *store) DeleteContainer(id string) error {
if err = rlstore.Delete(container.LayerID); err != nil {
return err
}
- if err = rcstore.Delete(id); err != nil {
- return err
- }
- middleDir := s.graphDriverName + "-containers"
- gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
- if err = os.RemoveAll(gcpath); err != nil {
- return err
- }
- rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
- if err = os.RemoveAll(rcpath); err != nil {
- return err
- }
- return nil
}
- return ErrNotALayer
+ if err = rcstore.Delete(id); err != nil {
+ return err
+ }
+ middleDir := s.graphDriverName + "-containers"
+ gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
+ if err = os.RemoveAll(gcpath); err != nil {
+ return err
+ }
+ rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
+ if err = os.RemoveAll(rcpath); err != nil {
+ return err
+ }
+ return nil
}
}
return ErrNotAContainer
@@ -2280,10 +2280,14 @@ func (s *store) Version() ([][2]string, error) {
func (s *store) Mount(id, mountLabel string) (string, error) {
container, err := s.Container(id)
- var uidMap, gidMap []idtools.IDMap
+ var (
+ uidMap, gidMap []idtools.IDMap
+ mountOpts []string
+ )
if err == nil {
uidMap, gidMap = container.UIDMap, container.GIDMap
id = container.LayerID
+ mountOpts = container.MountOpts()
}
rlstore, err := s.LayerStore()
if err != nil {
@@ -2299,6 +2303,7 @@ func (s *store) Mount(id, mountLabel string) (string, error) {
MountLabel: mountLabel,
UidMaps: uidMap,
GidMaps: gidMap,
+ Options: mountOpts,
}
return rlstore.Mount(id, options)
}
@@ -3203,56 +3208,19 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
storeOptions.UIDMap = mappings.UIDs()
storeOptions.GIDMap = mappings.GIDs()
}
- nonDigitsToWhitespace := func(r rune) rune {
- if strings.IndexRune("0123456789", r) == -1 {
- return ' '
- } else {
- return r
- }
- }
- parseTriple := func(spec []string) (container, host, size uint32, err error) {
- cid, err := strconv.ParseUint(spec[0], 10, 32)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err)
- }
- hid, err := strconv.ParseUint(spec[1], 10, 32)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err)
- }
- sz, err := strconv.ParseUint(spec[2], 10, 32)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err)
- }
- return uint32(cid), uint32(hid), uint32(sz), nil
+
+ uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
+ if err != nil {
+ fmt.Print(err)
+ } else {
+ storeOptions.UIDMap = append(storeOptions.UIDMap, uidmap...)
}
- parseIDMap := func(idMapSpec, mapSetting string) (idmap []idtools.IDMap) {
- if len(idMapSpec) > 0 {
- idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec))
- if len(idSpec)%3 != 0 {
- fmt.Printf("Error initializing ID mappings: %s setting is malformed.\n", mapSetting)
- return nil
- }
- for i := range idSpec {
- if i%3 != 0 {
- continue
- }
- cid, hid, size, err := parseTriple(idSpec[i : i+3])
- if err != nil {
- fmt.Printf("Error initializing ID mappings: %s setting is malformed.\n", mapSetting)
- return nil
- }
- mapping := idtools.IDMap{
- ContainerID: int(cid),
- HostID: int(hid),
- Size: int(size),
- }
- idmap = append(idmap, mapping)
- }
- }
- return idmap
+ gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
+ if err != nil {
+ fmt.Print(err)
+ } else {
+ storeOptions.GIDMap = append(storeOptions.GIDMap, gidmap...)
}
- storeOptions.UIDMap = append(storeOptions.UIDMap, parseIDMap(config.Storage.Options.RemapUIDs, "remap-uids")...)
- storeOptions.GIDMap = append(storeOptions.GIDMap, parseIDMap(config.Storage.Options.RemapGIDs, "remap-gids")...)
if os.Getenv("STORAGE_DRIVER") != "" {
storeOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER")
}
@@ -3271,3 +3239,23 @@ func init() {
ReloadConfigurationFile(defaultConfigFile, &DefaultStoreOptions)
}
+
+func GetDefaultMountOptions() ([]string, error) {
+ mountOpts := []string{
+ ".mountopt",
+ fmt.Sprintf("%s.mountopt", DefaultStoreOptions.GraphDriverName),
+ }
+ for _, option := range DefaultStoreOptions.GraphDriverOptions {
+ key, val, err := parsers.ParseKeyValueOpt(option)
+ if err != nil {
+ return nil, err
+ }
+ key = strings.ToLower(key)
+ for _, m := range mountOpts {
+ if m == key {
+ return strings.Split(val, ","), nil
+ }
+ }
+ }
+ return nil, nil
+}
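
Illustrative sketch of how the store.go additions fit together from a caller's point of view (assumes an already-initialized storage.Store and an existing container ID; not part of the patch):

    package storesketch

    import (
        "fmt"

        "github.com/containers/storage"
    )

    func mountContainer(s storage.Store, containerID string) error {
        // The store now forwards any MountOpts recorded on the container to
        // the graph driver, in addition to the mount label.
        mountPoint, err := s.Mount(containerID, "")
        if err != nil {
            return err
        }
        fmt.Println("mounted at", mountPoint)

        // Defaults configured via <driver>.mountopt in storage.conf can be
        // read back with the new helper.
        defaults, err := storage.GetDefaultMountOptions()
        if err != nil {
            return err
        }
        fmt.Println("default mount options:", defaults)
        return nil
    }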
diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf
index 2276d5531..059ae94f0 100644
--- a/vendor/github.com/containers/storage/vendor.conf
+++ b/vendor/github.com/containers/storage/vendor.conf
@@ -2,13 +2,14 @@ github.com/BurntSushi/toml master
github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
-github.com/docker/engine-api 4290f40c056686fcaa5c9caf02eac1dde9315adf
+github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/opencontainers/go-digest master
github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
github.com/opencontainers/selinux 36a9bc45a08c85f2c52bd9eb32e20267876773bd
+github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
github.com/pkg/errors master
github.com/pmezard/go-difflib v1.0.0
@@ -20,4 +21,3 @@ github.com/tchap/go-patricia v2.2.6
github.com/vbatts/tar-split v0.10.2
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
-github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
diff --git a/vendor/github.com/google/shlex/COPYING b/vendor/github.com/google/shlex/COPYING
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/google/shlex/COPYING
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/shlex/README b/vendor/github.com/google/shlex/README
new file mode 100644
index 000000000..c86bcc066
--- /dev/null
+++ b/vendor/github.com/google/shlex/README
@@ -0,0 +1,2 @@
+go-shlex is a simple lexer for go that supports shell-style quoting,
+commenting, and escaping.
diff --git a/vendor/github.com/google/shlex/shlex.go b/vendor/github.com/google/shlex/shlex.go
new file mode 100644
index 000000000..d98308bce
--- /dev/null
+++ b/vendor/github.com/google/shlex/shlex.go
@@ -0,0 +1,416 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package shlex implements a simple lexer which splits input into tokens using
+shell-style rules for quoting and commenting.
+
+The basic use case uses the default ASCII lexer to split a string into sub-strings:
+
+ shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
+
+To process a stream of strings:
+
+ l := NewLexer(os.Stdin)
+ for {
+     token, err := l.Next()
+     if err != nil {
+         break
+     }
+     // process token
+ }
+
+To access the raw token stream (which includes tokens for comments):
+
+ t := NewTokenizer(os.Stdin)
+ for {
+     token, err := t.Next()
+     if err != nil {
+         break
+     }
+     // process token
+ }
+
+*/
+package shlex
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// TokenType is a top-level token classification: A word, space, comment, unknown.
+type TokenType int
+
+// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
+type runeTokenClass int
+
+// the internal state used by the lexer state machine
+type lexerState int
+
+// Token is a (type, value) pair representing a lexical token.
+type Token struct {
+ tokenType TokenType
+ value string
+}
+
+// Equal reports whether tokens a and b are equal.
+// Two tokens are equal if both their types and values are equal. A nil token can
+// never be equal to another token.
+func (a *Token) Equal(b *Token) bool {
+ if a == nil || b == nil {
+ return false
+ }
+ if a.tokenType != b.tokenType {
+ return false
+ }
+ return a.value == b.value
+}
+
+// Named classes of UTF-8 runes
+const (
+ spaceRunes = " \t\r\n"
+ escapingQuoteRunes = `"`
+ nonEscapingQuoteRunes = "'"
+ escapeRunes = `\`
+ commentRunes = "#"
+)
+
+// Classes of rune token
+const (
+ unknownRuneClass runeTokenClass = iota
+ spaceRuneClass
+ escapingQuoteRuneClass
+ nonEscapingQuoteRuneClass
+ escapeRuneClass
+ commentRuneClass
+ eofRuneClass
+)
+
+// Classes of lexical token
+const (
+ UnknownToken TokenType = iota
+ WordToken
+ SpaceToken
+ CommentToken
+)
+
+// Lexer state machine states
+const (
+ startState lexerState = iota // no runes have been seen
+ inWordState // processing regular runes in a word
+ escapingState // we have just consumed an escape rune; the next rune is literal
+ escapingQuotedState // we have just consumed an escape rune within a quoted string
+ quotingEscapingState // we are within a quoted string that supports escaping ("...")
+ quotingState // we are within a string that does not support escaping ('...')
+ commentState // we are within a comment (everything following an unquoted or unescaped #)
+)
+
+// tokenClassifier is used for classifying rune characters.
+type tokenClassifier map[rune]runeTokenClass
+
+func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
+ for _, runeChar := range runes {
+ typeMap[runeChar] = tokenType
+ }
+}
+
+// newDefaultClassifier creates a new classifier for ASCII characters.
+func newDefaultClassifier() tokenClassifier {
+ t := tokenClassifier{}
+ t.addRuneClass(spaceRunes, spaceRuneClass)
+ t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
+ t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
+ t.addRuneClass(escapeRunes, escapeRuneClass)
+ t.addRuneClass(commentRunes, commentRuneClass)
+ return t
+}
+
+// ClassifyRune classifies a rune
+func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
+ return t[runeVal]
+}
+
+// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
+type Lexer Tokenizer
+
+// NewLexer creates a new lexer from an input stream.
+func NewLexer(r io.Reader) *Lexer {
+
+ return (*Lexer)(NewTokenizer(r))
+}
+
+// Next returns the next word, or an error. If there are no more words,
+// the error will be io.EOF.
+func (l *Lexer) Next() (string, error) {
+ for {
+ token, err := (*Tokenizer)(l).Next()
+ if err != nil {
+ return "", err
+ }
+ switch token.tokenType {
+ case WordToken:
+ return token.value, nil
+ case CommentToken:
+ // skip comments
+ default:
+ return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
+ }
+ }
+}
+
+// Tokenizer turns an input stream into a sequence of typed tokens
+type Tokenizer struct {
+ input bufio.Reader
+ classifier tokenClassifier
+}
+
+// NewTokenizer creates a new tokenizer from an input stream.
+func NewTokenizer(r io.Reader) *Tokenizer {
+ input := bufio.NewReader(r)
+ classifier := newDefaultClassifier()
+ return &Tokenizer{
+ input: *input,
+ classifier: classifier}
+}
+
+// scanStream scans the stream for the next token using the internal state machine.
+// It will panic if it encounters a rune which it does not know how to handle.
+func (t *Tokenizer) scanStream() (*Token, error) {
+ state := startState
+ var tokenType TokenType
+ var value []rune
+ var nextRune rune
+ var nextRuneType runeTokenClass
+ var err error
+
+ for {
+ nextRune, _, err = t.input.ReadRune()
+ nextRuneType = t.classifier.ClassifyRune(nextRune)
+
+ if err == io.EOF {
+ nextRuneType = eofRuneClass
+ err = nil
+ } else if err != nil {
+ return nil, err
+ }
+
+ switch state {
+ case startState: // no runes read yet
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ return nil, io.EOF
+ }
+ case spaceRuneClass:
+ {
+ }
+ case escapingQuoteRuneClass:
+ {
+ tokenType = WordToken
+ state = quotingEscapingState
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ tokenType = WordToken
+ state = quotingState
+ }
+ case escapeRuneClass:
+ {
+ tokenType = WordToken
+ state = escapingState
+ }
+ case commentRuneClass:
+ {
+ tokenType = CommentToken
+ state = commentState
+ }
+ default:
+ {
+ tokenType = WordToken
+ value = append(value, nextRune)
+ state = inWordState
+ }
+ }
+ }
+ case inWordState: // in a regular word
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case spaceRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case escapingQuoteRuneClass:
+ {
+ state = quotingEscapingState
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ state = quotingState
+ }
+ case escapeRuneClass:
+ {
+ state = escapingState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case escapingState: // the rune after an escape character
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found after escape character")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ default:
+ {
+ state = inWordState
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case escapingQuotedState: // the next rune after an escape character, in double quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found after escape character")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ default:
+ {
+ state = quotingEscapingState
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case quotingEscapingState: // in escaping double quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found when expecting closing quote")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case escapingQuoteRuneClass:
+ {
+ state = inWordState
+ }
+ case escapeRuneClass:
+ {
+ state = escapingQuotedState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case quotingState: // in non-escaping single quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found when expecting closing quote")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ state = inWordState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case commentState: // in a comment
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case spaceRuneClass:
+ {
+ if nextRune == '\n' {
+ state = startState
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ } else {
+ value = append(value, nextRune)
+ }
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ default:
+ {
+ return nil, fmt.Errorf("Unexpected state: %v", state)
+ }
+ }
+ }
+}
+
+// Next returns the next token in the stream.
+func (t *Tokenizer) Next() (*Token, error) {
+ return t.scanStream()
+}
+
+// Split partitions a string into a slice of strings.
+func Split(s string) ([]string, error) {
+ l := NewLexer(strings.NewReader(s))
+ subStrings := make([]string, 0)
+ for {
+ word, err := l.Next()
+ if err != nil {
+ if err == io.EOF {
+ return subStrings, nil
+ }
+ return subStrings, err
+ }
+ subStrings = append(subStrings, word)
+ }
+}
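
A minimal usage sketch of the vendored tokenizer above: Split drives the Lexer, so quoted groups stay together and comment tokens are dropped. The input string below is illustrative.

    package main

    import (
        "fmt"

        "github.com/google/shlex"
    )

    func main() {
        // Shell-style word splitting: the quoted filename stays one word,
        // the trailing comment is skipped by the lexer.
        words, err := shlex.Split(`cp "my file.txt" /tmp # trailing comment`)
        if err != nil {
            panic(err)
        }
        fmt.Println(words) // [cp my file.txt /tmp]
    }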
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
index 2cd54eac1..bbaa1e0d7 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
@@ -687,7 +687,11 @@ func Chcon(fpath string, label string, recurse bool) error {
return err
}
callback := func(p string, info os.FileInfo, err error) error {
- return SetFileLabel(p, label)
+ e := SetFileLabel(p, label)
+ if os.IsNotExist(e) {
+ return nil
+ }
+ return e
}
if recurse {
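
A minimal usage sketch of the patched Chcon above: with the added os.IsNotExist check, files that disappear during a recursive relabel no longer abort the walk. The path and label here are illustrative only.

    package main

    import (
        "log"

        selinux "github.com/opencontainers/selinux/go-selinux"
    )

    func main() {
        // Recursively relabel a directory tree; ENOENT from files removed
        // mid-walk is now tolerated by the callback shown in the diff.
        if err := selinux.Chcon("/var/lib/containers/storage",
            "system_u:object_r:container_file_t:s0", true); err != nil {
            log.Fatal(err)
        }
    }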
diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go
index 1c1afb119..d37965df6 100644
--- a/vendor/github.com/openshift/imagebuilder/builder.go
+++ b/vendor/github.com/openshift/imagebuilder/builder.go
@@ -172,8 +172,11 @@ type Stage struct {
Node *parser.Node
}
-func NewStages(node *parser.Node, b *Builder) Stages {
+func NewStages(node *parser.Node, b *Builder) (Stages, error) {
var stages Stages
+ if err := b.extractHeadingArgsFromNode(node); err != nil {
+ return stages, err
+ }
for i, root := range SplitBy(node, command.From) {
name, _ := extractNameFromNode(root.Children[0])
if len(name) == 0 {
@@ -189,7 +192,36 @@ func NewStages(node *parser.Node, b *Builder) Stages {
Node: root,
})
}
- return stages
+ return stages, nil
+}
+
+func (b *Builder) extractHeadingArgsFromNode(node *parser.Node) error {
+ var args []*parser.Node
+ var children []*parser.Node
+ extract := true
+ for _, child := range node.Children {
+ if extract && child.Value == command.Arg {
+ args = append(args, child)
+ } else {
+ if child.Value == command.From {
+ extract = false
+ }
+ children = append(children, child)
+ }
+ }
+
+ for _, c := range args {
+ step := b.Step()
+ if err := step.Resolve(c); err != nil {
+ return err
+ }
+ if err := b.Run(step, NoopExecutor, false); err != nil {
+ return err
+ }
+ }
+
+ node.Children = children
+ return nil
}
func extractNameFromNode(node *parser.Node) (string, bool) {
@@ -345,6 +377,9 @@ var ErrNoFROM = fmt.Errorf("no FROM statement found")
// is set to the first From found, or left unchanged if already
// set.
func (b *Builder) From(node *parser.Node) (string, error) {
+ if err := b.extractHeadingArgsFromNode(node); err != nil {
+ return "", err
+ }
children := SplitChildren(node, command.From)
switch {
case len(children) == 0:
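
A minimal sketch of the new call-site shape: NewStages now also returns an error because heading ARG instructions are resolved up front by extractHeadingArgsFromNode. ParseDockerfile, NewBuilder, and the helper name buildStages are assumptions for illustration, not part of this change.

    package main

    import (
        "fmt"
        "strings"

        "github.com/openshift/imagebuilder"
    )

    // buildStages is a hypothetical helper; ParseDockerfile and NewBuilder are
    // assumed to exist with the signatures used here.
    func buildStages(dockerfile string, args map[string]string) (imagebuilder.Stages, error) {
        node, err := imagebuilder.ParseDockerfile(strings.NewReader(dockerfile))
        if err != nil {
            return nil, err
        }
        // NewStages now returns an error as well: heading ARGs are run first.
        return imagebuilder.NewStages(node, imagebuilder.NewBuilder(args))
    }

    func main() {
        stages, err := buildStages("ARG BASE=busybox\nFROM ${BASE}\nRUN true\n", nil)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(stages)) // 1
    }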
diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go
index 068d5cc6f..f6510c2fd 100644
--- a/vendor/github.com/openshift/imagebuilder/dispatchers.go
+++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go
@@ -27,11 +27,6 @@ var (
obRgex = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`)
)
-// dispatch with no layer / parsing. This is effectively not a command.
-func nullDispatch(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
- return nil
-}
-
// ENV foo bar
//
// Sets the environment variable foo to bar, also makes interpolation
@@ -181,6 +176,17 @@ func from(b *Builder, args []string, attributes map[string]bool, flagArgs []stri
}
name := args[0]
+
+	// Support ARG before FROM
+ argStrs := []string{}
+ for n, v := range b.Args {
+ argStrs = append(argStrs, n+"="+v)
+ }
+ var err error
+ if name, err = ProcessWord(name, argStrs); err != nil {
+ return err
+ }
+
// Windows cannot support a container with no base image.
if name == NoBaseImageSpecifier {
if runtime.GOOS == "windows" {
@@ -438,6 +444,7 @@ func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs
healthcheck := docker.HealthConfig{}
flags := flag.NewFlagSet("", flag.ContinueOnError)
+ flags.String("start-period", "", "")
flags.String("interval", "", "")
flags.String("timeout", "", "")
flRetries := flags.String("retries", "", "")
@@ -462,6 +469,12 @@ func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs
return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ)
}
+ period, err := parseOptInterval(flags.Lookup("start-period"))
+ if err != nil {
+ return err
+ }
+ healthcheck.StartPeriod = period
+
interval, err := parseOptInterval(flags.Lookup("interval"))
if err != nil {
return err
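
A standalone sketch (not the package's parseOptInterval) of how a --start-period flag value can be turned into the time.Duration stored in HealthConfig.StartPeriod; the flag set name and value below are illustrative.

    package main

    import (
        "flag"
        "fmt"
        "time"
    )

    func main() {
        flags := flag.NewFlagSet("healthcheck", flag.ContinueOnError)
        startPeriod := flags.String("start-period", "", "grace period before failures count")
        if err := flags.Parse([]string{"--start-period=30s"}); err != nil {
            panic(err)
        }
        // Parse the textual duration the same way a duration option would be.
        d, err := time.ParseDuration(*startPeriod)
        if err != nil {
            panic(err)
        }
        fmt.Println(d) // 30s
    }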
diff --git a/vendor/github.com/openshift/imagebuilder/evaluator.go b/vendor/github.com/openshift/imagebuilder/evaluator.go
index 83263127e..e1cd5d6d6 100644
--- a/vendor/github.com/openshift/imagebuilder/evaluator.go
+++ b/vendor/github.com/openshift/imagebuilder/evaluator.go
@@ -122,8 +122,7 @@ func (b *Step) Resolve(ast *parser.Node) error {
envs := b.Env
for ast.Next != nil {
ast = ast.Next
- var str string
- str = ast.Value
+ str := ast.Value
if replaceEnvAllowed[cmd] {
var err error
var words []string
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go
index d3a8ae5fd..24822b2b7 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go
@@ -18,76 +18,102 @@ import (
// #include "builtin.go.h"
import "C"
+// Repo represents a local ostree repository
type Repo struct {
- //*glib.GObject
ptr unsafe.Pointer
}
-// Converts an ostree repo struct to its C equivalent
+// isInitialized checks if the repo has been initialized
+func (r *Repo) isInitialized() bool {
+ if r == nil || r.ptr == nil {
+ return false
+ }
+ return true
+}
+
+// native converts an ostree repo struct to its C equivalent
func (r *Repo) native() *C.OstreeRepo {
- //return (*C.OstreeRepo)(r.Ptr())
+ if !r.isInitialized() {
+ return nil
+ }
return (*C.OstreeRepo)(r.ptr)
}
-// Takes a C ostree repo and converts it to a Go struct
-func repoFromNative(p *C.OstreeRepo) *Repo {
- if p == nil {
+// repoFromNative takes a C ostree repo and converts it to a Go struct
+func repoFromNative(or *C.OstreeRepo) *Repo {
+ if or == nil {
return nil
}
- //o := (*glib.GObject)(unsafe.Pointer(p))
- //r := &Repo{o}
- r := &Repo{unsafe.Pointer(p)}
+ r := &Repo{unsafe.Pointer(or)}
return r
}
-// Checks if the repo has been initialized
-func (r *Repo) isInitialized() bool {
- if r.ptr != nil {
- return true
+// OpenRepo attempts to open the repo at the given path
+func OpenRepo(path string) (*Repo, error) {
+ if path == "" {
+ return nil, errors.New("empty path")
}
- return false
-}
-// Attempts to open the repo at the given path
-func OpenRepo(path string) (*Repo, error) {
- var cerr *C.GError = nil
cpath := C.CString(path)
- pathc := C.g_file_new_for_path(cpath)
- defer C.g_object_unref(C.gpointer(pathc))
- crepo := C.ostree_repo_new(pathc)
+ defer C.free(unsafe.Pointer(cpath))
+ repoPath := C.g_file_new_for_path(cpath)
+ defer C.g_object_unref(C.gpointer(repoPath))
+ crepo := C.ostree_repo_new(repoPath)
repo := repoFromNative(crepo)
+
+ var cerr *C.GError
r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, nil, &cerr)))
if !r {
return nil, generateError(cerr)
}
+
return repo, nil
}
-// Enable support for tombstone commits, which allow the repo to distinguish between
-// commits that were intentionally deleted and commits that were removed accidentally
-func enableTombstoneCommits(repo *Repo) error {
- var tombstoneCommits bool
- var config *C.GKeyFile = C.ostree_repo_get_config(repo.native())
- var cerr *C.GError
+// enableTombstoneCommits enables support for tombstone commits.
+//
+// This allows distinguishing between intentional deletions and accidental removals
+// of commits.
+func (r *Repo) enableTombstoneCommits() error {
+ if !r.isInitialized() {
+ return errors.New("repo not initialized")
+ }
- tombstoneCommits = glib.GoBool(glib.GBoolean(C.g_key_file_get_boolean(config, (*C.gchar)(C.CString("core")), (*C.gchar)(C.CString("tombstone-commits")), nil)))
+ config := C.ostree_repo_get_config(r.native())
+ groupC := C.CString("core")
+ defer C.free(unsafe.Pointer(groupC))
+ keyC := C.CString("tombstone-commits")
+ defer C.free(unsafe.Pointer(keyC))
+ valueC := C.g_key_file_get_boolean(config, (*C.gchar)(groupC), (*C.gchar)(keyC), nil)
+ tombstoneCommits := glib.GoBool(glib.GBoolean(valueC))
- //tombstoneCommits is false only if it really is false or if it is set to FALSE in the config file
+	// tombstoneCommits is false both when the key is unset and when it is explicitly set to FALSE in the config file
if !tombstoneCommits {
- C.g_key_file_set_boolean(config, (*C.gchar)(C.CString("core")), (*C.gchar)(C.CString("tombstone-commits")), C.TRUE)
- if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_config(repo.native(), config, &cerr))) {
+ var cerr *C.GError
+ C.g_key_file_set_boolean(config, (*C.gchar)(groupC), (*C.gchar)(keyC), C.TRUE)
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_config(r.native(), config, &cerr))) {
return generateError(cerr)
}
}
return nil
}
+// generateError wraps a GLib error into a Go one.
func generateError(err *C.GError) error {
+ if err == nil {
+ return errors.New("nil GError")
+ }
+
goErr := glib.ConvertGError(glib.ToGError(unsafe.Pointer(err)))
_, file, line, ok := runtime.Caller(1)
if ok {
- return errors.New(fmt.Sprintf("%s:%d - %s", file, line, goErr))
- } else {
- return goErr
+ return fmt.Errorf("%s:%d - %s", file, line, goErr)
}
+ return goErr
+}
+
+// isOk wraps a return value (gboolean/gint) into a bool.
+// 0 is false/error, everything else is true/ok.
+func isOk(v C.int) bool {
+ return glib.GoBool(glib.GBoolean(v))
}
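
A minimal usage sketch of the reworked OpenRepo above: failures surface as Go errors wrapped by generateError. The repository path is illustrative.

    package main

    import (
        "log"

        "github.com/ostreedev/ostree-go/pkg/otbuiltin"
    )

    func main() {
        // Open an existing local ostree repository.
        repo, err := otbuiltin.OpenRepo("/srv/ostree/repo")
        if err != nil {
            log.Fatal(err)
        }
        _ = repo
    }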
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h
index 734de9821..76171554d 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h
@@ -33,24 +33,12 @@ _ostree_repo_file(GFile *file)
return OSTREE_REPO_FILE (file);
}
-static guint
-_gpointer_to_uint (gpointer ptr)
-{
- return GPOINTER_TO_UINT (ptr);
-}
-
static gpointer
_guint_to_pointer (guint u)
{
return GUINT_TO_POINTER (u);
}
-static void
-_g_clear_object (volatile GObject **object_ptr)
-{
- g_clear_object(object_ptr);
-}
-
static const GVariantType*
_g_variant_type (char *type)
{
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go
index 55b51bfbd..04ada1792 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go
@@ -1,7 +1,7 @@
package otbuiltin
import (
- "strings"
+ "errors"
"unsafe"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
@@ -14,34 +14,42 @@ import (
// #include "builtin.go.h"
import "C"
-// Global variable for options
-var checkoutOpts checkoutOptions
-
-// Contains all of the options for checking commits out of
-// an ostree repo
+// checkoutOptions defines all of the options for checking commits
+// out of an ostree repo
+//
+// Note: while this is private, fields are public and part of the API.
type checkoutOptions struct {
- UserMode bool // Do not change file ownership or initialize extended attributes
- Union bool // Keep existing directories and unchanged files, overwriting existing filesystem
- AllowNoent bool // Do nothing if the specified filepath does not exist
- DisableCache bool // Do not update or use the internal repository uncompressed object caceh
- Whiteouts bool // Process 'whiteout' (docker style) entries
- RequireHardlinks bool // Do not fall back to full copies if hard linking fails
- Subpath string // Checkout sub-directory path
- FromFile string // Process many checkouts from the given file
+ // UserMode defines whether to checkout a repo in `bare-user` mode
+ UserMode bool
+ // Union specifies whether to overwrite existing filesystem entries
+ Union bool
+	// AllowNoent defines whether to skip filepaths that do not exist
+	AllowNoent bool
+	// DisableCache defines whether to disable the internal repository uncompressed object cache
+	DisableCache bool
+	// Whiteouts defines whether to process 'whiteout' (docker style) entries
+	Whiteouts bool
+	// RequireHardlinks defines whether to avoid falling back to full copies if hard linking fails
+	RequireHardlinks bool
+	// Subpath specifies a sub-directory to use for checkout
+	Subpath string
+ // FromFile specifies an optional file containing many checkouts to process
+ FromFile string
}
-// Instantiates and returns a checkoutOptions struct with default values set
+// NewCheckoutOptions instantiates and returns a checkoutOptions struct with default values set
func NewCheckoutOptions() checkoutOptions {
return checkoutOptions{}
}
-// Checks out a commit with the given ref from a repository at the location of repo path to to the destination. Returns an error if the checkout could not be processed
-func Checkout(repoPath, destination, commit string, opts checkoutOptions) error {
- checkoutOpts = opts
-
+// Checkout checks out commit `commitRef` from a repository at `repoPath`,
+// writing it to `destination`. Returns an error if the checkout could not be processed.
+func Checkout(repoPath, destination, commitRef string, opts checkoutOptions) error {
var cancellable *glib.GCancellable
- ccommit := C.CString(commit)
+
+ ccommit := C.CString(commitRef)
defer C.free(unsafe.Pointer(ccommit))
+
var gerr = glib.NewGError()
cerr := (*C.GError)(gerr.Ptr())
defer C.free(unsafe.Pointer(cerr))
@@ -53,50 +61,48 @@ func Checkout(repoPath, destination, commit string, opts checkoutOptions) error
return generateError(cerr)
}
- if strings.Compare(checkoutOpts.FromFile, "") != 0 {
- err := processManyCheckouts(crepo, destination, cancellable)
- if err != nil {
- return err
- }
- } else {
- var resolvedCommit *C.char
- defer C.free(unsafe.Pointer(resolvedCommit))
- if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(crepo, ccommit, C.FALSE, &resolvedCommit, &cerr))) {
- return generateError(cerr)
- }
- err := processOneCheckout(crepo, resolvedCommit, checkoutOpts.Subpath, destination, cancellable)
- if err != nil {
- return err
- }
+ // Multiple checkouts to process
+ if opts.FromFile != "" {
+ return processManyCheckouts(crepo, destination, cancellable)
}
- return nil
+
+ // Simple single checkout
+ var resolvedCommit *C.char
+ defer C.free(unsafe.Pointer(resolvedCommit))
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(crepo, ccommit, C.FALSE, &resolvedCommit, &cerr))) {
+ return generateError(cerr)
+ }
+
+ return processOneCheckout(crepo, resolvedCommit, destination, opts, cancellable)
}
-// Processes one checkout from the repo
-func processOneCheckout(crepo *C.OstreeRepo, resolvedCommit *C.char, subpath, destination string, cancellable *glib.GCancellable) error {
+// processOneCheckout processes one checkout from the repo
+func processOneCheckout(crepo *C.OstreeRepo, resolvedCommit *C.char, destination string, opts checkoutOptions, cancellable *glib.GCancellable) error {
cdest := C.CString(destination)
defer C.free(unsafe.Pointer(cdest))
+
var gerr = glib.NewGError()
cerr := (*C.GError)(gerr.Ptr())
defer C.free(unsafe.Pointer(cerr))
- var repoCheckoutAtOptions C.OstreeRepoCheckoutAtOptions
- if checkoutOpts.UserMode {
+	// Map options onto the native checkout-at options struct
+ var repoCheckoutAtOptions C.OstreeRepoCheckoutAtOptions
+ if opts.UserMode {
repoCheckoutAtOptions.mode = C.OSTREE_REPO_CHECKOUT_MODE_USER
}
- if checkoutOpts.Union {
+ if opts.Union {
repoCheckoutAtOptions.overwrite_mode = C.OSTREE_REPO_CHECKOUT_OVERWRITE_UNION_FILES
}
- checkedOut := glib.GoBool(glib.GBoolean(C.ostree_repo_checkout_at(crepo, &repoCheckoutAtOptions, C._at_fdcwd(), cdest, resolvedCommit, nil, &cerr)))
- if !checkedOut {
+ // Checkout commit to destination
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_checkout_at(crepo, &repoCheckoutAtOptions, C._at_fdcwd(), cdest, resolvedCommit, nil, &cerr))) {
return generateError(cerr)
}
return nil
}
-// process many checkouts
+// processManyCheckouts processes many checkouts in a single batch
func processManyCheckouts(crepo *C.OstreeRepo, target string, cancellable *glib.GCancellable) error {
- return nil
+	return errors.New("batch checkout processing: not implemented")
}
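
A minimal usage sketch of the refactored Checkout above: options are now passed by value instead of through the removed package-level checkoutOpts. The paths and ref are illustrative.

    package main

    import (
        "log"

        "github.com/ostreedev/ostree-go/pkg/otbuiltin"
    )

    func main() {
        opts := otbuiltin.NewCheckoutOptions()
        opts.UserMode = true // do not change ownership / xattrs
        opts.Union = true    // overwrite existing filesystem entries
        if err := otbuiltin.Checkout("/srv/ostree/repo", "/tmp/rootfs",
            "exampleos/x86_64/stable", opts); err != nil {
            log.Fatal(err)
        }
    }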
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
index 9550f802c..ccaff7a10 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
@@ -59,11 +59,11 @@ func NewCommitOptions() commitOptions {
}
type OstreeRepoTransactionStats struct {
- metadata_objects_total int32
+ metadata_objects_total int32
metadata_objects_written int32
- content_objects_total int32
- content_objects_written int32
- content_bytes_written uint64
+ content_objects_total int32
+ content_objects_written int32
+ content_bytes_written uint64
}
func (repo *Repo) PrepareTransaction() (bool, error) {
@@ -125,6 +125,7 @@ func (repo *Repo) RegenerateSummary() error {
// Commits a directory, specified by commitPath, to an ostree repo as a given branch
func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string, error) {
+ // TODO(lucab): `options` is global un-synchronized mutable state, get rid of it.
options = opts
var err error
@@ -140,7 +141,7 @@ func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string,
var cerr *C.GError
defer C.free(unsafe.Pointer(cerr))
var metadata *C.GVariant = nil
- defer func(){
+ defer func() {
if metadata != nil {
defer C.g_variant_unref(metadata)
}
@@ -196,7 +197,7 @@ func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string,
}
if options.AddDetachedMetadataString != nil {
- _, err := parseKeyValueStrings(options.AddDetachedMetadataString)
+ _, err = parseKeyValueStrings(options.AddDetachedMetadataString)
if err != nil {
goto out
}
@@ -476,7 +477,7 @@ func handleStatOverrideLine(line string, table *glib.GHashTable) error {
// Handle an individual line from a Skiplist file
func handleSkipListline(line string, table *glib.GHashTable) error {
- C.g_hash_table_add((*C.GHashTable)(table.Ptr()), C.gpointer( C.g_strdup((*C.gchar)(C.CString(line)))))
+ C.g_hash_table_add((*C.GHashTable)(table.Ptr()), C.gpointer(C.g_strdup((*C.gchar)(C.CString(line)))))
return nil
}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go
index c1ca2dc7e..6ee6671b4 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go
@@ -1,11 +1,8 @@
package otbuiltin
import (
- "errors"
"strings"
"unsafe"
-
- glib "github.com/ostreedev/ostree-go/pkg/glibobject"
)
// #cgo pkg-config: ostree-1
@@ -15,43 +12,37 @@ import (
// #include "builtin.go.h"
import "C"
-// Declare variables for options
-var initOpts initOptions
-
-// Contains all of the options for initializing an ostree repo
+// initOptions contains all of the options for initializing an ostree repo
+//
+// Note: while this is private, exported fields are public and part of the API.
type initOptions struct {
- Mode string // either bare, archive-z2, or bare-user
-
- repoMode C.OstreeRepoMode
+ // Mode defines repository mode: either bare, archive-z2, or bare-user
+ Mode string
}
-// Instantiates and returns an initOptions struct with default values set
+// NewInitOptions instantiates and returns an initOptions struct with default values set
func NewInitOptions() initOptions {
- io := initOptions{}
- io.Mode = "bare"
- io.repoMode = C.OSTREE_REPO_MODE_BARE
- return io
+ return initOptions{
+ Mode: "bare",
+ }
}
-// Initializes a new ostree repository at the given path. Returns true
+// Init initializes a new ostree repository at the given path. Returns true
// if the repo exists at the location, regardless of whether it was initialized
// by the function or if it already existed. Returns an error if the repo could
// not be initialized
func Init(path string, options initOptions) (bool, error) {
- initOpts = options
- err := parseMode()
+ repoMode, err := parseRepoMode(options.Mode)
if err != nil {
return false, err
}
// Create a repo struct from the path
- var cerr *C.GError
- defer C.free(unsafe.Pointer(cerr))
cpath := C.CString(path)
defer C.free(unsafe.Pointer(cpath))
pathc := C.g_file_new_for_path(cpath)
defer C.g_object_unref(C.gpointer(pathc))
- crepo := C.ostree_repo_new(pathc)
+ repo := C.ostree_repo_new(pathc)
// If the repo exists in the filesystem, return an error but set exists to true
/* var exists C.gboolean = 0
@@ -63,28 +54,31 @@ func Init(path string, options initOptions) (bool, error) {
return false, generateError(cerr)
}*/
- cerr = nil
- created := glib.GoBool(glib.GBoolean(C.ostree_repo_create(crepo, initOpts.repoMode, nil, &cerr)))
- if !created {
- errString := generateError(cerr).Error()
- if strings.Contains(errString, "File exists") {
- return true, generateError(cerr)
+ var cErr *C.GError
+ defer C.free(unsafe.Pointer(cErr))
+ if r := C.ostree_repo_create(repo, repoMode, nil, &cErr); !isOk(r) {
+ err := generateError(cErr)
+ if strings.Contains(err.Error(), "File exists") {
+ return true, err
}
- return false, generateError(cerr)
+ return false, err
}
return true, nil
}
-// Converts the mode string to a C.OSTREE_REPO_MODE enum value
-func parseMode() error {
- if strings.EqualFold(initOpts.Mode, "bare") {
- initOpts.repoMode = C.OSTREE_REPO_MODE_BARE
- } else if strings.EqualFold(initOpts.Mode, "bare-user") {
- initOpts.repoMode = C.OSTREE_REPO_MODE_BARE_USER
- } else if strings.EqualFold(initOpts.Mode, "archive-z2") {
- initOpts.repoMode = C.OSTREE_REPO_MODE_ARCHIVE_Z2
- } else {
- return errors.New("Invalid option for mode")
+// parseRepoMode converts a mode string to its C.OstreeRepoMode enum value
+func parseRepoMode(modeLabel string) (C.OstreeRepoMode, error) {
+ var cErr *C.GError
+ defer C.free(unsafe.Pointer(cErr))
+
+ cModeLabel := C.CString(modeLabel)
+ defer C.free(unsafe.Pointer(cModeLabel))
+
+ var retMode C.OstreeRepoMode
+ if r := C.ostree_repo_mode_from_string(cModeLabel, &retMode, &cErr); !isOk(r) {
+ // NOTE(lucab): zero-value for this C enum has no special/invalid meaning.
+ return C.OSTREE_REPO_MODE_BARE, generateError(cErr)
}
- return nil
+
+ return retMode, nil
}
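
A minimal usage sketch of the reworked Init above: the repository mode is now resolved from options.Mode via ostree_repo_mode_from_string instead of a package global. The path is illustrative.

    package main

    import (
        "log"

        "github.com/ostreedev/ostree-go/pkg/otbuiltin"
    )

    func main() {
        opts := otbuiltin.NewInitOptions() // Mode defaults to "bare"
        opts.Mode = "archive-z2"
        exists, err := otbuiltin.Init("/srv/ostree/repo", opts)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("repo present: %v", exists)
    }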
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go
index 2ceea0925..d57498215 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go
@@ -2,7 +2,6 @@ package otbuiltin
import (
"fmt"
- "strings"
"time"
"unsafe"
@@ -16,13 +15,7 @@ import (
// #include "builtin.go.h"
import "C"
-// Declare variables for options
-var logOpts logOptions
-
-// Set the format of the strings in the log
-const formatString = "2006-01-02 03:04;05 -0700"
-
-// Struct for the various pieces of data in a log entry
+// LogEntry is a struct for the various pieces of data in a log entry
type LogEntry struct {
Checksum []byte
Variant []byte
@@ -39,24 +32,25 @@ func (l LogEntry) String() string {
return fmt.Sprintf("%s\n%s\n\n", l.Checksum, l.Variant)
}
-type OstreeDumpFlags uint
+type ostreeDumpFlags uint
const (
- OSTREE_DUMP_NONE OstreeDumpFlags = 0
- OSTREE_DUMP_RAW OstreeDumpFlags = 1 << iota
+ ostreeDumpNone ostreeDumpFlags = 0
+ ostreeDumpRaw ostreeDumpFlags = 1 << iota
)
-// Contains all of the options for initializing an ostree repo
+// logOptions contains all of the options for displaying the log of an ostree repo
type logOptions struct {
- Raw bool // Show raw variant data
+ // Raw determines whether to show raw variant data
+ Raw bool
}
-//Instantiates and returns a logOptions struct with default values set
+// NewLogOptions instantiates and returns a logOptions struct with default values set
func NewLogOptions() logOptions {
return logOptions{}
}
-// Show the logs of a branch starting with a given commit or ref. Returns a
+// Log shows the logs of a branch starting with a given commit or ref. Returns a
// slice of log entries on success and an error otherwise
func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) {
// attempt to open the repository
@@ -69,12 +63,12 @@ func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) {
defer C.free(unsafe.Pointer(cbranch))
var checksum *C.char
defer C.free(unsafe.Pointer(checksum))
- var flags OstreeDumpFlags = OSTREE_DUMP_NONE
var cerr *C.GError
defer C.free(unsafe.Pointer(cerr))
- if logOpts.Raw {
- flags |= OSTREE_DUMP_RAW
+ flags := ostreeDumpNone
+ if options.Raw {
+ flags |= ostreeDumpRaw
}
if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.FALSE, &checksum, &cerr))) {
@@ -84,84 +78,86 @@ func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) {
return logCommit(repo, checksum, false, flags)
}
-func logCommit(repo *Repo, checksum *C.char, isRecursive bool, flags OstreeDumpFlags) ([]LogEntry, error) {
+func logCommit(repo *Repo, checksum *C.char, isRecursive bool, flags ostreeDumpFlags) ([]LogEntry, error) {
var variant *C.GVariant
- var parent *C.char
- defer C.free(unsafe.Pointer(parent))
var gerr = glib.NewGError()
var cerr = (*C.GError)(gerr.Ptr())
defer C.free(unsafe.Pointer(cerr))
- entries := make([]LogEntry, 0, 1)
- var err error
if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, &variant, &cerr))) {
if isRecursive && glib.GoBool(glib.GBoolean(C.g_error_matches(cerr, C.g_io_error_quark(), C.G_IO_ERROR_NOT_FOUND))) {
return nil, nil
}
- return entries, generateError(cerr)
+ return nil, generateError(cerr)
}
- nextLogEntry := dumpLogObject(C.OSTREE_OBJECT_TYPE_COMMIT, checksum, variant, flags)
-
- // get the parent of this commit
- parent = (*C.char)(C.ostree_commit_get_parent(variant))
+ // Get the parent of this commit
+ parent := (*C.char)(C.ostree_commit_get_parent(variant))
defer C.free(unsafe.Pointer(parent))
+
+ entries := make([]LogEntry, 0, 1)
if parent != nil {
+ var err error
entries, err = logCommit(repo, parent, true, flags)
if err != nil {
return nil, err
}
}
- entries = append(entries, *nextLogEntry)
+
+ nextLogEntry := dumpLogObject(C.OSTREE_OBJECT_TYPE_COMMIT, checksum, variant, flags)
+ entries = append(entries, nextLogEntry)
+
return entries, nil
}
-func dumpLogObject(objectType C.OstreeObjectType, checksum *C.char, variant *C.GVariant, flags OstreeDumpFlags) *LogEntry {
- objLog := new(LogEntry)
- objLog.Checksum = []byte(C.GoString(checksum))
+func dumpLogObject(objectType C.OstreeObjectType, checksum *C.char, variant *C.GVariant, flags ostreeDumpFlags) LogEntry {
+ csum := []byte(C.GoString(checksum))
- if (flags & OSTREE_DUMP_RAW) != 0 {
- dumpVariant(objLog, variant)
- return objLog
+ if (flags & ostreeDumpRaw) != 0 {
+ return dumpVariant(variant, csum)
}
switch objectType {
case C.OSTREE_OBJECT_TYPE_COMMIT:
- dumpCommit(objLog, variant, flags)
- return objLog
+ return dumpCommit(variant, flags, csum)
default:
- return objLog
+ return LogEntry{
+ Checksum: csum,
+ }
}
}
-func dumpVariant(log *LogEntry, variant *C.GVariant) {
- var byteswappedVariant *C.GVariant
-
+func dumpVariant(variant *C.GVariant, csum []byte) LogEntry {
+ var logVariant []byte
if C.G_BYTE_ORDER != C.G_BIG_ENDIAN {
- byteswappedVariant = C.g_variant_byteswap(variant)
- log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE))))
+ byteswappedVariant := C.g_variant_byteswap(variant)
+ logVariant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE))))
} else {
- log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE))))
+ logVariant = []byte(C.GoString((*C.char)(C.g_variant_print(variant, C.TRUE))))
+ }
+
+ return LogEntry{
+ Checksum: csum,
+ Variant: logVariant,
}
}
-func dumpCommit(log *LogEntry, variant *C.GVariant, flags OstreeDumpFlags) {
- var subject, body *C.char
+func dumpCommit(variant *C.GVariant, flags ostreeDumpFlags, csum []byte) LogEntry {
+ var subject *C.char
defer C.free(unsafe.Pointer(subject))
+ var body *C.char
defer C.free(unsafe.Pointer(body))
- var timestamp C.guint64
+ var timeBigE C.guint64
- C._g_variant_get_commit_dump(variant, C.CString("(a{sv}aya(say)&s&stayay)"), &subject, &body, &timestamp)
+ C._g_variant_get_commit_dump(variant, C.CString("(a{sv}aya(say)&s&stayay)"), &subject, &body, &timeBigE)
- // Timestamp is now a Unix formatted timestamp as a guint64
- timestamp = C._guint64_from_be(timestamp)
- log.Timestamp = time.Unix((int64)(timestamp), 0)
-
- if strings.Compare(C.GoString(subject), "") != 0 {
- log.Subject = C.GoString(subject)
- }
+ // Translate to a host-endian epoch and convert to Go timestamp
+ timeHostE := C._guint64_from_be(timeBigE)
+ timestamp := time.Unix((int64)(timeHostE), 0)
- if strings.Compare(C.GoString(body), "") != 0 {
- log.Body = C.GoString(body)
+ return LogEntry{
+ Timestamp: timestamp,
+ Subject: C.GoString(subject),
+ Body: C.GoString(body),
}
}
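
A minimal usage sketch of the refactored Log above: options are passed by value and, per logCommit, entries accumulate parent-first (oldest first). The repo path and branch are illustrative.

    package main

    import (
        "fmt"
        "log"

        "github.com/ostreedev/ostree-go/pkg/otbuiltin"
    )

    func main() {
        opts := otbuiltin.NewLogOptions()
        opts.Raw = false // set true to dump raw variant data
        entries, err := otbuiltin.Log("/srv/ostree/repo", "exampleos/x86_64/stable", opts)
        if err != nil {
            log.Fatal(err)
        }
        for _, e := range entries {
            fmt.Printf("%s %s\n", e.Checksum, e.Subject)
        }
    }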
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go
index 8dfa40a55..532522fc5 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go
@@ -145,7 +145,7 @@ func deleteCommit(repo *Repo, commitToDelete string, cancellable *glib.GCancella
}
}
- if err := enableTombstoneCommits(repo); err != nil {
+ if err := repo.enableTombstoneCommits(); err != nil {
return err
}
@@ -169,7 +169,7 @@ func pruneCommitsKeepYoungerThanDate(repo *Repo, date time.Time, cancellable *gl
var cerr = (*C.GError)(gerr.Ptr())
defer C.free(unsafe.Pointer(cerr))
- if err := enableTombstoneCommits(repo); err != nil {
+ if err := repo.enableTombstoneCommits(); err != nil {
return err
}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go
deleted file mode 100644
index d43ea07c7..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go
+++ /dev/null
@@ -1 +0,0 @@
-package otbuiltin
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go
+++ /dev/null
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go
+++ /dev/null