Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/containers/buildah/image.go                | 10
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go   | 32
-rw-r--r--  vendor/github.com/containers/buildah/unshare/unshare.c       |  9
-rw-r--r--  vendor/github.com/containers/buildah/unshare/unshare.go      |  5
-rw-r--r--  vendor/github.com/containers/buildah/vendor.conf             |  8
-rw-r--r--  vendor/github.com/containers/image/storage/storage_image.go  | 64
-rw-r--r--  vendor/github.com/containers/storage/drivers/chown_unix.go   | 10
-rw-r--r--  vendor/github.com/containers/storage/layers.go               | 17
-rw-r--r--  vendor/github.com/containers/storage/layers_ffjson.go        |  2
-rw-r--r--  vendor/github.com/containers/storage/store.go                | 14
10 files changed, 107 insertions, 64 deletions
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index df50d95bd..fb2e87f93 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -329,20 +329,19 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if err != nil {
return nil, err
}
- defer rc.Close()
} else {
// Extract this layer, one of possibly many.
rc, err = i.store.Diff("", layerID, diffOptions)
if err != nil {
return nil, errors.Wrapf(err, "error extracting %s", what)
}
- defer rc.Close()
}
srcHasher := digest.Canonical.Digester()
reader := io.TeeReader(rc, srcHasher.Hash())
// Set up to write the possibly-recompressed blob.
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
+ rc.Close()
return nil, errors.Wrapf(err, "error opening file for %s", what)
}
destHasher := digest.Canonical.Digester()
@@ -351,14 +350,17 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Compress the layer, if we're recompressing it.
writer, err := archive.CompressStream(multiWriter, i.compression)
if err != nil {
+ layerFile.Close()
+ rc.Close()
return nil, errors.Wrapf(err, "error compressing %s", what)
}
size, err := io.Copy(writer, reader)
+ writer.Close()
+ layerFile.Close()
+ rc.Close()
if err != nil {
return nil, errors.Wrapf(err, "error storing %s to file", what)
}
- writer.Close()
- layerFile.Close()
if i.compression == archive.Uncompressed {
if size != counter.Count {
return nil, errors.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
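
The image.go hunks above swap deferred Close calls for explicit ones, so each layer's read stream and temporary layer file are released on every exit path as soon as that layer has been processed, instead of staying open until NewImageSource returns. A minimal sketch of the same close-on-every-path style, using a hypothetical copyLayer helper rather than buildah's actual code:

package example

import (
	"io"
	"os"
)

// copyLayer drains one layer's stream into dest, closing both the stream and
// the destination file explicitly on every exit path (the style used above,
// where a defer would keep resources open until the whole function returns).
func copyLayer(rc io.ReadCloser, dest string) (int64, error) {
	out, err := os.OpenFile(dest, os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		rc.Close() // close the source before returning the error
		return 0, err
	}
	n, err := io.Copy(out, rc)
	// Close unconditionally, then report whatever the copy returned.
	out.Close()
	rc.Close()
	return n, err
}
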
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 4bcd38c05..9b4c6f635 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -648,20 +648,25 @@ func (b *Executor) Prepare(ctx context.Context, ib *imagebuilder.Builder, node *
for _, v := range builder.Volumes() {
volumes[v] = struct{}{}
}
+ ports := map[docker.Port]struct{}{}
+ for _, p := range builder.Ports() {
+ ports[docker.Port(p)] = struct{}{}
+ }
dConfig := docker.Config{
- Hostname: builder.Hostname(),
- Domainname: builder.Domainname(),
- User: builder.User(),
- Env: builder.Env(),
- Cmd: builder.Cmd(),
- Image: from,
- Volumes: volumes,
- WorkingDir: builder.WorkDir(),
- Entrypoint: builder.Entrypoint(),
- Labels: builder.Labels(),
- Shell: builder.Shell(),
- StopSignal: builder.StopSignal(),
- OnBuild: builder.OnBuild(),
+ Hostname: builder.Hostname(),
+ Domainname: builder.Domainname(),
+ User: builder.User(),
+ Env: builder.Env(),
+ Cmd: builder.Cmd(),
+ Image: from,
+ Volumes: volumes,
+ WorkingDir: builder.WorkDir(),
+ Entrypoint: builder.Entrypoint(),
+ Labels: builder.Labels(),
+ Shell: builder.Shell(),
+ StopSignal: builder.StopSignal(),
+ OnBuild: builder.OnBuild(),
+ ExposedPorts: ports,
}
var rootfs *docker.RootFS
if builder.Docker.RootFS != nil {
@@ -751,6 +756,7 @@ func (b *Executor) Execute(ctx context.Context, ib *imagebuilder.Builder, node *
checkForLayers := true
children := node.Children
commitName := b.output
+ b.containerIDs = nil
for i, node := range node.Children {
step := ib.Step()
if err := step.Resolve(node); err != nil {
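
The build.go hunk records the builder's exposed ports in the image configuration by converting the port list into the set form the Docker config expects (a map keyed by port with empty struct values). A rough sketch of that conversion, with a local Port type standing in for go-dockerclient's docker.Port:

package example

// Port stands in for the go-dockerclient docker.Port type, whose string form
// is "port/proto", e.g. "8080/tcp".
type Port string

// portSet converts a builder's exposed-port list into the map-as-set shape
// stored under ExposedPorts in a Docker image configuration.
func portSet(ports []string) map[Port]struct{} {
	set := map[Port]struct{}{}
	for _, p := range ports {
		set[Port(p)] = struct{}{}
	}
	return set
}
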
diff --git a/vendor/github.com/containers/buildah/unshare/unshare.c b/vendor/github.com/containers/buildah/unshare/unshare.c
index 83864359b..47d775c73 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare.c
+++ b/vendor/github.com/containers/buildah/unshare/unshare.c
@@ -31,7 +31,7 @@ static int _buildah_unshare_parse_envint(const char *envname) {
void _buildah_unshare(void)
{
- int flags, pidfd, continuefd, n, pgrp, sid, ctty, allow_setgroups;
+ int flags, pidfd, continuefd, n, pgrp, sid, ctty;
char buf[2048];
flags = _buildah_unshare_parse_envint("_Buildah-unshare");
@@ -83,14 +83,7 @@ void _buildah_unshare(void)
_exit(1);
}
}
- allow_setgroups = _buildah_unshare_parse_envint("_Buildah-allow-setgroups");
if ((flags & CLONE_NEWUSER) != 0) {
- if (allow_setgroups == 1) {
- if (setgroups(0, NULL) != 0) {
- fprintf(stderr, "Error during setgroups(0, NULL): %m\n");
- _exit(1);
- }
- }
if (setresgid(0, 0, 0) != 0) {
fprintf(stderr, "Error during setresgid(0): %m\n");
_exit(1);
diff --git a/vendor/github.com/containers/buildah/unshare/unshare.go b/vendor/github.com/containers/buildah/unshare/unshare.go
index d89dfc053..74b107e44 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare.go
+++ b/vendor/github.com/containers/buildah/unshare/unshare.go
@@ -84,11 +84,6 @@ func (c *Cmd) Start() error {
c.Env = append(c.Env, fmt.Sprintf("_Buildah-ctty=%d", len(c.ExtraFiles)+3))
c.ExtraFiles = append(c.ExtraFiles, c.Ctty)
}
- if c.GidMappingsEnableSetgroups {
- c.Env = append(c.Env, "_Buildah-allow-setgroups=1")
- } else {
- c.Env = append(c.Env, "_Buildah-allow-setgroups=0")
- }
// Make sure we clean up our pipes.
defer func() {
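
For context, unshare.Cmd.Start passes its namespace settings to the re-exec'd child through environment variables that the C constructor in unshare.c reads back; the hunks above simply drop the setgroups flag from that handshake. A simplified sketch of the parent side of such an env-var handshake, with made-up variable names rather than buildah's:

package example

import (
	"fmt"
	"os"
	"os/exec"
)

// startUnshared re-execs the current binary with settings encoded in the
// environment so an early constructor in the child can read them before the
// Go runtime starts. The _Example-unshare name is illustrative only.
func startUnshared(flags int) (*exec.Cmd, error) {
	cmd := exec.Command("/proc/self/exe", os.Args[1:]...)
	cmd.Env = append(os.Environ(), fmt.Sprintf("_Example-unshare=%d", flags))
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	if err := cmd.Start(); err != nil {
		return nil, fmt.Errorf("error starting unshared child: %w", err)
	}
	return cmd, nil
}
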
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index e69c92496..92c3be927 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -3,9 +3,9 @@ github.com/blang/semver master
github.com/BurntSushi/toml master
github.com/containerd/continuity master
github.com/containernetworking/cni v0.7.0-alpha1
-github.com/seccomp/containers-golang master
-github.com/containers/image 85d7559d44fd71f30e46e43d809bfbf88d11d916
-github.com/containers/storage 243c4cd616afdf06b4a975f18c4db083d26b1641
+github.com/containers/image 8f11f3ad8912d8bc43a7d25992b8f313ffefd430
+github.com/containers/libpod 2afadeec6696fefac468a49c8ba24b0bc275aa75
+github.com/containers/storage 68332c059156eae970a03245cfcd4d717fb66ecd
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
@@ -42,7 +42,7 @@ github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/pborman/uuid master
github.com/pkg/errors master
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
-github.com/containers/libpod 2afadeec6696fefac468a49c8ba24b0bc275aa75
+github.com/seccomp/containers-golang master
github.com/sirupsen/logrus master
github.com/syndtr/gocapability master
github.com/tchap/go-patricia master
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
index 6ae525df4..d1b010a76 100644
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -313,6 +313,10 @@ func (s storageImageDestination) DesiredLayerCompression() types.LayerCompressio
return types.PreserveOriginal
}
+func (s *storageImageDestination) computeNextBlobCacheFile() string {
+ return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
+}
+
// PutBlob stores a layer or data blob in our temporary directory, checking that any information
// in the blobinfo matches the incoming data.
func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
@@ -328,7 +332,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
}
diffID := digest.Canonical.Digester()
- filename := filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
+ filename := s.computeNextBlobCacheFile()
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
if err != nil {
return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename)
@@ -504,7 +508,6 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
continue
}
- var diff io.ReadCloser
// Check if there's already a layer with the ID that we'd give to the result of applying
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
@@ -533,19 +536,11 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
lastLayer = layer.ID
continue
}
- // Check if we cached a file with that blobsum. If we didn't already have a layer with
- // the blob's contents, we should have gotten a copy.
- if filename, ok := s.filenames[blob.Digest]; ok {
- // Use the file's contents to initialize the layer.
- file, err2 := os.Open(filename)
- if err2 != nil {
- return errors.Wrapf(err2, "error opening file %q", filename)
- }
- defer file.Close()
- diff = file
- }
- if diff == nil {
- // Try to find a layer with contents matching that blobsum.
+ // Check if we previously cached a file with that blob's contents. If we didn't,
+ // then we need to read the desired contents from a layer.
+ filename, ok := s.filenames[blob.Digest]
+ if !ok {
+ // Try to find the layer with contents matching that blobsum.
layer := ""
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest)
if err2 == nil && len(layers) > 0 {
@@ -559,25 +554,48 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if layer == "" {
return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
}
- // Use the layer's contents to initialize the new layer.
+ // Read the layer's contents.
noCompression := archive.Uncompressed
diffOptions := &storage.DiffOptions{
Compression: &noCompression,
}
- diff, err2 = s.imageRef.transport.store.Diff("", layer, diffOptions)
+ diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
if err2 != nil {
return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
}
- defer diff.Close()
+ // Copy the layer diff to a file. Diff() takes a lock that it holds
+ // until the ReadCloser that it returns is closed, and PutLayer() wants
+ // the same lock, so the diff can't just be directly streamed from one
+ // to the other.
+ filename = s.computeNextBlobCacheFile()
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+ if err != nil {
+ diff.Close()
+ return errors.Wrapf(err, "error creating temporary file %q", filename)
+ }
+ // Copy the data to the file.
+ // TODO: This can take quite some time, and should ideally be cancellable using
+ // ctx.Done().
+ _, err = io.Copy(file, diff)
+ diff.Close()
+ file.Close()
+ if err != nil {
+ return errors.Wrapf(err, "error storing blob to file %q", filename)
+ }
+ // Make sure that we can find this file later, should we need the layer's
+ // contents again.
+ s.filenames[blob.Digest] = filename
}
- if diff == nil {
- // This shouldn't have happened.
- return errors.Errorf("error applying blob %q: content not found", blob.Digest)
+ // Read the cached blob and use it as a diff.
+ file, err := os.Open(filename)
+ if err != nil {
+ return errors.Wrapf(err, "error opening file %q", filename)
}
+ defer file.Close()
// Build the new layer using the diff, regardless of where it came from.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, diff)
- if err != nil {
+ layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file)
+ if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest)
}
lastLayer = layer.ID
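
The comment added in this hunk spells out the constraint behind the rewrite: store.Diff holds a layer-store lock until the ReadCloser it returns is closed, and store.PutLayer needs that same lock, so the diff has to be spooled to a temporary file between the two calls instead of being streamed directly from one to the other. A stripped-down sketch of that spool-then-apply step, assuming a hypothetical spoolToFile helper rather than the containers/image code:

package example

import (
	"io"
	"os"
)

// spoolToFile drains a diff stream into a temporary file and closes the
// stream, releasing whatever lock its producer holds, before the contents
// are handed to a consumer (such as PutLayer) that wants the same lock.
func spoolToFile(diff io.ReadCloser, dir string) (string, error) {
	file, err := os.CreateTemp(dir, "blob-")
	if err != nil {
		diff.Close()
		return "", err
	}
	_, copyErr := io.Copy(file, diff)
	// Close the stream first so its lock is dropped, then the file.
	diff.Close()
	file.Close()
	if copyErr != nil {
		os.Remove(file.Name())
		return "", copyErr
	}
	return file.Name(), nil
}

The caller can then reopen the returned path and pass the fresh *os.File to the layer-creating call, so no lock is held across the copy.
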
diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go
index 5454657ec..b37a9271a 100644
--- a/vendor/github.com/containers/storage/drivers/chown_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chown_unix.go
@@ -45,10 +45,20 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.
uid, gid = mappedPair.UID, mappedPair.GID
}
if uid != int(st.Uid) || gid != int(st.Gid) {
+ stat, err := os.Lstat(path)
+ if err != nil {
+ return fmt.Errorf("%s: lstat(%q): %v", os.Args[0], path, err)
+ }
// Make the change.
if err := syscall.Lchown(path, uid, gid); err != nil {
return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err)
}
+ // Restore the SUID and SGID bits if they were originally set.
+ if (stat.Mode()&os.ModeSymlink == 0) && stat.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
+ if err := os.Chmod(path, stat.Mode()); err != nil {
+ return fmt.Errorf("%s: chmod(%q): %v", os.Args[0], path, err)
+ }
+ }
}
}
return nil
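
The extra Lstat/Chmod pair exists because chown(2) clears the set-user-ID and set-group-ID bits when it changes a regular file's ownership, so after remapping IDs the original mode has to be put back; symlinks are skipped since Chmod does not apply to them. A minimal standalone version of the same fix-up, not the graph driver's actual helper:

package example

import (
	"os"
	"syscall"
)

// lchownPreserveSetid changes ownership of path and then restores the setuid
// and setgid bits that chown(2) clears on regular files.
func lchownPreserveSetid(path string, uid, gid int) error {
	st, err := os.Lstat(path)
	if err != nil {
		return err
	}
	if err := syscall.Lchown(path, uid, gid); err != nil {
		return err
	}
	// Only non-symlinks that had setuid/setgid set need the mode restored.
	if st.Mode()&os.ModeSymlink == 0 && st.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
		return os.Chmod(path, st.Mode())
	}
	return nil
}
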
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 9989bd6be..fe263ba63 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -558,13 +558,22 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
StorageOpt: options,
}
if writeable {
- err = r.driver.CreateReadWrite(id, parent, &opts)
+ if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
+ if id != "" {
+ return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
+ }
+ return nil, -1, errors.Wrapf(err, "error creating read-write layer")
+ }
} else {
- err = r.driver.Create(id, parent, &opts)
+ if err = r.driver.Create(id, parent, &opts); err != nil {
+ if id != "" {
+ return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
+ }
+ return nil, -1, errors.Wrapf(err, "error creating layer")
+ }
}
if !reflect.DeepEqual(parentMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(parentMappings.GIDs(), idMappings.GIDs()) {
- err = r.driver.UpdateLayerIDMap(id, parentMappings, idMappings, mountLabel)
- if err != nil {
+ if err = r.driver.UpdateLayerIDMap(id, parentMappings, idMappings, mountLabel); err != nil {
// We don't have a record of this layer, but at least
// try to clean it up underneath us.
r.driver.Remove(id)
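
The layers.go change wraps the graph-driver error with github.com/pkg/errors so the message names the layer ID the caller asked for, falling back to a generic message when the store was left to pick the ID itself. The general shape of that wrapping, with a stand-in create function rather than the real graphdriver interface:

package example

import (
	"github.com/pkg/errors"
)

// createLayer wraps a creation failure with the requested ID when one was
// supplied, and with a generic message otherwise; create stands in for a
// graph driver's Create or CreateReadWrite call.
func createLayer(id string, create func(string) error) error {
	if err := create(id); err != nil {
		if id != "" {
			return errors.Wrapf(err, "error creating layer with ID %q", id)
		}
		return errors.Wrap(err, "error creating layer")
	}
	return nil
}
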
diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go
index 125b5d8c9..09b5d0f33 100644
--- a/vendor/github.com/containers/storage/layers_ffjson.go
+++ b/vendor/github.com/containers/storage/layers_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: layers.go
+// source: ./layers.go
package storage
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index e0deb2c30..94cf1f0a7 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -2369,13 +2369,23 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
}
for _, store := range append([]ROLayerStore{lstore}, lstores...) {
store.Lock()
- defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
store.Load()
}
if store.Exists(to) {
- return store.Diff(from, to, options)
+ rc, err := store.Diff(from, to, options)
+ if rc != nil && err == nil {
+ wrapped := ioutils.NewReadCloserWrapper(rc, func() error {
+ err := rc.Close()
+ store.Unlock()
+ return err
+ })
+ return wrapped, nil
+ }
+ store.Unlock()
+ return rc, err
}
+ store.Unlock()
}
return nil, ErrLayerUnknown
}
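
The store.Diff rewrite stops unlocking every candidate layer store at function exit; instead, the store that actually serves the diff stays locked for exactly as long as the returned stream is open, because the ReadCloser is wrapped so that its Close also drops the lock. A generic sketch of tying a lock's lifetime to a ReadCloser, using a plain sync.Mutex and a small wrapper type instead of the pkg/ioutils helper:

package example

import (
	"io"
	"sync"
)

// lockedReadCloser releases a mutex when the wrapped stream is closed, so the
// lock is held exactly as long as the caller keeps reading.
type lockedReadCloser struct {
	io.ReadCloser
	mu *sync.Mutex
}

func (l *lockedReadCloser) Close() error {
	err := l.ReadCloser.Close()
	l.mu.Unlock()
	return err
}

// diffLocked takes the lock, asks open for the stream, and hands back a
// wrapper that unlocks on Close; if open fails, the lock is released at once.
func diffLocked(mu *sync.Mutex, open func() (io.ReadCloser, error)) (io.ReadCloser, error) {
	mu.Lock()
	rc, err := open()
	if err != nil || rc == nil {
		mu.Unlock()
		return rc, err
	}
	return &lockedReadCloser{ReadCloser: rc, mu: mu}, nil
}
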