| author | Daniel J Walsh <dwalsh@redhat.com> | 2018-05-23 14:15:54 -0400 |
|---|---|---|
| committer | Atomic Bot <atomic-devel@projectatomic.io> | 2018-05-23 22:38:17 +0000 |
| commit | 915364034f1ddf036d277830d45c54b8eb39f940 | |
| tree | 968ec9dfe5a69626a69aef545a5dd429ae695cb1 /vendor/github.com | |
| parent | d252fa710e55fde35824dfe1f01e03e783f04a18 | |
Update podman build to match buildah bud functionality
Add --label, --annotation, --iidfile, --squash
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Closes: #824
Approved by: TomSweeneyRedHat
Diffstat (limited to 'vendor/github.com')
10 files changed, 930 insertions, 151 deletions
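The central API change in the vendored code below is that buildah's pull and terminal policies become dedicated types (`PullPolicy`, `TerminalPolicy`) with `String` methods, instead of bare `int` constants. Here is a minimal, self-contained sketch of that typed-constant pattern — the type, constants, and method body are taken from the diff; the `main` wrapper is only illustrative:

```go
package main

import "fmt"

// PullPolicy takes the value PullIfMissing, PullAlways, or PullNever.
type PullPolicy int

const (
	// PullIfMissing pulls the base image only if no local copy exists.
	PullIfMissing PullPolicy = iota
	// PullAlways pulls a fresh copy of the base image before the build proceeds.
	PullAlways
	// PullNever never pulls; the base image must already be present locally.
	PullNever
)

// String converts a PullPolicy into a string, as in the method the patch adds.
func (p PullPolicy) String() string {
	switch p {
	case PullIfMissing:
		return "PullIfMissing"
	case PullAlways:
		return "PullAlways"
	case PullNever:
		return "PullNever"
	}
	return fmt.Sprintf("unrecognized policy %d", p)
}

func main() {
	// With a dedicated type, log output identifies a bad value instead of
	// printing a bare integer.
	fmt.Println(PullAlways)     // "PullAlways"
	fmt.Println(PullPolicy(42)) // "unrecognized policy 42"
}
```

One payoff of the change is compile-time checking: fields such as `BuilderOptions.PullPolicy` now reject arbitrary ints from unrelated constant groups.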
diff --git a/vendor/github.com/projectatomic/buildah/buildah.go b/vendor/github.com/projectatomic/buildah/buildah.go
index a14ec0a61..b05e5deb1 100644
--- a/vendor/github.com/projectatomic/buildah/buildah.go
+++ b/vendor/github.com/projectatomic/buildah/buildah.go
@@ -3,6 +3,7 @@ package buildah
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
@@ -35,11 +36,14 @@ const (
 	stateFile = Package + ".json"
 )
 
+// PullPolicy takes the value PullIfMissing, PullAlways, or PullNever.
+type PullPolicy int
+
 const (
 	// PullIfMissing is one of the values that BuilderOptions.PullPolicy
 	// can take, signalling that the source image should be pulled from a
 	// registry if a local copy of it is not already present.
-	PullIfMissing = iota
+	PullIfMissing PullPolicy = iota
 	// PullAlways is one of the values that BuilderOptions.PullPolicy can
 	// take, signalling that a fresh, possibly updated, copy of the image
 	// should be pulled from a registry before the build proceeds.
@@ -50,6 +54,19 @@ const (
 	PullNever
 )
 
+// String converts a PullPolicy into a string.
+func (p PullPolicy) String() string {
+	switch p {
+	case PullIfMissing:
+		return "PullIfMissing"
+	case PullAlways:
+		return "PullAlways"
+	case PullNever:
+		return "PullNever"
+	}
+	return fmt.Sprintf("unrecognized policy %d", p)
+}
+
 // Builder objects are used to represent containers which are being used to
 // build images.  They also carry potential updates which will be applied to
 // the image's configuration when the container's contents are used to build an
@@ -95,9 +112,11 @@ type Builder struct {
 	// Image metadata and runtime settings, in multiple formats.
 	OCIv1  v1.Image       `json:"ociv1,omitempty"`
 	Docker docker.V2Image `json:"docker,omitempty"`
-	// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
+
+	// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format.
 	DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"`
-	CommonBuildOpts *CommonBuildOptions
+
+	CommonBuildOpts *CommonBuildOptions
 }
 
 // BuilderInfo are used as objects to display container information
@@ -140,35 +159,54 @@ func GetBuildInfo(b *Builder) BuilderInfo {
 	}
 }
 
-// CommonBuildOptions are reseources that can be defined by flags for both buildah from and bud
+// CommonBuildOptions are resources that can be defined by flags for both buildah from and build-using-dockerfile
 type CommonBuildOptions struct {
 	// AddHost is the list of hostnames to add to the resolv.conf
 	AddHost []string
-	//CgroupParent it the path to cgroups under which the cgroup for the container will be created.
+	// CgroupParent is the path to cgroups under which the cgroup for the container will be created.
 	CgroupParent string
-	//CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period
+	// CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period
 	CPUPeriod uint64
-	//CPUQuota limits the CPU CFS (Completely Fair Scheduler) quota
+	// CPUQuota limits the CPU CFS (Completely Fair Scheduler) quota
 	CPUQuota int64
-	//CPUShares (relative weight
+	// CPUShares (relative weight)
 	CPUShares uint64
-	//CPUSetCPUs in which to allow execution (0-3, 0,1)
+	// CPUSetCPUs in which to allow execution (0-3, 0,1)
 	CPUSetCPUs string
-	//CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+	// CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
 	CPUSetMems string
-	//Memory limit
+	// Memory is the upper limit (in bytes) on how much memory running containers can use.
 	Memory int64
-	//MemorySwap limit value equal to memory plus swap.
+	// MemorySwap limits the amount of memory and swap together.
 	MemorySwap int64
-	//SecruityOpts modify the way container security is running
-	LabelOpts []string
+	// LabelOpts is a slice of fields of an SELinux context, given in "field:pair" format, or "disable".
+	// Recognized field names are "role", "type", and "level".
+	LabelOpts []string
+	// SeccompProfilePath is the pathname of a seccomp profile.
 	SeccompProfilePath string
-	ApparmorProfile string
+	// ApparmorProfile is the name of an apparmor profile.
+	ApparmorProfile string
-	//ShmSize is the shared memory size
+	// ShmSize is the "size" value to use when mounting an shmfs on the container's /dev/shm directory.
 	ShmSize string
-	//Ulimit options
+	// Ulimit specifies resource limit options, in the form type:softlimit[:hardlimit].
+	// These types are recognized:
+	// "core": maximum core dump size (ulimit -c)
+	// "cpu": maximum CPU time (ulimit -t)
+	// "data": maximum size of a process's data segment (ulimit -d)
+	// "fsize": maximum size of new files (ulimit -f)
+	// "locks": maximum number of file locks (ulimit -x)
+	// "memlock": maximum amount of locked memory (ulimit -l)
+	// "msgqueue": maximum amount of data in message queues (ulimit -q)
+	// "nice": niceness adjustment (nice -n, ulimit -e)
+	// "nofile": maximum number of open files (ulimit -n)
+	// "nproc": maximum number of processes (ulimit -u)
+	// "rss": maximum size of a process's resident set (ulimit -m)
+	// "rtprio": maximum real-time scheduling priority (ulimit -r)
+	// "rttime": maximum amount of real-time execution between blocking syscalls
+	// "sigpending": maximum number of pending signals (ulimit -i)
+	// "stack": maximum stack size (ulimit -s)
 	Ulimit []string
-	//Volumes to bind mount into the container
+	// Volumes to bind mount into the container
 	Volumes []string
 }
@@ -184,7 +222,7 @@ type BuilderOptions struct {
 	// PullPolicy decides whether or not we should pull the image that
 	// we're using as a base image.  It should be PullIfMissing,
 	// PullAlways, or PullNever.
-	PullPolicy int
+	PullPolicy PullPolicy
 	// Registry is a value which is prepended to the image's name, if it
 	// needs to be pulled and the image name alone can not be resolved to a
 	// reference to a source image.  No separator is implicitly added.
@@ -209,7 +247,8 @@ type BuilderOptions struct {
 	// github.com/containers/image/types SystemContext to hold credentials
 	// and other authentication/authorization information.
 	SystemContext *types.SystemContext
-	// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
+	// DefaultMountsFilePath is the file path holding the mounts to be
+	// mounted in "host-path:container-path" format
 	DefaultMountsFilePath string
 	CommonBuildOpts *CommonBuildOptions
 }
diff --git a/vendor/github.com/projectatomic/buildah/commit.go b/vendor/github.com/projectatomic/buildah/commit.go
index da86ee855..75d2626f5 100644
--- a/vendor/github.com/projectatomic/buildah/commit.go
+++ b/vendor/github.com/projectatomic/buildah/commit.go
@@ -49,6 +49,9 @@ type CommitOptions struct {
 	SystemContext *types.SystemContext
 	// IIDFile tells the builder to write the image ID to the specified file
 	IIDFile string
+	// Squash tells the builder to produce an image with a single layer
+	// instead of with possibly more than one layer.
+ Squash bool } // PushOptions can be used to alter how an image is copied somewhere. @@ -100,7 +103,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options // Check if we're keeping everything in local storage. If so, we can take certain shortcuts. _, destIsStorage := dest.Transport().(is.StoreTransport) exporting := !destIsStorage - src, err := b.makeImageRef(options.PreferredManifestType, exporting, options.Compression, options.HistoryTimestamp) + src, err := b.makeImageRef(options.PreferredManifestType, exporting, options.Squash, options.Compression, options.HistoryTimestamp) if err != nil { return imgID, errors.Wrapf(err, "error computing layer digests and building metadata") } diff --git a/vendor/github.com/projectatomic/buildah/image.go b/vendor/github.com/projectatomic/buildah/image.go index a54643806..c66a5cd08 100644 --- a/vendor/github.com/projectatomic/buildah/image.go +++ b/vendor/github.com/projectatomic/buildah/image.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "io/ioutil" "os" @@ -41,6 +42,8 @@ type containerImageRef struct { compression archive.Compression name reference.Named names []string + containerID string + mountLabel string layerID string oconfig []byte dconfig []byte @@ -50,12 +53,15 @@ type containerImageRef struct { annotations map[string]string preferredManifestType string exporting bool + squash bool } type containerImageSource struct { path string ref *containerImageRef store storage.Store + containerID string + mountLabel string layerID string names []string compression archive.Compression @@ -94,6 +100,124 @@ func expectedDockerDiffIDs(image docker.V2Image) int { return expected } +// Compute the media types which we need to attach to a layer, given the type of +// compression that we'll be applying. +func (i *containerImageRef) computeLayerMIMEType(what string) (omediaType, dmediaType string, err error) { + omediaType = v1.MediaTypeImageLayer + dmediaType = docker.V2S2MediaTypeUncompressedLayer + if i.compression != archive.Uncompressed { + switch i.compression { + case archive.Gzip: + omediaType = v1.MediaTypeImageLayerGzip + dmediaType = docker.V2S2MediaTypeLayer + logrus.Debugf("compressing %s with gzip", what) + case archive.Bzip2: + // Until the image specs define a media type for bzip2-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with bzip2. + return "", "", errors.New("media type for bzip2-compressed layers is not defined") + case archive.Xz: + // Until the image specs define a media type for xz-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with xz. + return "", "", errors.New("media type for xz-compressed layers is not defined") + default: + logrus.Debugf("compressing %s with unknown compressor(?)", what) + } + } + return omediaType, dmediaType, nil +} + +// Extract the container's whole filesystem as if it were a single layer. 
+func (i *containerImageRef) extractRootfs() (io.ReadCloser, error) { + mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) + if err != nil { + return nil, errors.Wrapf(err, "error extracting container %q", i.containerID) + } + tarOptions := &archive.TarOptions{ + Compression: archive.Uncompressed, + } + rc, err := archive.TarWithOptions(mountPoint, tarOptions) + if err != nil { + return nil, errors.Wrapf(err, "error extracting container %q", i.containerID) + } + return ioutils.NewReadCloserWrapper(rc, func() error { + err := rc.Close() + if err != nil { + err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID) + } + if err2 := i.store.Unmount(i.containerID); err == nil { + if err2 != nil { + err2 = errors.Wrapf(err2, "error unmounting container %q", i.containerID) + } + err = err2 + } + return err + }), nil +} + +// Build fresh copies of the container configuration structures so that we can edit them +// without making unintended changes to the original Builder. +func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) { + created := i.created + + // Build an empty image, and then decode over it. + oimage := v1.Image{} + if err := json.Unmarshal(i.oconfig, &oimage); err != nil { + return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err + } + // Always replace this value, since we're newer than our base image. + oimage.Created = &created + // Clear the list of diffIDs, since we always repopulate it. + oimage.RootFS.Type = docker.TypeLayers + oimage.RootFS.DiffIDs = []digest.Digest{} + // Only clear the history if we're squashing, otherwise leave it be so that we can append + // entries to it. + if i.squash { + oimage.History = []v1.History{} + } + + // Build an empty image, and then decode over it. + dimage := docker.V2Image{} + if err := json.Unmarshal(i.dconfig, &dimage); err != nil { + return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err + } + // Always replace this value, since we're newer than our base image. + dimage.Created = created + // Clear the list of diffIDs, since we always repopulate it. + dimage.RootFS = &docker.V2S2RootFS{} + dimage.RootFS.Type = docker.TypeLayers + dimage.RootFS.DiffIDs = []digest.Digest{} + // Only clear the history if we're squashing, otherwise leave it be so that we can append + // entries to it. + if i.squash { + dimage.History = []docker.V2S2History{} + } + + // Build empty manifests. The Layers lists will be populated later. + omanifest := v1.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: v1.Descriptor{ + MediaType: v1.MediaTypeImageConfig, + }, + Layers: []v1.Descriptor{}, + Annotations: i.annotations, + } + + dmanifest := docker.V2S2Manifest{ + V2Versioned: docker.V2Versioned{ + SchemaVersion: 2, + MediaType: docker.V2S2MediaTypeManifest, + }, + Config: docker.V2S2Descriptor{ + MediaType: docker.V2S2MediaTypeImageConfig, + }, + Layers: []docker.V2S2Descriptor{}, + } + + return oimage, omanifest, dimage, dmanifest, nil +} + func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) { // Decide which type of manifest and configuration output we're going to provide. 
manifestType := i.preferredManifestType @@ -109,11 +233,12 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System if err != nil { return nil, errors.Wrapf(err, "unable to read layer %q", layerID) } - // Walk the list of parent layers, prepending each as we go. + // Walk the list of parent layers, prepending each as we go. If we're squashing, + // stop at the layer ID of the top layer, which we won't really be using anyway. for layer != nil { layers = append(append([]string{}, layerID), layers...) layerID = layer.Parent - if layerID == "" { + if layerID == "" || i.squash { err = nil break } @@ -139,57 +264,25 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } }() - // Build fresh copies of the configurations so that we don't mess with the values in the Builder - // object itself. - oimage := v1.Image{} - err = json.Unmarshal(i.oconfig, &oimage) + // Build fresh copies of the configurations and manifest so that we don't mess with any + // values in the Builder object itself. + oimage, omanifest, dimage, dmanifest, err := i.createConfigsAndManifests() if err != nil { return nil, err } - created := i.created - oimage.Created = &created - dimage := docker.V2Image{} - err = json.Unmarshal(i.dconfig, &dimage) - if err != nil { - return nil, err - } - dimage.Created = created - - // Start building manifests. - omanifest := v1.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: v1.Descriptor{ - MediaType: v1.MediaTypeImageConfig, - }, - Layers: []v1.Descriptor{}, - Annotations: i.annotations, - } - dmanifest := docker.V2S2Manifest{ - V2Versioned: docker.V2Versioned{ - SchemaVersion: 2, - MediaType: docker.V2S2MediaTypeManifest, - }, - Config: docker.V2S2Descriptor{ - MediaType: docker.V2S2MediaTypeImageConfig, - }, - Layers: []docker.V2S2Descriptor{}, - } - - oimage.RootFS.Type = docker.TypeLayers - oimage.RootFS.DiffIDs = []digest.Digest{} - dimage.RootFS = &docker.V2S2RootFS{} - dimage.RootFS.Type = docker.TypeLayers - dimage.RootFS.DiffIDs = []digest.Digest{} // Extract each layer and compute its digests, both compressed (if requested) and uncompressed. for _, layerID := range layers { + what := fmt.Sprintf("layer %q", layerID) + if i.squash { + what = fmt.Sprintf("container %q", i.containerID) + } // The default layer media type assumes no compression. omediaType := v1.MediaTypeImageLayer dmediaType := docker.V2S2MediaTypeUncompressedLayer - // If we're not re-exporting the data, reuse the blobsum and diff IDs. - if !i.exporting && layerID != i.layerID { + // If we're not re-exporting the data, and we're reusing layers individually, reuse + // the blobsum and diff IDs. + if !i.exporting && !i.squash && layerID != i.layerID { layer, err2 := i.store.Layer(layerID) if err2 != nil { return nil, errors.Wrapf(err, "unable to locate layer %q", layerID) @@ -218,40 +311,37 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System continue } // Figure out if we need to change the media type, in case we're using compression. - if i.compression != archive.Uncompressed { - switch i.compression { - case archive.Gzip: - omediaType = v1.MediaTypeImageLayerGzip - dmediaType = docker.V2S2MediaTypeLayer - logrus.Debugf("compressing layer %q with gzip", layerID) - case archive.Bzip2: - // Until the image specs define a media type for bzip2-compressed layers, even if we know - // how to decompress them, we can't try to compress layers with bzip2. 
- return nil, errors.New("media type for bzip2-compressed layers is not defined") - case archive.Xz: - // Until the image specs define a media type for xz-compressed layers, even if we know - // how to decompress them, we can't try to compress layers with xz. - return nil, errors.New("media type for xz-compressed layers is not defined") - default: - logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID) - } + omediaType, dmediaType, err = i.computeLayerMIMEType(what) + if err != nil { + return nil, err } - // Start reading the layer. + // Start reading either the layer or the whole container rootfs. noCompression := archive.Uncompressed diffOptions := &storage.DiffOptions{ Compression: &noCompression, } - rc, err := i.store.Diff("", layerID, diffOptions) - if err != nil { - return nil, errors.Wrapf(err, "error extracting layer %q", layerID) + var rc io.ReadCloser + if i.squash { + // Extract the root filesystem as a single layer. + rc, err = i.extractRootfs() + if err != nil { + return nil, err + } + defer rc.Close() + } else { + // Extract this layer, one of possibly many. + rc, err = i.store.Diff("", layerID, diffOptions) + if err != nil { + return nil, errors.Wrapf(err, "error extracting %s", what) + } + defer rc.Close() } - defer rc.Close() srcHasher := digest.Canonical.Digester() reader := io.TeeReader(rc, srcHasher.Hash()) // Set up to write the possibly-recompressed blob. layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600) if err != nil { - return nil, errors.Wrapf(err, "error opening file for layer %q", layerID) + return nil, errors.Wrapf(err, "error opening file for %s", what) } destHasher := digest.Canonical.Digester() counter := ioutils.NewWriteCounter(layerFile) @@ -259,26 +349,26 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System // Compress the layer, if we're recompressing it. writer, err := archive.CompressStream(multiWriter, i.compression) if err != nil { - return nil, errors.Wrapf(err, "error compressing layer %q", layerID) + return nil, errors.Wrapf(err, "error compressing %s", what) } size, err := io.Copy(writer, reader) if err != nil { - return nil, errors.Wrapf(err, "error storing layer %q to file", layerID) + return nil, errors.Wrapf(err, "error storing %s to file", what) } writer.Close() layerFile.Close() if i.compression == archive.Uncompressed { if size != counter.Count { - return nil, errors.Errorf("error storing layer %q to file: inconsistent layer size (copied %d, wrote %d)", layerID, size, counter.Count) + return nil, errors.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count) } } else { size = counter.Count } - logrus.Debugf("layer %q size is %d bytes", layerID, size) + logrus.Debugf("%s size is %d bytes", what, size) // Rename the layer so that we can more easily find it by digest later. err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String())) if err != nil { - return nil, errors.Wrapf(err, "error storing layer %q to file", layerID) + return nil, errors.Wrapf(err, "error storing %s to file", what) } // Add a note in the manifest about the layer. The blobs are identified by their possibly- // compressed blob digests. 
@@ -383,6 +473,8 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System path: path, ref: i, store: i.store, + containerID: i.containerID, + mountLabel: i.mountLabel, layerID: i.layerID, names: i.names, compression: i.compression, @@ -440,15 +532,15 @@ func (i *containerImageSource) Reference() types.ImageReference { } func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil && *instanceDigest != digest.FromBytes(i.manifest) { - return nil, errors.Errorf("TODO") + if instanceDigest != nil { + return nil, errors.Errorf("containerImageSource does not support manifest lists") } return nil, nil } func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil && *instanceDigest != digest.FromBytes(i.manifest) { - return nil, "", errors.Errorf("TODO") + if instanceDigest != nil { + return nil, "", errors.Errorf("containerImageSource does not support manifest lists") } return i.manifest, i.manifestType, nil } @@ -488,7 +580,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo) return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil } -func (b *Builder) makeImageRef(manifestType string, exporting bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) { +func (b *Builder) makeImageRef(manifestType string, exporting bool, squash bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) { var name reference.Named container, err := b.store.Container(b.ContainerID) if err != nil { @@ -519,6 +611,8 @@ func (b *Builder) makeImageRef(manifestType string, exporting bool, compress arc compression: compress, name: name, names: container.Names, + containerID: container.ID, + mountLabel: b.MountLabel, layerID: container.LayerID, oconfig: oconfig, dconfig: dconfig, @@ -528,6 +622,7 @@ func (b *Builder) makeImageRef(manifestType string, exporting bool, compress arc annotations: b.Annotations(), preferredManifestType: manifestType, exporting: exporting, + squash: squash, } return ref, nil } diff --git a/vendor/github.com/projectatomic/buildah/imagebuildah/build.go b/vendor/github.com/projectatomic/buildah/imagebuildah/build.go index dd8421807..5472c1db5 100644 --- a/vendor/github.com/projectatomic/buildah/imagebuildah/build.go +++ b/vendor/github.com/projectatomic/buildah/imagebuildah/build.go @@ -51,7 +51,7 @@ type BuildOptions struct { ContextDirectory string // PullPolicy controls whether or not we pull images. It should be one // of PullIfMissing, PullAlways, or PullNever. - PullPolicy int + PullPolicy buildah.PullPolicy // Registry is a value which is prepended to the image's name, if it // needs to be pulled and the image name alone can not be resolved to a // reference to a source image. No separator is implicitly added. @@ -113,6 +113,13 @@ type BuildOptions struct { DefaultMountsFilePath string // IIDFile tells the builder to write the image ID to the specified file IIDFile string + // Squash tells the builder to produce an image with a single layer + // instead of with possibly more than one layer. 
+	Squash bool
+	// Labels metadata for an image
+	Labels []string
+	// Annotation metadata for an image
+	Annotations []string
 }
 
 // Executor is a buildah-based implementation of the imagebuilder.Executor
@@ -124,7 +131,7 @@ type Executor struct {
 	store storage.Store
 	contextDir string
 	builder *buildah.Builder
-	pullPolicy int
+	pullPolicy buildah.PullPolicy
 	registry string
 	transport string
 	ignoreUnrecognizedInstructions bool
@@ -150,6 +157,9 @@ type Executor struct {
 	commonBuildOptions *buildah.CommonBuildOptions
 	defaultMountsFilePath string
 	iidfile string
+	squash bool
+	labels []string
+	annotations []string
 }
 
 // withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME.
@@ -482,6 +492,9 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
 		commonBuildOptions: options.CommonBuildOpts,
 		defaultMountsFilePath: options.DefaultMountsFilePath,
 		iidfile: options.IIDFile,
+		squash: options.Squash,
+		labels: append([]string{}, options.Labels...),
+		annotations: append([]string{}, options.Annotations...),
 	}
 	if exec.err == nil {
 		exec.err = os.Stderr
@@ -673,6 +686,22 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder) (err er
 	for k, v := range config.Labels {
 		b.builder.SetLabel(k, v)
 	}
+	for _, labelSpec := range b.labels {
+		label := strings.SplitN(labelSpec, "=", 2)
+		if len(label) > 1 {
+			b.builder.SetLabel(label[0], label[1])
+		} else {
+			b.builder.SetLabel(label[0], "")
+		}
+	}
+	for _, annotationSpec := range b.annotations {
+		annotation := strings.SplitN(annotationSpec, "=", 2)
+		if len(annotation) > 1 {
+			b.builder.SetAnnotation(annotation[0], annotation[1])
+		} else {
+			b.builder.SetAnnotation(annotation[0], "")
+		}
+	}
 	if imageRef != nil {
 		logName := transports.ImageName(imageRef)
 		logrus.Debugf("COMMIT %q", logName)
@@ -692,6 +721,7 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder) (err er
 		ReportWriter: b.reportWriter,
 		PreferredManifestType: b.outputFormat,
 		IIDFile: b.iidfile,
+		Squash: b.squash,
 	}
 	imgID, err := b.builder.Commit(ctx, imageRef, options)
 	if err != nil {
diff --git a/vendor/github.com/projectatomic/buildah/pkg/cli/common.go b/vendor/github.com/projectatomic/buildah/pkg/cli/common.go
index eeabc3ee7..e65dba2bd 100644
--- a/vendor/github.com/projectatomic/buildah/pkg/cli/common.go
+++ b/vendor/github.com/projectatomic/buildah/pkg/cli/common.go
@@ -11,6 +11,10 @@ import (
 
 var (
 	BudFlags = []cli.Flag{
+		cli.StringSliceFlag{
+			Name:  "annotation",
+			Usage: "Set metadata for an image (default [])",
+		},
 		cli.StringFlag{
 			Name:  "authfile",
 			Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
@@ -53,6 +57,10 @@ var (
 			Name:  "iidfile",
 			Usage: "Write the image ID to the file",
 		},
+		cli.StringSliceFlag{
+			Name:  "label",
+			Usage: "Set metadata for an image (default [])",
+		},
 		cli.BoolFlag{
 			Name:  "no-cache",
 			Usage: "Do not use caching for the container build. Buildah does not currently support caching so this is a NOOP.",
 		},
@@ -139,11 +147,11 @@ var (
 		cli.StringSliceFlag{
 			Name:  "security-opt",
-			Usage: "security Options (default [])",
+			Usage: "security options (default [])",
 		},
 		cli.StringFlag{
 			Name:  "shm-size",
-			Usage: "size of `/dev/shm`. The format is `<number><unit>`.",
+			Usage: "size of '/dev/shm'. The format is `<number><unit>`.",
 			Value: "65536k",
 		},
 		cli.StringSliceFlag{
diff --git a/vendor/github.com/projectatomic/buildah/pkg/parse/parse.go b/vendor/github.com/projectatomic/buildah/pkg/parse/parse.go
index 6512aad52..eb7be9c1e 100644
--- a/vendor/github.com/projectatomic/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/projectatomic/buildah/pkg/parse/parse.go
@@ -88,7 +88,7 @@ func parseSecurityOpts(securityOpts []string, commonOpts *buildah.CommonBuildOpt
 	}
 	con := strings.SplitN(opt, "=", 2)
 	if len(con) != 2 {
-		return errors.Errorf("Invalid --security-opt 1: %q", opt)
+		return errors.Errorf("Invalid --security-opt name=value pair: %q", opt)
 	}
 
 	switch con[0] {
diff --git a/vendor/github.com/projectatomic/buildah/pull.go b/vendor/github.com/projectatomic/buildah/pull.go
index 9b8578651..edfaa6216 100644
--- a/vendor/github.com/projectatomic/buildah/pull.go
+++ b/vendor/github.com/projectatomic/buildah/pull.go
@@ -60,10 +60,15 @@ func localImageNameForReference(ctx context.Context, store storage.Store, srcRef
 		if err != nil {
 			return "", errors.Wrapf(err, "error loading manifest for %q", srcRef)
 		}
+		// if index.json has no reference name, compute the image digest instead
 		if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" {
-			return "", errors.Errorf("error, archive doesn't have a name annotation. Cannot store image with no name")
+			name, err = getImageDigest(ctx, srcRef, nil)
+			if err != nil {
+				return "", err
+			}
+		} else {
+			name = manifest.Annotations["org.opencontainers.image.ref.name"]
 		}
-		name = manifest.Annotations["org.opencontainers.image.ref.name"]
 	case util.DirTransport:
 		// supports pull from a directory
 		name = split[1]
diff --git a/vendor/github.com/projectatomic/buildah/run.go b/vendor/github.com/projectatomic/buildah/run.go
index 50edc9434..0af21b7f0 100644
--- a/vendor/github.com/projectatomic/buildah/run.go
+++ b/vendor/github.com/projectatomic/buildah/run.go
@@ -2,16 +2,23 @@ package buildah
 
 import (
 	"bufio"
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
+	"net"
 	"os"
 	"os/exec"
 	"path/filepath"
+	"strconv"
 	"strings"
+	"sync"
+	"syscall"
+	"time"
 
 	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/reexec"
 	"github.com/docker/docker/profiles/seccomp"
 	units "github.com/docker/go-units"
 	digest "github.com/opencontainers/go-digest"
@@ -22,6 +29,7 @@ import (
 	"github.com/projectatomic/libpod/pkg/secrets"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/crypto/ssh/terminal"
+	"golang.org/x/sys/unix"
 )
 
 const (
@@ -29,12 +37,17 @@ const (
 	DefaultWorkingDir = "/"
 	// DefaultRuntime is the default command to use to run the container.
 	DefaultRuntime = "runc"
+	// runUsingRuntimeCommand is a command we use as a key for reexec
+	runUsingRuntimeCommand = Package + "-runtime"
 )
 
+// TerminalPolicy takes the value DefaultTerminal, WithoutTerminal, or WithTerminal.
+type TerminalPolicy int
+
 const (
 	// DefaultTerminal indicates that this Run invocation should be
 	// connected to a pseudoterminal if we're connected to a terminal.
-	DefaultTerminal = iota
+	DefaultTerminal TerminalPolicy = iota
 	// WithoutTerminal indicates that this Run invocation should NOT be
 	// connected to a pseudoterminal.
 	WithoutTerminal
@@ -43,6 +56,19 @@ const (
 	WithTerminal
 )
 
+// String converts a TerminalPolicy into a string.
+func (t TerminalPolicy) String() string { + switch t { + case DefaultTerminal: + return "DefaultTerminal" + case WithoutTerminal: + return "WithoutTerminal" + case WithTerminal: + return "WithTerminal" + } + return fmt.Sprintf("unrecognized terminal setting %d", t) +} + // RunOptions can be used to alter how a command is run in the container. type RunOptions struct { // Hostname is the hostname we set for the running container. @@ -72,7 +98,7 @@ type RunOptions struct { // terminal is used if os.Stdout is connected to a terminal, but that // decision can be overridden by specifying either WithTerminal or // WithoutTerminal. - Terminal int + Terminal TerminalPolicy // Quiet tells the run to turn off output to stdout. Quiet bool } @@ -114,7 +140,7 @@ func addHostsToFile(hosts []string, filename string) error { } func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator) error { - // RESOURCES - CPU + // Resources - CPU if commonOpts.CPUPeriod != 0 { g.SetLinuxResourcesCPUPeriod(commonOpts.CPUPeriod) } @@ -131,7 +157,7 @@ func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator) g.SetLinuxResourcesCPUMems(commonOpts.CPUSetMems) } - // RESOURCES - MEMORY + // Resources - Memory if commonOpts.Memory != 0 { g.SetLinuxResourcesMemoryLimit(commonOpts.Memory) } @@ -139,22 +165,21 @@ func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator) g.SetLinuxResourcesMemorySwap(commonOpts.MemorySwap) } + // cgroup membership if commonOpts.CgroupParent != "" { g.SetLinuxCgroupsPath(commonOpts.CgroupParent) } + // Other process resource limits if err := addRlimits(commonOpts.Ulimit, g); err != nil { return err } - if err := addHostsToFile(commonOpts.AddHost, "/etc/hosts"); err != nil { - return err - } - logrus.Debugln("Resources:", commonOpts) + logrus.Debugf("Resources: %#v", commonOpts) return nil } -func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts []specs.Mount, bindFiles, builtinVolumes, volumeMounts []string, shmSize string) error { +func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string) error { // The passed-in mounts matter the most to us. mounts := make([]specs.Mount, len(optionMounts)) copy(mounts, optionMounts) @@ -179,14 +204,14 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts mounts = append(mounts, specMount) } // Add bind mounts for important files, unless they conflict. - for _, boundFile := range bindFiles { - if haveMount(boundFile) { + for dest, src := range bindFiles { + if haveMount(dest) { // Already have something to mount there, so skip this one. 
continue } mounts = append(mounts, specs.Mount{ - Source: boundFile, - Destination: boundFile, + Source: src, + Destination: dest, Type: "bind", Options: []string{"rbind", "ro"}, }) @@ -293,6 +318,28 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts return nil } +// addNetworkConfig copies files from host and sets them up to bind mount into container +func (b *Builder) addNetworkConfig(rdir, hostPath string) (string, error) { + stat, err := os.Stat(hostPath) + if err != nil { + return "", errors.Wrapf(err, "stat %q failed", hostPath) + } + + buf, err := ioutil.ReadFile(hostPath) + if err != nil { + return "", errors.Wrapf(err, "opening %q failed", hostPath) + } + cfile := filepath.Join(rdir, filepath.Base(hostPath)) + if err := ioutil.WriteFile(cfile, buf, stat.Mode()); err != nil { + return "", errors.Wrapf(err, "opening %q failed", cfile) + } + if err = label.Relabel(cfile, b.MountLabel, false); err != nil { + return "", errors.Wrapf(err, "error relabeling %q in container %q", cfile, b.ContainerID) + } + + return cfile, nil +} + // Run runs the specified command in the container's root filesystem. func (b *Builder) Run(command []string, options RunOptions) error { var user specs.User @@ -399,10 +446,10 @@ func (b *Builder) Run(command []string, options RunOptions) error { return errors.Wrapf(err, "error ensuring working directory %q exists", spec.Process.Cwd) } - //Security Opts + // Set the apparmor profile name. g.SetProcessApparmorProfile(b.CommonBuildOpts.ApparmorProfile) - // HANDLE SECCOMP + // Set the seccomp configuration using the specified profile name. if b.CommonBuildOpts.SeccompProfilePath != "unconfined" { if b.CommonBuildOpts.SeccompProfilePath != "" { seccompProfile, err := ioutil.ReadFile(b.CommonBuildOpts.SeccompProfilePath) @@ -430,37 +477,580 @@ func (b *Builder) Run(command []string, options RunOptions) error { Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"}, } g.AddMount(cgroupMnt) + hostFile, err := b.addNetworkConfig(path, "/etc/hosts") + if err != nil { + return err + } + resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf") + if err != nil { + return err + } - bindFiles := []string{"/etc/hosts", "/etc/resolv.conf"} + if err := addHostsToFile(b.CommonBuildOpts.AddHost, hostFile); err != nil { + return err + } + + bindFiles := map[string]string{ + "/etc/hosts": hostFile, + "/etc/resolv.conf": resolvFile, + } err = b.setupMounts(mountPoint, spec, options.Mounts, bindFiles, b.Volumes(), b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize) if err != nil { return errors.Wrapf(err, "error resolving mountpoints for container") } - specbytes, err := json.Marshal(spec) + return b.runUsingRuntimeSubproc(options, spec, mountPoint, path, Package+"-"+filepath.Base(path)) +} + +type runUsingRuntimeSubprocOptions struct { + Options RunOptions + Spec *specs.Spec + RootPath string + BundlePath string + ContainerName string +} + +func (b *Builder) runUsingRuntimeSubproc(options RunOptions, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) { + var confwg sync.WaitGroup + config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{ + Options: options, + Spec: spec, + RootPath: rootPath, + BundlePath: bundlePath, + ContainerName: containerName, + }) + if conferr != nil { + return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand) + } + cmd := reexec.Command(runUsingRuntimeCommand) + cmd.Dir = bundlePath + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = 
os.Stderr + cmd.Env = append(os.Environ(), fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())) + preader, pwriter, err := os.Pipe() if err != nil { - return err + return errors.Wrapf(err, "error creating configuration pipe") + } + confwg.Add(1) + go func() { + _, conferr = io.Copy(pwriter, bytes.NewReader(config)) + confwg.Done() + }() + cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) + defer preader.Close() + defer pwriter.Close() + err = cmd.Run() + confwg.Wait() + if err == nil { + return conferr } - err = ioutils.AtomicWriteFile(filepath.Join(path, "config.json"), specbytes, 0600) + return err +} + +func init() { + reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain) +} + +func runUsingRuntimeMain() { + var options runUsingRuntimeSubprocOptions + // Set logging. + if level := os.Getenv("LOGLEVEL"); level != "" { + if ll, err := strconv.Atoi(level); err == nil { + logrus.SetLevel(logrus.Level(ll)) + } + } + // Unpack our configuration. + confPipe := os.NewFile(3, "confpipe") + if confPipe == nil { + fmt.Fprintf(os.Stderr, "error reading options pipe\n") + os.Exit(1) + } + defer confPipe.Close() + if err := json.NewDecoder(confPipe).Decode(&options); err != nil { + fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err) + os.Exit(1) + } + // Set ourselves up to read the container's exit status. We're doing this in a child process + // so that we won't mess with the setting in a caller of the library. + if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(1), 0, 0, 0); err != nil { + fmt.Fprintf(os.Stderr, "prctl(PR_SET_CHILD_SUBREAPER, 1): %v\n", err) + os.Exit(1) + } + // Run the container, start to finish. + status, err := runUsingRuntime(options.Options, options.Spec, options.RootPath, options.BundlePath, options.ContainerName) if err != nil { - return errors.Wrapf(err, "error storing runtime configuration") + fmt.Fprintf(os.Stderr, "error running container: %v\n", err) + os.Exit(1) + } + // Pass the container's exit status back to the caller by exiting with the same status. + if status.Exited() { + os.Exit(status.ExitStatus()) + } else if status.Signaled() { + fmt.Fprintf(os.Stderr, "container exited on %s\n", status.Signal()) + os.Exit(1) } + os.Exit(1) +} + +func runUsingRuntime(options RunOptions, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) { + // Write the runtime configuration. + specbytes, err := json.Marshal(spec) + if err != nil { + return 1, err + } + if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { + return 1, errors.Wrapf(err, "error storing runtime configuration") + } + logrus.Debugf("config = %v", string(specbytes)) + + // Decide which runtime to use. runtime := options.Runtime if runtime == "" { runtime = DefaultRuntime } - args := append(options.Args, "run", "-b", path, Package+"-"+b.ContainerID) - cmd := exec.Command(runtime, args...) - cmd.Dir = mountPoint - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - if options.Quiet { - cmd.Stdout = nil + + // Default to not specifying a console socket location. + moreCreateArgs := func() []string { return nil } + // Default to just passing down our stdio. + getCreateStdio := func() (*os.File, *os.File, *os.File) { return os.Stdin, os.Stdout, os.Stderr } + + // Figure out how we're doing stdio handling, and create pipes and sockets. 
+ var stdio sync.WaitGroup + var consoleListener *net.UnixListener + stdioPipe := make([][]int, 3) + copyConsole := false + copyStdio := false + finishCopy := make([]int, 2) + if err = unix.Pipe(finishCopy); err != nil { + return 1, errors.Wrapf(err, "error creating pipe for notifying to stop stdio") } - cmd.Stderr = os.Stderr - err = cmd.Run() + finishedCopy := make(chan struct{}) + if spec.Process != nil { + if spec.Process.Terminal { + copyConsole = true + // Create a listening socket for accepting the container's terminal's PTY master. + socketPath := filepath.Join(bundlePath, "console.sock") + consoleListener, err = net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"}) + if err != nil { + return 1, errors.Wrapf(err, "error creating socket to receive terminal descriptor") + } + // Add console socket arguments. + moreCreateArgs = func() []string { return []string{"--console-socket", socketPath} } + } else { + copyStdio = true + // Create pipes to use for relaying stdio. + for i := range stdioPipe { + stdioPipe[i] = make([]int, 2) + if err = unix.Pipe(stdioPipe[i]); err != nil { + return 1, errors.Wrapf(err, "error creating pipe for container FD %d", i) + } + } + // Set stdio to our pipes. + getCreateStdio = func() (*os.File, *os.File, *os.File) { + stdin := os.NewFile(uintptr(stdioPipe[unix.Stdin][0]), "/dev/stdin") + stdout := os.NewFile(uintptr(stdioPipe[unix.Stdout][1]), "/dev/stdout") + stderr := os.NewFile(uintptr(stdioPipe[unix.Stderr][1]), "/dev/stderr") + return stdin, stdout, stderr + } + } + } else { + if options.Quiet { + // Discard stdout. + getCreateStdio = func() (*os.File, *os.File, *os.File) { + return os.Stdin, nil, os.Stderr + } + } + } + + // Build the commands that we'll execute. + pidFile := filepath.Join(bundlePath, "pid") + args := append(append(append(options.Args, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs()...), containerName) + create := exec.Command(runtime, args...) + create.Dir = bundlePath + stdin, stdout, stderr := getCreateStdio() + create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr + if create.SysProcAttr == nil { + create.SysProcAttr = &syscall.SysProcAttr{} + } + runSetDeathSig(create) + + args = append(options.Args, "start", containerName) + start := exec.Command(runtime, args...) + start.Dir = bundlePath + start.Stderr = os.Stderr + runSetDeathSig(start) + + args = append(options.Args, "kill", containerName) + kill := exec.Command(runtime, args...) + kill.Dir = bundlePath + kill.Stderr = os.Stderr + runSetDeathSig(kill) + + args = append(options.Args, "delete", containerName) + del := exec.Command(runtime, args...) + del.Dir = bundlePath + del.Stderr = os.Stderr + runSetDeathSig(del) + + // Actually create the container. + err = create.Run() if err != nil { - logrus.Debugf("error running runc %v: %v", spec.Process.Args, err) + return 1, errors.Wrapf(err, "error creating container for %v", spec.Process.Args) + } + defer func() { + err2 := del.Run() + if err2 != nil { + if err == nil { + err = errors.Wrapf(err2, "error deleting container") + } else { + logrus.Infof("error deleting container: %v", err2) + } + } + }() + + // Make sure we read the container's exit status when it exits. 
+ pidValue, err := ioutil.ReadFile(pidFile) + if err != nil { + return 1, errors.Wrapf(err, "error reading pid from %q", pidFile) + } + pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue))) + if err != nil { + return 1, errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue)) + } + var reaping sync.WaitGroup + reaping.Add(1) + go func() { + defer reaping.Done() + var err error + _, err = unix.Wait4(pid, &wstatus, 0, nil) + if err != nil { + wstatus = 0 + logrus.Errorf("error waiting for container child process: %v\n", err) + } + }() + + if copyStdio { + // We don't need the ends of the pipes that belong to the container. + stdin.Close() + if stdout != nil { + stdout.Close() + } + stderr.Close() + } + + // Handle stdio for the container in the background. + stdio.Add(1) + go runCopyStdio(&stdio, copyStdio, stdioPipe, copyConsole, consoleListener, finishCopy, finishedCopy) + + // Start the container. + err = start.Run() + if err != nil { + return 1, errors.Wrapf(err, "error starting container") + } + stopped := false + defer func() { + if !stopped { + err2 := kill.Run() + if err2 != nil { + if err == nil { + err = errors.Wrapf(err2, "error stopping container") + } else { + logrus.Infof("error stopping container: %v", err2) + } + } + } + }() + + // Wait for the container to exit. + for { + now := time.Now() + var state specs.State + args = append(options.Args, "state", containerName) + stat := exec.Command(runtime, args...) + stat.Dir = bundlePath + stat.Stderr = os.Stderr + stateOutput, stateErr := stat.Output() + if stateErr != nil { + return 1, errors.Wrapf(stateErr, "error reading container state") + } + if err = json.Unmarshal(stateOutput, &state); err != nil { + return 1, errors.Wrapf(stateErr, "error parsing container state %q", string(stateOutput)) + } + switch state.Status { + case "running": + case "stopped": + stopped = true + default: + return 1, errors.Errorf("container status unexpectedly changed to %q", state.Status) + } + if stopped { + break + } + select { + case <-finishedCopy: + stopped = true + case <-time.After(time.Until(now.Add(100 * time.Millisecond))): + continue + } + if stopped { + break + } + } + + // Close the writing end of the stop-handling-stdio notification pipe. + unix.Close(finishCopy[1]) + // Wait for the stdio copy goroutine to flush. + stdio.Wait() + // Wait until we finish reading the exit status. + reaping.Wait() + + return wstatus, nil +} + +func runCopyStdio(stdio *sync.WaitGroup, copyStdio bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}) { + defer func() { + unix.Close(finishCopy[0]) + if copyStdio { + unix.Close(stdioPipe[unix.Stdin][1]) + unix.Close(stdioPipe[unix.Stdout][0]) + unix.Close(stdioPipe[unix.Stderr][0]) + } + stdio.Done() + finishedCopy <- struct{}{} + }() + // If we're not doing I/O handling, we're done. + if !copyConsole && !copyStdio { + return + } + terminalFD := -1 + if copyConsole { + // Accept a connection over our listening socket. + fd, err := runAcceptTerminal(consoleListener) + if err != nil { + logrus.Errorf("%v", err) + return + } + terminalFD = fd + // Set our terminal's mode to raw, to pass handling of special + // terminal input to the terminal in the container. 
+ state, err := terminal.MakeRaw(unix.Stdin) + if err != nil { + logrus.Warnf("error setting terminal state: %v", err) + } else { + defer func() { + if err = terminal.Restore(unix.Stdin, state); err != nil { + logrus.Errorf("unable to restore terminal state: %v", err) + } + }() + // FIXME - if we're connected to a terminal, we should be + // passing the updated terminal size down when we receive a + // SIGWINCH. + } + } + // Track how many descriptors we're expecting data from. + reading := 0 + // Map describing where data on an incoming descriptor should go. + relayMap := make(map[int]int) + // Map describing incoming descriptors. + relayDesc := make(map[int]string) + // Buffers. + relayBuffer := make(map[int]*bytes.Buffer) + if copyConsole { + // Input from our stdin, output from the terminal descriptor. + relayMap[unix.Stdin] = terminalFD + relayDesc[unix.Stdin] = "stdin" + relayBuffer[unix.Stdin] = new(bytes.Buffer) + relayMap[terminalFD] = unix.Stdout + relayDesc[terminalFD] = "container terminal output" + relayBuffer[terminalFD] = new(bytes.Buffer) + reading = 2 + } + if copyStdio { + // Input from our stdin, output from the stdout and stderr pipes. + relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1] + relayDesc[unix.Stdin] = "stdin" + relayBuffer[unix.Stdin] = new(bytes.Buffer) + relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout + relayDesc[stdioPipe[unix.Stdout][0]] = "container stdout" + relayBuffer[stdioPipe[unix.Stdout][0]] = new(bytes.Buffer) + relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr + relayDesc[stdioPipe[unix.Stderr][0]] = "container stderr" + relayBuffer[stdioPipe[unix.Stderr][0]] = new(bytes.Buffer) + reading = 3 + } + // Set our reading descriptors to non-blocking. + for fd := range relayMap { + if err := unix.SetNonblock(fd, true); err != nil { + logrus.Errorf("error setting %s to nonblocking: %v", relayDesc[fd], err) + return + } + } + // Pass data back and forth. + for { + // Start building the list of descriptors to poll. + pollFds := make([]unix.PollFd, 0, reading+1) + // Poll for a notification that we should stop handling stdio. + pollFds = append(pollFds, unix.PollFd{Fd: int32(finishCopy[0]), Events: unix.POLLIN | unix.POLLHUP}) + // Poll on our reading descriptors. + for rfd := range relayMap { + pollFds = append(pollFds, unix.PollFd{Fd: int32(rfd), Events: unix.POLLIN | unix.POLLHUP}) + } + buf := make([]byte, 8192) + // Wait for new data from any input descriptor, or a notification that we're done. + nevents, err := unix.Poll(pollFds, -1) + if err != nil { + if errno, isErrno := err.(syscall.Errno); isErrno { + switch errno { + case syscall.EINTR: + continue + default: + logrus.Errorf("unable to wait for stdio/terminal data to relay: %v", err) + return + } + } else { + logrus.Errorf("unable to wait for stdio/terminal data to relay: %v", err) + return + } + } + if nevents == 0 { + logrus.Errorf("unexpected no data, no error waiting for terminal data to relay") + return + } + var removes []int + for _, pollFd := range pollFds { + // If this descriptor's just been closed from the other end, mark it for + // removal from the set that we're checking for. + if pollFd.Revents&unix.POLLHUP == unix.POLLHUP { + removes = append(removes, int(pollFd.Fd)) + } + // If the EPOLLIN flag isn't set, then there's no data to be read from this descriptor. + if pollFd.Revents&unix.POLLIN == 0 { + // If we're using pipes and it's our stdin, close the writing end + // of the corresponding pipe. 
+ if copyStdio && int(pollFd.Fd) == unix.Stdin { + unix.Close(stdioPipe[unix.Stdin][1]) + stdioPipe[unix.Stdin][1] = -1 + } + continue + } + // Copy whatever we read to wherever it needs to be sent. + readFD := int(pollFd.Fd) + writeFD, needToRelay := relayMap[readFD] + if needToRelay { + n, err := unix.Read(readFD, buf) + if err != nil { + if errno, isErrno := err.(syscall.Errno); isErrno { + switch errno { + default: + logrus.Errorf("unable to read %s: %v", relayDesc[readFD], err) + case syscall.EINTR, syscall.EAGAIN: + } + } else { + logrus.Errorf("unable to wait for %s data to relay: %v", relayDesc[readFD], err) + } + continue + } + // If it's zero-length on our stdin and we're + // using pipes, it's an EOF, so close the stdin + // pipe's writing end. + if n == 0 && copyStdio && int(pollFd.Fd) == unix.Stdin { + unix.Close(stdioPipe[unix.Stdin][1]) + stdioPipe[unix.Stdin][1] = -1 + } + if n > 0 { + // Buffer the data in case we're blocked on where they need to go. + relayBuffer[readFD].Write(buf[:n]) + // Try to drain the buffer. + n, err = unix.Write(writeFD, relayBuffer[readFD].Bytes()) + if err != nil { + logrus.Errorf("unable to write %s: %v", relayDesc[readFD], err) + return + } + relayBuffer[readFD].Next(n) + } + } + } + // Remove any descriptors which we don't need to poll any more from the poll descriptor list. + for _, remove := range removes { + delete(relayMap, remove) + reading-- + } + if reading == 0 { + // We have no more open descriptors to read, so we can stop now. + return + } + // If the we-can-return pipe had anything for us, we're done. + for _, pollFd := range pollFds { + if int(pollFd.Fd) == finishCopy[0] && pollFd.Revents != 0 { + // The pipe is closed, indicating that we can stop now. + return + } + } + } +} + +func runAcceptTerminal(consoleListener *net.UnixListener) (int, error) { + defer consoleListener.Close() + c, err := consoleListener.AcceptUnix() + if err != nil { + return -1, errors.Wrapf(err, "error accepting socket descriptor connection") + } + defer c.Close() + // Expect a control message over our new connection. + b := make([]byte, 8192) + oob := make([]byte, 8192) + n, oobn, _, _, err := c.ReadMsgUnix(b, oob) + if err != nil { + return -1, errors.Wrapf(err, "error reading socket descriptor: %v") + } + if n > 0 { + logrus.Debugf("socket descriptor is for %q", string(b[:n])) + } + if oobn > len(oob) { + return -1, errors.Errorf("too much out-of-bounds data (%d bytes)", oobn) + } + // Parse the control message. + scm, err := unix.ParseSocketControlMessage(oob[:oobn]) + if err != nil { + return -1, errors.Wrapf(err, "error parsing out-of-bound data as a socket control message") + } + logrus.Debugf("control messages: %v", scm) + // Expect to get a descriptor. + terminalFD := -1 + for i := range scm { + fds, err := unix.ParseUnixRights(&scm[i]) + if err != nil { + return -1, errors.Wrapf(err, "error parsing unix rights control message: %v") + } + logrus.Debugf("fds: %v", fds) + if len(fds) == 0 { + continue + } + terminalFD = fds[0] + break + } + if terminalFD == -1 { + return -1, errors.Errorf("unable to read terminal descriptor") + } + // Set the pseudoterminal's size to match our own. 
+ winsize, err := unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ) + if err != nil { + logrus.Warnf("error reading size of controlling terminal: %v", err) + return terminalFD, nil + } + err = unix.IoctlSetWinsize(terminalFD, unix.TIOCSWINSZ, winsize) + if err != nil { + logrus.Warnf("error setting size of container pseudoterminal: %v", err) + } + return terminalFD, nil +} + +func runSetDeathSig(cmd *exec.Cmd) { + if cmd.SysProcAttr == nil { + cmd.SysProcAttr = &syscall.SysProcAttr{} + } + if cmd.SysProcAttr.Pdeathsig == 0 { + cmd.SysProcAttr.Pdeathsig = syscall.SIGTERM } - return err } diff --git a/vendor/github.com/projectatomic/buildah/util/util.go b/vendor/github.com/projectatomic/buildah/util/util.go index 41bd46cb3..e8539f978 100644 --- a/vendor/github.com/projectatomic/buildah/util/util.go +++ b/vendor/github.com/projectatomic/buildah/util/util.go @@ -70,7 +70,7 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto } } - // If the image is from a different transport + // If the image includes a transport's name as a prefix, use it as-is. split := strings.SplitN(name, ":", 2) if len(split) == 2 { if _, ok := Transports[split[0]]; ok { @@ -91,8 +91,16 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto // If this domain can cause us to insert something in the middle, check if that happened. repoPath := reference.Path(named) domain := reference.Domain(named) + tag := "" + if tagged, ok := named.(reference.Tagged); ok { + tag = ":" + tagged.Tag() + } + digest := "" + if digested, ok := named.(reference.Digested); ok { + digest = "@" + digested.Digest().String() + } defaultPrefix := RegistryDefaultPathPrefix[reference.Domain(named)] + "/" - if strings.HasPrefix(repoPath, defaultPrefix) && path.Join(domain, repoPath[len(defaultPrefix):]) == name { + if strings.HasPrefix(repoPath, defaultPrefix) && path.Join(domain, repoPath[len(defaultPrefix):])+tag+digest == name { // Yup, parsing just inserted a bit in the middle, so there was a domain name there to begin with. 
 		return []string{name}
 	}
diff --git a/vendor/github.com/projectatomic/buildah/vendor.conf b/vendor/github.com/projectatomic/buildah/vendor.conf
index be0b04e4a..50f91b072 100644
--- a/vendor/github.com/projectatomic/buildah/vendor.conf
+++ b/vendor/github.com/projectatomic/buildah/vendor.conf
@@ -1,27 +1,32 @@
-github.com/BurntSushi/toml master
-github.com/Nvveen/Gotty master
 github.com/blang/semver master
+github.com/BurntSushi/toml master
+github.com/containerd/continuity master
+github.com/containernetworking/cni v0.6.0
 github.com/containers/image master
-github.com/containers/storage master
+github.com/containers/storage 0b8ab959bba614a4f88bb3791dbc078c3d47f259
 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
 github.com/docker/docker b8571fd81c7d2223c9ecbf799c693e3ef1daaea9
+github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
 github.com/docker/engine-api master
 github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
 github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
-github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
 github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
 github.com/fsouza/go-dockerclient master
 github.com/ghodss/yaml master
+github.com/gogo/protobuf master
 github.com/golang/glog master
 github.com/gorilla/context master
 github.com/gorilla/mux master
+github.com/hashicorp/errwrap master
 github.com/hashicorp/go-cleanhttp master
+github.com/hashicorp/go-multierror master
 github.com/imdario/mergo master
 github.com/mattn/go-runewidth master
 github.com/mattn/go-shellwords master
 github.com/mistifyio/go-zfs master
 github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c
 github.com/mtrmac/gpgme master
+github.com/Nvveen/Gotty master
 github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
 github.com/opencontainers/image-spec v1.0.0
 github.com/opencontainers/runc master
@@ -32,11 +37,16 @@ github.com/openshift/imagebuilder master
 github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
 github.com/pborman/uuid master
 github.com/pkg/errors master
+github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
+github.com/projectatomic/libpod e686269da34ed4208f4ed517c0587ab38e8eaf2c
 github.com/sirupsen/logrus master
 github.com/syndtr/gocapability master
 github.com/tchap/go-patricia master
 github.com/urfave/cli master
 github.com/vbatts/tar-split v0.10.2
+github.com/xeipuuv/gojsonpointer master
+github.com/xeipuuv/gojsonreference master
+github.com/xeipuuv/gojsonschema master
 golang.org/x/crypto master
 golang.org/x/net master
 golang.org/x/sys master
@@ -46,12 +56,3 @@ gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
 k8s.io/apimachinery master
 k8s.io/client-go master
 k8s.io/kubernetes master
-github.com/hashicorp/go-multierror master
-github.com/hashicorp/errwrap master
-github.com/xeipuuv/gojsonschema master
-github.com/xeipuuv/gojsonreference master
-github.com/containerd/continuity master
-github.com/gogo/protobuf master
-github.com/xeipuuv/gojsonpointer master
-github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
-github.com/projectatomic/libpod master
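As a usage note on the new label and annotation plumbing above: `Executor.Commit` splits each `--label`/`--annotation` spec on the first `=` and hands the pieces to `builder.SetLabel`/`SetAnnotation`, treating a spec with no `=` as a key with an empty value. A standalone sketch of that parsing logic — `applySpecs` and its callback are hypothetical stand-ins for the builder methods:

```go
package main

import (
	"fmt"
	"strings"
)

// applySpecs splits each "key=value" spec on the first '=' and passes the
// pieces to the supplied setter; a spec without '=' yields an empty value,
// mirroring the loops added to Executor.Commit in imagebuildah/build.go.
func applySpecs(specs []string, set func(key, value string)) {
	for _, spec := range specs {
		kv := strings.SplitN(spec, "=", 2)
		if len(kv) > 1 {
			set(kv[0], kv[1])
		} else {
			set(kv[0], "")
		}
	}
}

func main() {
	labels := []string{"maintainer=dwalsh@redhat.com", "experimental"}
	applySpecs(labels, func(k, v string) {
		fmt.Printf("label %q = %q\n", k, v)
	})
	// Output:
	// label "maintainer" = "dwalsh@redhat.com"
	// label "experimental" = ""
}
```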