Diffstat (limited to 'vendor')
39 files changed, 2505 insertions, 272 deletions
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index d42246d53..d67a481f1 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -292,7 +292,7 @@ func addHelper(excludes []DockerIgnore, extract bool, dest string, destfi os.Fil
 				break
 			}
 			// combine the filename with the dest directory
-			fpath := strings.TrimPrefix(path, options.ContextDir)
+			fpath := strings.TrimPrefix(path, esrc)
 			if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil {
 				return errors.Wrapf(err, "error copying %q to %q", path, dest)
 			}
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 8b076630f..b6e6545ec 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -26,7 +26,7 @@ const (
 	Package = "buildah"
 	// Version for the Package.  Bump version in contrib/rpm/buildah.spec
 	// too.
-	Version = "1.8-dev"
+	Version = "1.8.0"
 	// The value we use to identify what type of information, currently a
 	// serialized Builder structure, we are using as per-container state.
 	// This should only be changed when we make incompatible changes to
@@ -282,6 +282,8 @@ type CommonBuildOptions struct {
 	CPUSetCPUs string
 	// CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
 	CPUSetMems string
+	// HTTPProxy determines whether *_proxy env vars from the build host are passed into the container.
+	HTTPProxy bool
 	// Memory is the upper limit (in bytes) on how much memory running containers can use.
 	Memory int64
 	// DNSSearch is the list of DNS search domains to add to the build container's /etc/resolv.conf
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index ff39c2f24..1c3ac65f3 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -512,7 +512,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io
 	logNamespaceDiagnostics(spec)
 
 	// If we have configured ID mappings, set them here so that they can apply to the child.
-	hostUidmap, hostGidmap, err := util.GetHostIDMappings("")
+	hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("")
 	if err != nil {
 		return 1, err
 	}
diff --git a/vendor/github.com/containers/buildah/chroot/selinux.go b/vendor/github.com/containers/buildah/chroot/selinux.go
index 3e62d743d..08e8f998b 100644
--- a/vendor/github.com/containers/buildah/chroot/selinux.go
+++ b/vendor/github.com/containers/buildah/chroot/selinux.go
@@ -13,7 +13,7 @@ import (
 // setSelinuxLabel sets the process label for child processes that we'll start.
 func setSelinuxLabel(spec *specs.Spec) error {
 	logrus.Debugf("setting selinux label")
-	if spec.Process.SelinuxLabel != "" && selinux.EnforceMode() != selinux.Disabled {
+	if spec.Process.SelinuxLabel != "" && selinux.GetEnabled() {
 		if err := label.SetProcessLabel(spec.Process.SelinuxLabel); err != nil {
 			return errors.Wrapf(err, "error setting process label to %q", spec.Process.SelinuxLabel)
 		}
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 5e73be881..05d1550b3 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -64,12 +64,9 @@ type CommitOptions struct {
 	// manifest of the new image will reference the blobs rather than
 	// on-disk layers.
 	BlobDirectory string
-
-	// OnBuild is a list of commands to be run by images based on this image
-	OnBuild []string
-	// Parent is the base image that this image was created by.
-	Parent string
-
+	// EmptyLayer tells the builder to omit the diff for the working
+	// container.
+	EmptyLayer bool
 	// OmitTimestamp forces epoch 0 as created timestamp to allow for
 	// deterministic, content-addressable builds.
 	OmitTimestamp bool
@@ -169,7 +166,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
 		}
 	}
 	// Build an image reference from which we can copy the finished image.
-	src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.BlobDirectory, options.Compression, options.HistoryTimestamp, options.OmitTimestamp)
+	src, err := b.makeImageRef(options, exportBaseLayers)
 	if err != nil {
 		return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID)
 	}
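
For context, the CommitOptions change above swaps the dropped OnBuild/Parent fields for EmptyLayer. A rough usage sketch follows; the helper name and wiring are hypothetical, while the Commit signature and option names match the vendored buildah API shown in this diff:

    package example

    import (
    	"context"

    	"github.com/containers/buildah"
    	"github.com/containers/image/types"
    )

    // commitConfigOnly is a hypothetical helper: commit a metadata-only
    // change without adding a filesystem layer for the working container.
    func commitConfigOnly(ctx context.Context, b *buildah.Builder, dest types.ImageReference) (string, error) {
    	options := buildah.CommitOptions{
    		PreferredManifestType: buildah.OCIv1ImageManifest,
    		EmptyLayer:            true, // keep the history entry, omit the rootfs diff
    	}
    	imgID, _, _, err := b.Commit(ctx, dest, options)
    	return imgID, err
    }
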
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 1cd329c85..215920cc3 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -56,6 +56,7 @@ type containerImageRef struct {
 	preferredManifestType string
 	exporting             bool
 	squash                bool
+	emptyLayer            bool
 	tarPath               func(path string) (io.ReadCloser, error)
 	parent                string
 	blobDirectory         string
@@ -184,7 +185,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
 	if err := json.Unmarshal(i.dconfig, &dimage); err != nil {
 		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
 	}
-	dimage.Parent = docker.ID(digest.FromString(i.parent))
+	dimage.Parent = docker.ID(i.parent)
 	// Always replace this value, since we're newer than our base image.
 	dimage.Created = created
 	// Clear the list of diffIDs, since we always repopulate it.
@@ -290,6 +291,11 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
 		if err != nil {
 			return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
 		}
+		// If we're up to the final layer, but we don't want to include
+		// a diff for it, we're done.
+		if i.emptyLayer && layerID == i.layerID {
+			continue
+		}
 		// If we're not re-exporting the data, and we're reusing layers individually, reuse
 		// the blobsum and diff IDs.
 		if !i.exporting && !i.squash && layerID != i.layerID {
@@ -433,7 +439,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
 		CreatedBy:  i.createdBy,
 		Author:     oimage.Author,
 		Comment:    i.historyComment,
-		EmptyLayer: false,
+		EmptyLayer: i.emptyLayer,
 	}
 	oimage.History = append(oimage.History, onews)
 	dnews := docker.V2S2History{
@@ -441,11 +447,11 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
 		CreatedBy:  i.createdBy,
 		Author:     dimage.Author,
 		Comment:    i.historyComment,
-		EmptyLayer: false,
+		EmptyLayer: i.emptyLayer,
 	}
 	dimage.History = append(dimage.History, dnews)
 	appendHistory(i.postEmptyLayers)
-	dimage.Parent = docker.ID(digest.FromString(i.parent))
+	dimage.Parent = docker.ID(i.parent)
 
 	// Sanity check that we didn't just create a mismatch between non-empty layers in the
 	// history and the number of diffIDs.
@@ -636,7 +642,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo,
 	return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
 }
 
-func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, blobDirectory string, compress archive.Compression, historyTimestamp *time.Time, omitTimestamp bool) (types.ImageReference, error) {
+func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.ImageReference, error) {
 	var name reference.Named
 	container, err := b.store.Container(b.ContainerID)
 	if err != nil {
@@ -647,6 +653,7 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
 			name = parsed
 		}
 	}
+	manifestType := options.PreferredManifestType
 	if manifestType == "" {
 		manifestType = OCIv1ImageManifest
 	}
@@ -659,8 +666,8 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
 		return nil, errors.Wrapf(err, "error encoding docker-format image configuration %#v", b.Docker)
 	}
 	created := time.Now().UTC()
-	if historyTimestamp != nil {
-		created = historyTimestamp.UTC()
+	if options.HistoryTimestamp != nil {
+		created = options.HistoryTimestamp.UTC()
 	}
 	createdBy := b.CreatedBy()
 	if createdBy == "" {
@@ -670,13 +677,21 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
 		}
 	}
 
-	if omitTimestamp {
+	if options.OmitTimestamp {
 		created = time.Unix(0, 0)
 	}
 
+	parent := ""
+	if b.FromImageID != "" {
+		parentDigest := digest.NewDigestFromEncoded(digest.Canonical, b.FromImageID)
+		if parentDigest.Validate() == nil {
+			parent = parentDigest.String()
+		}
+	}
+
 	ref := &containerImageRef{
 		store:                 b.store,
-		compression:           compress,
+		compression:           options.Compression,
 		name:                  name,
 		names:                 container.Names,
 		containerID:           container.ID,
@@ -690,10 +705,11 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
 		annotations:           b.Annotations(),
 		preferredManifestType: manifestType,
 		exporting:             exporting,
-		squash:                squash,
+		squash:                options.Squash,
+		emptyLayer:            options.EmptyLayer,
 		tarPath:               b.tarPath(),
 		parent:                parent,
-		blobDirectory:         blobDirectory,
+		blobDirectory:         options.BlobDirectory,
 		preEmptyLayers:        b.PrependedEmptyLayers,
 		postEmptyLayers:       b.AppendedEmptyLayers,
 	}
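
In makeImageRef above, the base image's storage ID is now recorded as the parent only when it parses as a valid canonical digest. A standalone illustration of that check, using the same go-digest calls the diff itself uses (the image ID value below is a stand-in):

    package main

    import (
    	"fmt"
    	"strings"

    	digest "github.com/opencontainers/go-digest"
    )

    func main() {
    	fromImageID := strings.Repeat("ab", 32) // stand-in for a 64-hex storage image ID
    	parent := ""
    	if d := digest.NewDigestFromEncoded(digest.Canonical, fromImageID); d.Validate() == nil {
    		parent = d.String() // "sha256:<hex>"; stays empty for non-digest IDs
    	}
    	fmt.Println(parent)
    }
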
"github.com/fsouza/go-dockerclient" - "github.com/opencontainers/image-spec/specs-go/v1" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runtime-spec/specs-go" "github.com/openshift/imagebuilder" "github.com/openshift/imagebuilder/dockerfile/parser" @@ -215,9 +215,12 @@ type Executor struct { forceRmIntermediateCtrs bool imageMap map[string]string // Used to map images that we create to handle the AS construct. containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers. + baseMap map[string]bool // Holds the names of every base image, as given. + rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction. blobDirectory string excludes []string unusedArgs map[string]struct{} + buildArgs map[string]string } // StageExecutor bundles up what we need to know when executing one stage of a @@ -480,6 +483,19 @@ func (s *StageExecutor) volumeCacheRestore() error { // imagebuilder tells us the instruction was "ADD" and not "COPY". func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error { for _, copy := range copies { + // If the file exists, check to see if it's a symlink. + // If it is a symlink, convert to it's target otherwise + // the symlink will be overwritten. + fileDest, _ := os.Lstat(filepath.Join(s.mountPoint, copy.Dest)) + if fileDest != nil { + if fileDest.Mode()&os.ModeSymlink != 0 { + if symLink, err := resolveSymlink(s.mountPoint, copy.Dest); err == nil { + copy.Dest = symLink + } else { + return errors.Wrapf(err, "error reading symbolic link to %q", copy.Dest) + } + } + } if copy.Download { logrus.Debugf("ADD %#v, %#v", excludes, copy) } else { @@ -590,7 +606,7 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { // UnrecognizedInstruction is called when we encounter an instruction that the // imagebuilder parser didn't understand. func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error { - errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", step.Command) + errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", strings.ToUpper(step.Command)) err := fmt.Sprintf(errStr+"%#v", step) if s.executor.ignoreUnrecognizedInstructions { logrus.Debugf(err) @@ -610,7 +626,7 @@ func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error { } // NewExecutor creates a new instance of the imagebuilder.Executor interface. 
-func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) { +func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Node) (*Executor, error) { excludes, err := imagebuilder.ParseDockerignore(options.ContextDirectory) if err != nil { return nil, err @@ -656,8 +672,11 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) { forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, imageMap: make(map[string]string), containerMap: make(map[string]*buildah.Builder), + baseMap: make(map[string]bool), + rootfsMap: make(map[string]bool), blobDirectory: options.BlobDirectory, unusedArgs: make(map[string]struct{}), + buildArgs: options.Args, } if exec.err == nil { exec.err = os.Stderr @@ -679,6 +698,25 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) { exec.unusedArgs[arg] = struct{}{} } } + for _, line := range mainNode.Children { + node := line + for node != nil { // tokens on this line, though we only care about the first + switch strings.ToUpper(node.Value) { // first token - instruction + case "ARG": + arg := node.Next + if arg != nil { + // We have to be careful here - it's either an argument + // and value, or just an argument, since they can be + // separated by either "=" or whitespace. + list := strings.SplitN(arg.Value, "=", 2) + if _, stillUnused := exec.unusedArgs[list[0]]; stillUnused { + delete(exec.unusedArgs, list[0]) + } + } + } + break + } + } return &exec, nil } @@ -845,9 +883,9 @@ func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, e return imageRef, nil } -// stepRequiresCommit indicates whether or not the step should be followed by -// committing the in-progress container to create an intermediate image. -func (*StageExecutor) stepRequiresCommit(step *imagebuilder.Step) bool { +// stepRequiresLayer indicates whether or not the step should be followed by +// committing a layer container when creating an intermediate image. +func (*StageExecutor) stepRequiresLayer(step *imagebuilder.Step) bool { switch strings.ToUpper(step.Command) { case "ADD", "COPY", "RUN": return true @@ -875,6 +913,10 @@ func (s *StageExecutor) getImageRootfs(ctx context.Context, stage imagebuilder.S func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, base string) (imgID string, ref reference.Canonical, err error) { ib := stage.Builder checkForLayers := s.executor.layers && s.executor.useCache + moreStages := s.index < s.stages-1 + lastStage := !moreStages + imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[fmt.Sprintf("%d", stage.Position)]) + rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[fmt.Sprintf("%d", stage.Position)]) // If the base image's name corresponds to the result of an earlier // stage, substitute that image's ID for the base image's name here. @@ -896,7 +938,8 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b // A helper function to only log "COMMIT" as an explicit step if it's // the very last step of a (possibly multi-stage) build. logCommit := func(output string, instruction int) { - if instruction < len(children)-1 || s.index < s.stages-1 { + moreInstructions := instruction < len(children)-1 + if moreInstructions || moreStages { return } commitMessage := "COMMIT" @@ -921,7 +964,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b // squash the contents of the base image. 
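
The ARG pre-scan added to NewExecutor has to accept both the "ARG name" and "ARG name=value" forms when marking a build argument as used; SplitN with a limit of 2 yields the name either way. A self-contained illustration of that parsing step:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	unusedArgs := map[string]struct{}{"http_proxy": {}, "VERSION": {}}
    	for _, token := range []string{"http_proxy", "VERSION=1.8.0"} {
    		name := strings.SplitN(token, "=", 2)[0] // works with or without "=value"
    		delete(unusedArgs, name)
    	}
    	fmt.Println(len(unusedArgs)) // 0: both declared args are now considered used
    }
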
@@ -845,9 +883,9 @@ func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, e
 	return imageRef, nil
 }
 
-// stepRequiresCommit indicates whether or not the step should be followed by
-// committing the in-progress container to create an intermediate image.
-func (*StageExecutor) stepRequiresCommit(step *imagebuilder.Step) bool {
+// stepRequiresLayer indicates whether or not the step should be followed by
+// committing a layer container when creating an intermediate image.
+func (*StageExecutor) stepRequiresLayer(step *imagebuilder.Step) bool {
 	switch strings.ToUpper(step.Command) {
 	case "ADD", "COPY", "RUN":
 		return true
@@ -875,6 +913,10 @@ func (s *StageExecutor) getImageRootfs(ctx context.Context, stage imagebuilder.S
 func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, base string) (imgID string, ref reference.Canonical, err error) {
 	ib := stage.Builder
 	checkForLayers := s.executor.layers && s.executor.useCache
+	moreStages := s.index < s.stages-1
+	lastStage := !moreStages
+	imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[fmt.Sprintf("%d", stage.Position)])
+	rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[fmt.Sprintf("%d", stage.Position)])
 
 	// If the base image's name corresponds to the result of an earlier
 	// stage, substitute that image's ID for the base image's name here.
@@ -896,7 +938,8 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
 	// A helper function to only log "COMMIT" as an explicit step if it's
 	// the very last step of a (possibly multi-stage) build.
 	logCommit := func(output string, instruction int) {
-		if instruction < len(children)-1 || s.index < s.stages-1 {
+		moreInstructions := instruction < len(children)-1
+		if moreInstructions || moreStages {
 			return
 		}
 		commitMessage := "COMMIT"
@@ -921,7 +964,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
 		// squash the contents of the base image.  Whichever is
 		// the case, we need to commit() to create a new image.
 		logCommit(s.output, -1)
-		if imgID, ref, err = s.commit(ctx, ib, getCreatedBy(nil), s.output); err != nil {
+		if imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(nil), false, s.output); err != nil {
 			return "", nil, errors.Wrapf(err, "error committing base container")
 		}
 	} else {
@@ -936,6 +979,8 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
 	}
 
 	for i, node := range children {
+		moreInstructions := i < len(children)-1
+		lastInstruction := !moreInstructions
 		// Resolve any arguments in this instruction.
 		step := ib.Step()
 		if err := step.Resolve(node); err != nil {
@@ -946,30 +991,19 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
 			s.executor.log("%s", step.Original)
 		}
 
-		// If this instruction declares an argument, remove it from the
-		// set of arguments that we were passed but which we haven't
-		// yet seen used by the Dockerfile.
-		if step.Command == "arg" {
-			for _, Arg := range step.Args {
-				list := strings.SplitN(Arg, "=", 2)
-				if _, stillUnused := s.executor.unusedArgs[list[0]]; stillUnused {
-					delete(s.executor.unusedArgs, list[0])
-				}
-			}
-		}
-
 		// Check if there's a --from if the step command is COPY or
 		// ADD.  Set copyFrom to point to either the context directory
 		// or the root of the container from the specified stage.
 		s.copyFrom = s.executor.contextDir
 		for _, n := range step.Flags {
-			if strings.Contains(n, "--from") && (step.Command == "copy" || step.Command == "add") {
+			command := strings.ToUpper(step.Command)
+			if strings.Contains(n, "--from") && (command == "COPY" || command == "ADD") {
 				var mountPoint string
 				arr := strings.Split(n, "=")
 				otherStage, ok := s.executor.stages[arr[1]]
 				if !ok {
 					if mountPoint, err = s.getImageRootfs(ctx, stage, arr[1]); err != nil {
-						return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", step.Command, arr[1])
+						return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1])
 					}
 				} else {
 					mountPoint = otherStage.mountPoint
@@ -984,7 +1018,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
 		// contents of any volumes declared between now and when we
 		// finish.
 		noRunsRemaining := false
-		if i < len(children)-1 {
+		if moreInstructions {
 			noRunsRemaining = !ib.RequiresStart(&parser.Node{Children: children[i+1:]})
 		}
 
@@ -996,24 +1030,29 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
 				logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
 				return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
 			}
-			if i < len(children)-1 {
+			if moreInstructions {
 				// There are still more instructions to process
 				// for this stage.  Make a note of the
 				// instruction in the history that we'll write
 				// for the image when we eventually commit it.
 				now := time.Now()
-				s.builder.AddPrependedEmptyLayer(&now, getCreatedBy(node), "", "")
+				s.builder.AddPrependedEmptyLayer(&now, s.executor.getCreatedBy(node), "", "")
 				continue
 			} else {
 				// This is the last instruction for this stage,
 				// so we should commit this container to create
-				// an image.
-				logCommit(s.output, i)
-				imgID, ref, err = s.commit(ctx, ib, getCreatedBy(node), s.output)
-				if err != nil {
-					return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
+				// an image, but only if it's the last one, or
+				// if it's used as the basis for a later stage.
+				if lastStage || imageIsUsedLater {
+					logCommit(s.output, i)
+					imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node), false, s.output)
+					if err != nil {
+						return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
+					}
+					logImageID(imgID)
+				} else {
+					imgID = ""
 				}
-				logImageID(imgID)
 				break
 			}
 		}
@@ -1028,18 +1067,14 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
 		// If we have to commit for this instruction, only assign the
 		// stage's configured output name to the last layer.
-		if i == len(children)-1 {
+		if lastInstruction {
 			commitName = s.output
 		}
 
 		// If we're using the cache, and we've managed to stick with
 		// cached images so far, look for one that matches what we
 		// expect to produce for this instruction.
-		// Only check at steps where we commit, so that we don't
-		// abandon the cache at this step just because we can't find an
-		// image with a history entry in it that we wouldn't have
-		// committed.
-		if checkForLayers && (s.stepRequiresCommit(step) || i == len(children)-1) && !(s.executor.squash && i == len(children)-1 && s.index == s.stages-1) {
+		if checkForLayers && !(s.executor.squash && lastInstruction && lastStage) {
 			cacheID, err = s.layerExists(ctx, node, children[:i])
 			if err != nil {
 				return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
@@ -1059,17 +1094,32 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
 			// the last step in this stage, add the name to the
 			// image.
 			imgID = cacheID
-			if commitName != "" && (s.stepRequiresCommit(step) || i == len(children)-1) {
-				logCommit(s.output, i)
+			if commitName != "" {
+				logCommit(commitName, i)
 				if imgID, ref, err = s.copyExistingImage(ctx, cacheID, commitName); err != nil {
 					return "", nil, err
 				}
 				logImageID(imgID)
 			}
 			// Update our working container to be based off of the
-			// cached image, in case we need to read content from
-			// its root filesystem.
-			rebase = true
+			// cached image, if we might need to use it as a basis
+			// for the next instruction, or if we need the root
+			// filesystem to match the image contents for the sake
+			// of a later stage that wants to copy content from it.
+			rebase = moreInstructions || rootfsIsUsedLater
+			// If the instruction would affect our configuration,
+			// process the configuration change so that, if we fall
+			// off the cache path, the filesystem changes from the
+			// last cache image will be all that we need, since we
+			// still don't want to restart using the image's
+			// configuration blob.
+			if !s.stepRequiresLayer(step) {
+				err := ib.Run(step, s, noRunsRemaining)
+				if err != nil {
+					logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
+					return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
+				}
+			}
 		} else {
 			// If we didn't find a cached image that we could just reuse,
 			// process the instruction directly.
@@ -1078,36 +1128,20 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
 				logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
 				return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
 			}
-			if s.stepRequiresCommit(step) || i == len(children)-1 {
-				// Either this is the last instruction, or
-				// there are more instructions and we need to
-				// create a layer from this one before
-				// continuing.
-				// TODO: only commit for the last instruction
-				// case if we need to use this stage's image as
-				// a base image later, or if we're the final
-				// stage.
-				logCommit(s.output, i)
-				imgID, ref, err = s.commit(ctx, ib, getCreatedBy(node), commitName)
-				if err != nil {
-					return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
-				}
-				logImageID(imgID)
-				// We only need to build a new container rootfs
-				// using this image if we plan on making
-				// further changes to it.  Subsequent stages
-				// that just want to use the rootfs as a source
-				// for COPY or ADD will be content with what we
-				// already have.
-				rebase = i < len(children)-1
-			} else {
-				// There are still more instructions to process
-				// for this stage, and we don't need to commit
-				// here.  Make a note of the instruction in the
-				// history for the next commit.
-				now := time.Now()
-				s.builder.AddPrependedEmptyLayer(&now, getCreatedBy(node), "", "")
+			// Create a new image, maybe with a new layer.
+			logCommit(s.output, i)
+			imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node), !s.stepRequiresLayer(step), commitName)
+			if err != nil {
+				return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
 			}
+			logImageID(imgID)
+			// We only need to build a new container rootfs
+			// using this image if we plan on making
+			// further changes to it.  Subsequent stages
+			// that just want to use the rootfs as a source
+			// for COPY or ADD will be content with what we
+			// already have.
+			rebase = moreInstructions
 		}
 
 		if rebase {
@@ -1122,8 +1156,6 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
 			// creating a new working container with the
 			// just-committed or updated cached image as its new
 			// base image.
-			// TODO: only create a new container if we know that
-			// we'll need the updated root filesystem.
 			if _, err := s.prepare(ctx, stage, imgID, false, true); err != nil {
 				return "", nil, errors.Wrap(err, "error preparing container for next step")
 			}
@@ -1195,13 +1227,13 @@ func (s *StageExecutor) layerExists(ctx context.Context, currNode *parser.Node,
 	// it means that this image is potentially a cached intermediate image from a previous
 	// build. Next we double check that the history of this image is equivalent to the previous
 	// lines in the Dockerfile up till the point we are at in the build.
-	if layer.Parent == s.executor.topLayers[len(s.executor.topLayers)-1] {
+	if layer.Parent == s.executor.topLayers[len(s.executor.topLayers)-1] || layer.ID == s.executor.topLayers[len(s.executor.topLayers)-1] {
 		history, err := s.executor.getImageHistory(ctx, image.ID)
 		if err != nil {
 			return "", errors.Wrapf(err, "error getting history of %q", image.ID)
 		}
 		// children + currNode is the point of the Dockerfile we are currently at.
-		if historyMatches(append(children, currNode), history) {
+		if s.executor.historyMatches(append(children, currNode), history) {
 			// This checks if the files copied during build have been changed if the node is
 			// a COPY or ADD command.
 			filesMatch, err := s.copiedFilesMatch(currNode, history[len(history)-1].Created)
@@ -1225,21 +1257,26 @@ func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.Hi
 	}
 	ref, err := imageRef.NewImage(ctx, nil)
 	if err != nil {
-		return nil, errors.Wrap(err, "error creating new image from reference")
+		return nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID)
 	}
+	defer ref.Close()
 	oci, err := ref.OCIConfig(ctx)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error getting oci config of image %q", imageID)
+		return nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID)
 	}
 	return oci.History, nil
 }
 
 // getCreatedBy returns the command the image at node will be created by.
-func getCreatedBy(node *parser.Node) string {
+func (b *Executor) getCreatedBy(node *parser.Node) string {
 	if node == nil {
 		return "/bin/sh"
 	}
 	if node.Value == "run" {
+		buildArgs := b.getBuildArgs()
+		if buildArgs != "" {
+			return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:]
+		}
 		return "/bin/sh -c " + node.Original[4:]
 	}
 	return "/bin/sh -c #(nop) " + node.Original
@@ -1249,12 +1286,23 @@ func getCreatedBy(node *parser.Node) string {
 // in the Dockerfile till the point of build we are at.
 // Used to verify whether a cache of the intermediate image exists and whether
 // to run the build again.
-func historyMatches(children []*parser.Node, history []v1.History) bool {
+func (b *Executor) historyMatches(children []*parser.Node, history []v1.History) bool {
 	i := len(history) - 1
 	for j := len(children) - 1; j >= 0; j-- {
 		instruction := children[j].Original
 		if children[j].Value == "run" {
 			instruction = instruction[4:]
+			buildArgs := b.getBuildArgs()
+			// If a previous image was built with some build-args but the new build process doesn't have any build-args
+			// specified, so compare the lengths of the old instruction with the current one
+			// 11 is the length of "/bin/sh -c " that is used to run the run commands
+			if buildArgs == "" && len(history[i].CreatedBy) > len(instruction)+11 {
+				return false
+			}
+			// There are build-args, so check if anything with the build-args has changed
+			if buildArgs != "" && !strings.Contains(history[i].CreatedBy, buildArgs) {
+				return false
+			}
 		}
 		if !strings.Contains(history[i].CreatedBy, instruction) {
 			return false
@@ -1264,6 +1312,18 @@ func historyMatches(children []*parser.Node, history []v1.History) bool {
 	return true
 }
 
+// getBuildArgs returns a string of the build-args specified during the build process
+// it excludes any build-args that were not used in the build process
+func (b *Executor) getBuildArgs() string {
+	var buildArgs []string
+	for k, v := range b.buildArgs {
+		if _, ok := b.unusedArgs[k]; !ok {
+			buildArgs = append(buildArgs, k+"="+v)
+		}
+	}
+	return strings.Join(buildArgs, " ")
+}
+
 // getFilesToCopy goes through node to get all the src files that are copied, added or downloaded.
 // It is possible for the Dockerfile to have src as hom*, which means all files that have hom as a prefix.
 // Another format is hom?.txt, which means all files that have that name format with the ? replaced by another character.
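
getCreatedBy and historyMatches above key the layer cache on a history string that mirrors Docker's convention for RUN steps built with build-args: "|<count> <k=v ...> /bin/sh -c <command>". A standalone reconstruction of that string, with made-up values:

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    func main() {
    	buildArgs := "FOO=bar BAZ=qux"  // what getBuildArgs() would return
    	original := "RUN make install" // node.Original for the RUN step
    	createdBy := "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) +
    		" " + buildArgs + " /bin/sh -c " + original[4:]
    	fmt.Println(createdBy) // |2 FOO=bar BAZ=qux /bin/sh -c make install
    }
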
@@ -1348,7 +1408,7 @@ func urlContentModified(url string, historyTime *time.Time) (bool, error) {
 
 // commit writes the container's contents to an image, using a passed-in tag as
 // the name if there is one, generating a unique ID-based one otherwise.
-func (s *StageExecutor) commit(ctx context.Context, ib *imagebuilder.Builder, createdBy, output string) (string, reference.Canonical, error) {
+func (s *StageExecutor) commit(ctx context.Context, ib *imagebuilder.Builder, createdBy string, emptyLayer bool, output string) (string, reference.Canonical, error) {
 	var imageRef types.ImageReference
 	if output != "" {
 		imageRef2, err := s.executor.resolveNameToImageRef(output)
@@ -1438,8 +1498,8 @@ func (s *StageExecutor) commit(ctx context.Context, ib *imagebuilder.Builder, cr
 		PreferredManifestType: s.executor.outputFormat,
 		SystemContext:         s.executor.systemContext,
 		Squash:                s.executor.squash,
+		EmptyLayer:            emptyLayer,
 		BlobDirectory:         s.executor.blobDirectory,
-		Parent:                s.builder.FromImageID,
 	}
 	imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
 	if err != nil {
@@ -1510,6 +1570,46 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 	}
 	defer cleanup()
 
+	// Build maps of every named base image and every referenced stage root
+	// filesystem.  Individual stages can use them to determine whether or
+	// not they can skip certain steps near the end of their stages.
+	for _, stage := range stages {
+		node := stage.Node // first line
+		for node != nil {  // each line
+			for _, child := range node.Children { // tokens on this line, though we only care about the first
+				switch strings.ToUpper(child.Value) { // first token - instruction
+				case "FROM":
+					if child.Next != nil { // second token on this line
+						base := child.Next.Value
+						if base != "scratch" {
+							// TODO: this didn't undergo variable and arg
+							// expansion, so if the AS clause in another
+							// FROM instruction uses argument values,
+							// we might not record the right value here.
+							b.baseMap[base] = true
+							logrus.Debugf("base: %q", base)
+						}
+					}
+				case "ADD", "COPY":
+					for _, flag := range child.Flags { // flags for this instruction
+						if strings.HasPrefix(flag, "--from=") {
+							// TODO: this didn't undergo variable and
+							// arg expansion, so if the previous stage
+							// was named using argument values, we might
+							// not record the right value here.
+							rootfs := flag[7:]
+							b.rootfsMap[rootfs] = true
+							logrus.Debugf("rootfs: %q", rootfs)
+						}
+					}
+				}
+				break
+			}
+			node = node.Next // next line
+		}
+	}
+
+	// Run through the build stages, one at a time.
 	for stageIndex, stage := range stages {
 		var lastErr error
@@ -1555,7 +1655,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 
 		// If this is an intermediate stage, make a note of the ID, so
 		// that we can look it up later.
-		if stageIndex < len(stages)-1 {
+		if stageIndex < len(stages)-1 && imageID != "" {
 			b.imageMap[stage.Name] = imageID
 			// We're not populating the cache with intermediate
 			// images, so add this one to the list of images that
@@ -1671,7 +1771,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
 		}
 		mainNode.Children = append(mainNode.Children, additionalNode.Children...)
 	}
-	exec, err := NewExecutor(store, options)
+	exec, err := NewExecutor(store, options, mainNode)
 	if err != nil {
 		return "", nil, errors.Wrapf(err, "error creating build executor")
 	}
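
The pre-scan added to Build above records stage rootfs references by peeling the "--from=" prefix off COPY/ADD flags. Reduced to its core, with sample flag values:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	rootfsMap := make(map[string]bool)
    	for _, flag := range []string{"--chown=0:0", "--from=builder", "--from=0"} {
    		if strings.HasPrefix(flag, "--from=") {
    			rootfsMap[flag[7:]] = true // stage name or positional index, as written
    		}
    	}
    	fmt.Println(rootfsMap) // map[0:true builder:true]
    }
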
diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
index 86bf7653b..0789c2b3c 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
@@ -129,20 +129,20 @@ func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) {
 func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) {
 	var timeIsGreater bool
 
-	// the Walk below doesn't work if rootdir and path are equal
-	if rootdir == path {
-		return false, nil
-	}
-
 	// Convert historyTime from string to time.Time for comparison
 	histTime, err := time.Parse(time.RFC3339Nano, historyTime)
 	if err != nil {
 		return false, errors.Wrapf(err, "error converting string to time.Time %q", historyTime)
 	}
+
+	// Since we are chroot in rootdir, we want a relative path, i.e (path - rootdir)
+	relPath, err := filepath.Rel(rootdir, path)
+	if err != nil {
+		return false, errors.Wrapf(err, "error making path %q relative to %q", path, rootdir)
+	}
+
 	// Walk the file tree and check the time stamps.
-	// Since we are chroot in rootdir, only want the path of the actual filename, i.e path - rootdir.
-	// +1 to account for the extra "/" (e.g rootdir=/home/user/mydir, path=/home/user/mydir/myfile.json)
-	err = filepath.Walk(path[len(rootdir)+1:], func(path string, info os.FileInfo, err error) error {
+	err = filepath.Walk(relPath, func(path string, info os.FileInfo, err error) error {
 		// If using cached images, it is possible for files that are being copied to come from
 		// previous build stages. But if using cached images, then the copied file won't exist
 		// since a container won't have been created for the previous build stage and info will be nil.
@@ -154,6 +154,9 @@ func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) {
 		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
 			// Evaluate any symlink that occurs to get updated modified information
 			resolvedPath, err := filepath.EvalSymlinks(path)
+			if err != nil && os.IsNotExist(err) {
+				return errors.Wrapf(errDanglingSymlink, "%q", path)
+			}
 			if err != nil {
 				return errors.Wrapf(err, "error evaluating symlink %q", path)
 			}
@@ -169,7 +172,12 @@ func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) {
 		}
 		return nil
 	})
+
 	if err != nil {
+		// if error is due to dangling symlink, ignore error and return nil
+		if errors.Cause(err) == errDanglingSymlink {
+			return false, nil
+		}
 		return false, errors.Wrapf(err, "error walking file tree %q", path)
 	}
 	return timeIsGreater, err
diff --git a/vendor/github.com/containers/buildah/imagebuildah/errors.go b/vendor/github.com/containers/buildah/imagebuildah/errors.go
new file mode 100644
index 000000000..cf299656b
--- /dev/null
+++ b/vendor/github.com/containers/buildah/imagebuildah/errors.go
@@ -0,0 +1,7 @@
+package imagebuildah
+
+import "errors"
+
+var (
+	errDanglingSymlink = errors.New("error evaluating dangling symlink")
+)
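The dangling-symlink handling above combines a package-level sentinel (the new errors.go file) with pkg/errors wrapping, so the Walk callback can abort early while the caller still recognizes the cause. The same pattern in miniature:

    package main

    import (
    	stderrors "errors"
    	"fmt"

    	"github.com/pkg/errors"
    )

    var errDanglingSymlink = stderrors.New("error evaluating dangling symlink")

    func walkStep(path string) error {
    	// Stand-in for EvalSymlinks reporting a missing target.
    	return errors.Wrapf(errDanglingSymlink, "%q", path)
    }

    func main() {
    	if err := walkStep("/layer/usr/bin/link"); errors.Cause(err) == errDanglingSymlink {
    		fmt.Println("ignoring dangling symlink:", err)
    	}
    }
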
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 6c4d14303..7fa0a7777 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -89,6 +89,7 @@ type FromAndBudResults struct {
 	DNSSearch  []string
 	DNSServers []string
 	DNSOptions []string
+	HttpProxy  bool
 	Isolation  string
 	Memory     string
 	MemorySwap string
@@ -182,6 +183,7 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults,
 	fs.StringSliceVar(&flags.DNSSearch, "dns-search", []string{}, "Set custom DNS search domains")
 	fs.StringSliceVar(&flags.DNSServers, "dns", []string{}, "Set custom DNS servers")
 	fs.StringSliceVar(&flags.DNSOptions, "dns-option", []string{}, "Set custom DNS options")
+	fs.BoolVar(&flags.HttpProxy, "http-proxy", true, "pass thru HTTP Proxy environment variables")
 	fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
 	fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)")
 	fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap")
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index c4e3e4264..cc85136fd 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -86,6 +86,7 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
 	cpuPeriod, _ := c.Flags().GetUint64("cpu-period")
 	cpuQuota, _ := c.Flags().GetInt64("cpu-quota")
 	cpuShares, _ := c.Flags().GetUint64("cpu-shared")
+	httpProxy, _ := c.Flags().GetBool("http-proxy")
 	ulimit, _ := c.Flags().GetStringSlice("ulimit")
 	commonOpts := &buildah.CommonBuildOptions{
 		AddHost:    addHost,
@@ -98,6 +99,7 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
 		DNSSearch:  dnsSearch,
 		DNSServers: dnsServers,
 		DNSOptions: dnsOptions,
+		HTTPProxy:  httpProxy,
 		Memory:     memoryLimit,
 		MemorySwap: memorySwap,
 		ShmSize:    c.Flag("shm-size").Value.String(),
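
The new --http-proxy flag above defaults to true, so proxy variables are forwarded unless a user passes --http-proxy=false. Reduced to plain spf13/pflag (the flag-set name here is arbitrary):

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("bud", pflag.ContinueOnError)
    	httpProxy := fs.Bool("http-proxy", true, "pass thru HTTP Proxy environment variables")
    	_ = fs.Parse([]string{"--http-proxy=false"})
    	fmt.Println("forward proxy env:", *httpProxy) // false
    }
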
diff --git a/vendor/github.com/containers/buildah/pkg/unshare/unshare.go b/vendor/github.com/containers/buildah/pkg/unshare/unshare.go
index 5b2e7d7d1..33232740e 100644
--- a/vendor/github.com/containers/buildah/pkg/unshare/unshare.go
+++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare.go
@@ -3,6 +3,7 @@
 package unshare
 
 import (
+	"bufio"
 	"bytes"
 	"fmt"
 	"io"
@@ -15,7 +16,7 @@ import (
 	"sync"
 	"syscall"
 
-	"github.com/containers/buildah/util"
+	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/reexec"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
@@ -157,7 +158,7 @@ func (c *Cmd) Start() error {
 	}
 
 	if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 {
-		uidmap, gidmap, err := util.GetHostIDMappings("")
+		uidmap, gidmap, err := GetHostIDMappings("")
 		if err != nil {
 			fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err)
 			return errors.Wrapf(err, "error reading ID mappings in parent")
@@ -352,7 +353,7 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
 		// Read the set of ID mappings that we're allowed to use.  Each
 		// range in /etc/subuid and /etc/subgid file is a starting host
 		// ID and a range size.
-		uidmap, gidmap, err = util.GetSubIDMappings(me.Username, me.Username)
+		uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username)
 		bailOnError(err, "error reading allowed ID mappings")
 		if len(uidmap) == 0 {
 			logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
@@ -384,7 +385,7 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
 			return
 		}
 		// Read the set of ID mappings that we're currently using.
-		uidmap, gidmap, err = util.GetHostIDMappings("")
+		uidmap, gidmap, err = GetHostIDMappings("")
 		bailOnError(err, "error reading current ID mappings")
 		// Just reuse them.
 		for i := range uidmap {
@@ -404,6 +405,16 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
 	err = os.Setenv(UsernsEnvName, "1")
 	bailOnError(err, "error setting %s=1 in environment", UsernsEnvName)
 
+	// Set the default isolation type to use the "rootless" method.
+	if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
+		if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
+			if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
+				logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err)
+				os.Exit(1)
+			}
+		}
+	}
+
 	// Reuse our stdio.
 	cmd.Stdin = os.Stdin
 	cmd.Stdout = os.Stdout
@@ -446,3 +457,89 @@ func ExecRunnable(cmd Runnable) {
 	}
 	os.Exit(0)
 }
+
+// getHostIDMappings reads mappings from the named node under /proc.
+func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
+	var mappings []specs.LinuxIDMapping
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error reading ID mappings from %q", path)
+	}
+	defer f.Close()
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		line := scanner.Text()
+		fields := strings.Fields(line)
+		if len(fields) != 3 {
+			return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
+		}
+		cid, err := strconv.ParseUint(fields[0], 10, 32)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path)
+		}
+		hid, err := strconv.ParseUint(fields[1], 10, 32)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path)
+		}
+		size, err := strconv.ParseUint(fields[2], 10, 32)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path)
+		}
+		mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
+	}
+	return mappings, nil
+}
+
+// GetHostIDMappings reads mappings for the specified process (or the current
+// process if pid is "self" or an empty string) from the kernel.
+func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
+	if pid == "" {
+		pid = "self"
+	}
+	uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid))
+	if err != nil {
+		return nil, nil, err
+	}
+	gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid))
+	if err != nil {
+		return nil, nil, err
+	}
+	return uidmap, gidmap, nil
+}
+
+// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid.
+func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
+	mappings, err := idtools.NewIDMappings(user, group)
+	if err != nil {
+		return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
+	}
+	var uidmap, gidmap []specs.LinuxIDMapping
+	for _, m := range mappings.UIDs() {
+		uidmap = append(uidmap, specs.LinuxIDMapping{
+			ContainerID: uint32(m.ContainerID),
+			HostID:      uint32(m.HostID),
+			Size:        uint32(m.Size),
+		})
+	}
+	for _, m := range mappings.GIDs() {
+		gidmap = append(gidmap, specs.LinuxIDMapping{
+			ContainerID: uint32(m.ContainerID),
+			HostID:      uint32(m.HostID),
+			Size:        uint32(m.Size),
+		})
+	}
+	return uidmap, gidmap, nil
+}
+
+// ParseIDMappings parses mapping triples.
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
+	uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
+	if err != nil {
+		return nil, nil, err
+	}
+	gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
+	if err != nil {
+		return nil, nil, err
+	}
+	return uid, gid, nil
+}
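
With the move into pkg/unshare above, callers read the current process's mappings from /proc/self/{uid_map,gid_map} as sketched below; this mirrors the call sites updated in chroot/run.go and run.go later in this diff:

    package main

    import (
    	"fmt"

    	"github.com/containers/buildah/pkg/unshare"
    )

    func main() {
    	uidmap, gidmap, err := unshare.GetHostIDMappings("") // "" means "self"
    	if err != nil {
    		panic(err)
    	}
    	for _, m := range uidmap {
    		fmt.Printf("uid: container %d -> host %d (size %d)\n", m.ContainerID, m.HostID, m.Size)
    	}
    	fmt.Println("gid entries:", len(gidmap))
    }
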
diff --git a/vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go
index d8d5f6f7a..bf4d567b8 100644
--- a/vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go
+++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go
@@ -4,6 +4,9 @@
 package unshare
 
 import (
 	"os"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/opencontainers/runtime-spec/specs-go"
 )
 
 const (
@@ -29,3 +32,14 @@ func RootlessEnv() []string {
 // MaybeReexecUsingUserNamespace re-exec the process in a new namespace
 func MaybeReexecUsingUserNamespace(evenForRoot bool) {
 }
+
+// GetHostIDMappings reads mappings for the specified process (or the current
+// process if pid is "self" or an empty string) from the kernel.
+func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
+	return nil, nil, nil
+}
+
+// ParseIDMappings parses mapping triples.
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
+	return nil, nil, nil
+}
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index 5d28644d7..00eac8e39 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -865,7 +865,7 @@ func setupNamespaces(g *generate.Generator, namespaceOptions NamespaceOptions, i
 		if err := g.AddOrReplaceLinuxNamespace(specs.UserNamespace, ""); err != nil {
 			return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.UserNamespace))
 		}
-		hostUidmap, hostGidmap, err := util.GetHostIDMappings("")
+		hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("")
 		if err != nil {
 			return false, nil, false, err
 		}
@@ -983,6 +983,24 @@ func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, opti
 func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions) {
 	g.ClearProcessEnv()
 
+	if b.CommonBuildOpts.HTTPProxy {
+		for _, envSpec := range []string{
+			"http_proxy",
+			"HTTP_PROXY",
+			"https_proxy",
+			"HTTPS_PROXY",
+			"ftp_proxy",
+			"FTP_PROXY",
+			"no_proxy",
+			"NO_PROXY",
+		} {
+			envVal := os.Getenv(envSpec)
+			if envVal != "" {
+				g.AddProcessEnv(envSpec, envVal)
+			}
+		}
+	}
+
 	for _, envSpec := range append(b.Env(), options.Env...) {
 		env := strings.SplitN(envSpec, "=", 2)
 		if len(env) > 1 {
diff --git a/vendor/github.com/containers/buildah/selinux.go b/vendor/github.com/containers/buildah/selinux.go
index 2b850cf9f..e64eb6112 100644
--- a/vendor/github.com/containers/buildah/selinux.go
+++ b/vendor/github.com/containers/buildah/selinux.go
@@ -4,9 +4,12 @@
 package buildah
 
 import (
 	"github.com/opencontainers/runtime-tools/generate"
+	selinux "github.com/opencontainers/selinux/go-selinux"
 )
 
 func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
-	g.SetProcessSelinuxLabel(processLabel)
-	g.SetLinuxMountLabel(mountLabel)
+	if processLabel != "" && selinux.GetEnabled() {
+		g.SetProcessSelinuxLabel(processLabel)
+		g.SetLinuxMountLabel(mountLabel)
+	}
 }
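
The configureEnvironment change above forwards the conventional proxy variables from the build host when they are set. The selection logic on its own, building a plain KEY=value list:

    package main

    import (
    	"fmt"
    	"os"
    )

    func main() {
    	var env []string
    	for _, name := range []string{
    		"http_proxy", "HTTP_PROXY",
    		"https_proxy", "HTTPS_PROXY",
    		"ftp_proxy", "FTP_PROXY",
    		"no_proxy", "NO_PROXY",
    	} {
    		if val := os.Getenv(name); val != "" {
    			env = append(env, name+"="+val) // only variables actually set on the host
    		}
    	}
    	fmt.Println(env)
    }
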
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 7f3bbaef4..698d79a81 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -1,13 +1,11 @@
 package util
 
 import (
-	"bufio"
 	"fmt"
 	"io"
 	"net/url"
 	"os"
 	"path"
-	"strconv"
 	"strings"
 	"syscall"
 
@@ -18,7 +16,6 @@ import (
 	"github.com/containers/image/transports"
 	"github.com/containers/image/types"
 	"github.com/containers/storage"
-	"github.com/containers/storage/pkg/idtools"
 	"github.com/docker/distribution/registry/api/errcode"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
@@ -299,92 +296,6 @@ func GetHostRootIDs(spec *specs.Spec) (uint32, uint32, error) {
 	return GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, 0, 0)
 }
 
-// getHostIDMappings reads mappings from the named node under /proc.
-func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
-	var mappings []specs.LinuxIDMapping
-	f, err := os.Open(path)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error reading ID mappings from %q", path)
-	}
-	defer f.Close()
-	scanner := bufio.NewScanner(f)
-	for scanner.Scan() {
-		line := scanner.Text()
-		fields := strings.Fields(line)
-		if len(fields) != 3 {
-			return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
-		}
-		cid, err := strconv.ParseUint(fields[0], 10, 32)
-		if err != nil {
-			return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path)
-		}
-		hid, err := strconv.ParseUint(fields[1], 10, 32)
-		if err != nil {
-			return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path)
-		}
-		size, err := strconv.ParseUint(fields[2], 10, 32)
-		if err != nil {
-			return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path)
-		}
-		mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
-	}
-	return mappings, nil
-}
-
-// GetHostIDMappings reads mappings for the specified process (or the current
-// process if pid is "self" or an empty string) from the kernel.
-func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
-	if pid == "" {
-		pid = "self"
-	}
-	uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid))
-	if err != nil {
-		return nil, nil, err
-	}
-	gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid))
-	if err != nil {
-		return nil, nil, err
-	}
-	return uidmap, gidmap, nil
-}
-
-// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid.
-func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
-	mappings, err := idtools.NewIDMappings(user, group)
-	if err != nil {
-		return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
-	}
-	var uidmap, gidmap []specs.LinuxIDMapping
-	for _, m := range mappings.UIDs() {
-		uidmap = append(uidmap, specs.LinuxIDMapping{
-			ContainerID: uint32(m.ContainerID),
-			HostID:      uint32(m.HostID),
-			Size:        uint32(m.Size),
-		})
-	}
-	for _, m := range mappings.GIDs() {
-		gidmap = append(gidmap, specs.LinuxIDMapping{
-			ContainerID: uint32(m.ContainerID),
-			HostID:      uint32(m.HostID),
-			Size:        uint32(m.Size),
-		})
-	}
-	return uidmap, gidmap, nil
-}
-
-// ParseIDMappings parses mapping triples.
-func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
-	uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
-	if err != nil {
-		return nil, nil, err
-	}
-	gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
-	if err != nil {
-		return nil, nil, err
-	}
-	return uid, gid, nil
-}
-
 // GetPolicyContext sets up, initializes and returns a new context for the specified policy
 func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error) {
 	policy, err := signature.DefaultPolicy(ctx)
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index a77130acb..bec681e5c 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -8,7 +8,7 @@ github.com/vbauerster/mpb v3.3.4
 github.com/mattn/go-isatty v0.0.4
 github.com/VividCortex/ewma v1.1.1
 github.com/boltdb/bolt v1.3.1
-github.com/containers/storage v1.12.2
+github.com/containers/storage v1.12.3
 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
 github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83
 github.com/docker/docker-credential-helpers v0.6.1
diff --git a/vendor/github.com/containers/psgo/psgo.go b/vendor/github.com/containers/psgo/psgo.go
index e0f102735..f1936f917 100644
--- a/vendor/github.com/containers/psgo/psgo.go
+++ b/vendor/github.com/containers/psgo/psgo.go
@@ -93,7 +93,7 @@ func translateDescriptors(descriptors []string) ([]aixFormatDescriptor, error) {
 		}
 	}
 	if !found {
-		return nil, errors.Wrapf(ErrUnkownDescriptor, "'%s'", d)
+		return nil, errors.Wrapf(ErrUnknownDescriptor, "'%s'", d)
 	}
 }
 
@@ -104,8 +104,8 @@ var (
 	// DefaultDescriptors is the `ps -ef` compatible default format.
 	DefaultDescriptors = []string{"user", "pid", "ppid", "pcpu", "etime", "tty", "time", "args"}
 
-	// ErrUnkownDescriptor is returned when an unknown descriptor is parsed.
-	ErrUnkownDescriptor = errors.New("unknown descriptor")
+	// ErrUnknownDescriptor is returned when an unknown descriptor is parsed.
+	ErrUnknownDescriptor = errors.New("unknown descriptor")
 	aixFormatDescriptors = []aixFormatDescriptor{
 		{
@@ -327,7 +327,10 @@ func JoinNamespaceAndProcessInfo(pid string, descriptors []string) ([][]string,
 			dataErr = err
 			return
 		}
-		unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS)
+		if err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS); err != nil {
+			dataErr = err
+			return
+		}
 
 		// extract all pids mentioned in pid's mount namespace
 		pids, err := proc.GetPIDs()
diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go
index 40b912bb3..aef6becfe 100644
--- a/vendor/github.com/containers/storage/containers_ffjson.go
+++ b/vendor/github.com/containers/storage/containers_ffjson.go
@@ -1,5 +1,5 @@
 // Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./containers.go
+// source: containers.go
 
 package storage
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
index bcbc61284..d614b78fc 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -1,4 +1,4 @@
-// +build linux
+// +build cgo
 
 package copy
 
@@ -153,8 +153,8 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 
 		isHardlink := false
 
-		switch f.Mode() & os.ModeType {
-		case 0: // Regular file
+		switch mode := f.Mode(); {
+		case mode.IsRegular():
 			id := fileID{dev: stat.Dev, ino: stat.Ino}
 			if copyMode == Hardlink {
 				isHardlink = true
@@ -172,12 +172,12 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 				copiedFiles[id] = dstPath
 			}
 
-		case os.ModeDir:
+		case mode.IsDir():
 			if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
 				return err
 			}
 
-		case os.ModeSymlink:
+		case mode&os.ModeSymlink != 0:
 			link, err := os.Readlink(srcPath)
 			if err != nil {
 				return err
@@ -187,14 +187,15 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 				return err
 			}
 
-		case os.ModeNamedPipe:
+		case mode&os.ModeNamedPipe != 0:
 			fallthrough
-		case os.ModeSocket:
+
+		case mode&os.ModeSocket != 0:
 			if err := unix.Mkfifo(dstPath, stat.Mode); err != nil {
 				return err
 			}
 
-		case os.ModeDevice:
+		case mode&os.ModeDevice != 0:
 			if rsystem.RunningInUserNS() {
 				// cannot create a device if running in user namespace
 				return nil
@@ -204,7 +205,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 		}
 
 		default:
-			return fmt.Errorf("unknown file type for %s", srcPath)
+			return fmt.Errorf("unknown file type with mode %v for %s", mode, srcPath)
 		}
 
 		// Everything below is copying metadata from src to dst. All this metadata
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
new file mode 100644
index 000000000..4d44f2f35
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
@@ -0,0 +1,19 @@
+// +build !linux !cgo
+
+package copy
+
+import "github.com/containers/storage/pkg/chrootarchive"
+
+// Mode indicates whether to use hardlink or copy content
+type Mode int
+
+const (
+	// Content creates a new file, and copies the content of the file
+	Content Mode = iota
+)
+
+// DirCopy copies or hardlinks the contents of one directory to another,
+// properly handling soft links
+func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error {
+	return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir)
+}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
index 58abca477..f63845252 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
@@ -119,10 +119,17 @@ func checkDevHasFS(dev string) error {
 }
 
 func verifyBlockDevice(dev string, force bool) error {
-	if err := checkDevAvailable(dev); err != nil {
+	realPath, err := filepath.Abs(dev)
+	if err != nil {
+		return errors.Errorf("unable to get absolute path for %s: %s", dev, err)
+	}
+	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+		return errors.Errorf("failed to canonicalise path for %s: %s", dev, err)
+	}
+	if err := checkDevAvailable(realPath); err != nil {
 		return err
 	}
-	if err := checkDevInVG(dev); err != nil {
+	if err := checkDevInVG(realPath); err != nil {
 		return err
 	}
 
@@ -130,7 +137,7 @@ func verifyBlockDevice(dev string, force bool) error {
 		return nil
 	}
 
-	if err := checkDevHasFS(dev); err != nil {
+	if err := checkDevHasFS(realPath); err != nil {
 		return err
 	}
 	return nil
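
The rewritten DirCopy switch above keys on os.FileMode predicates rather than comparing mode&os.ModeType against single constants, which also yields a clearer error for unknown types. A standalone classifier using the same predicates:

    package main

    import (
    	"fmt"
    	"os"
    )

    func classify(mode os.FileMode) string {
    	switch {
    	case mode.IsRegular():
    		return "regular file"
    	case mode.IsDir():
    		return "directory"
    	case mode&os.ModeSymlink != 0:
    		return "symlink"
    	case mode&os.ModeNamedPipe != 0, mode&os.ModeSocket != 0:
    		return "fifo or socket"
    	case mode&os.ModeDevice != 0:
    		return "device"
    	default:
    		return fmt.Sprintf("unknown file type with mode %v", mode)
    	}
    }

    func main() {
    	fi, err := os.Lstat(".")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(classify(fi.Mode())) // directory
    }
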
fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", lower1Dir, lower2Dir, upperDir, workDir) if len(flags) < unix.Getpagesize() { err := mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) if err == nil { @@ -341,7 +345,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI logrus.Debugf("overlay test mount with multiple lowers failed %v", err) } } - flags = fmt.Sprintf("lowerdir=%s", lower1Dir) + flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir) if len(flags) < unix.Getpagesize() { err := mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) if err == nil { @@ -677,6 +681,40 @@ func (d *Driver) Remove(id string) error { return nil } +// recreateSymlinks goes through the driver's home directory and checks if the diff directory +// under each layer has a symlink created for it under the linkDir. If the symlink does not +// exist, it creates them +func (d *Driver) recreateSymlinks() error { + // List all the directories under the home directory + dirs, err := ioutil.ReadDir(d.home) + if err != nil { + return fmt.Errorf("error reading driver home directory %q: %v", d.home, err) + } + for _, dir := range dirs { + // Skip over the linkDir + if dir.Name() == linkDir || dir.Mode().IsRegular() { + continue + } + // Read the "link" file under each layer to get the name of the symlink + data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link")) + if err != nil { + return fmt.Errorf("error reading name of symlink for %q: %v", dir, err) + } + linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) + // Check if the symlink exists, and if it doesn't create it again with the name we + // got from the "link" file + _, err = os.Stat(linkPath) + if err != nil && os.IsNotExist(err) { + if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil { + return err + } + } else if err != nil { + return fmt.Errorf("error trying to stat %q: %v", linkPath, err) + } + } + return nil +} + // Get creates and mounts the required file system for the given id and returns the mount path. func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { return d.get(id, false, options) @@ -732,7 +770,16 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } lower = "" } - if lower == "" { + // if it is a "not found" error, that means the symlinks were lost in a sudden reboot + // so call the recreateSymlinks function to go through all the layer dirs and recreate + // the symlinks with the name from their respective "link" files + if lower == "" && os.IsNotExist(err) { + logrus.Warnf("Can't stat lower layer %q because it does not exist. 
Going through storage to recreate the missing symlinks.", newpath) + if err := d.recreateSymlinks(); err != nil { + return "", fmt.Errorf("error recreating the missing symlinks: %v", err) + } + lower = newpath + } else if lower == "" { return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err) } } else { @@ -796,7 +843,17 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO mountProgram := exec.Command(d.options.mountProgram, "-o", label, target) mountProgram.Dir = d.home - return mountProgram.Run() + var b bytes.Buffer + mountProgram.Stderr = &b + err := mountProgram.Run() + if err != nil { + output := b.String() + if output == "" { + output = "<stderr empty>" + } + return errors.Wrapf(err, "using mount program %s: %s", d.options.mountProgram, output) + } + return nil } } else if len(mountData) > pageSize { //FIXME: We need to figure out to get this to work with additional stores diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go index 539acfe93..6b40ebd59 100644 --- a/vendor/github.com/containers/storage/images_ffjson.go +++ b/vendor/github.com/containers/storage/images_ffjson.go @@ -1,5 +1,5 @@ // Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT. -// source: ./images.go +// source: images.go package storage diff --git a/vendor/github.com/containers/storage/lockfile.go b/vendor/github.com/containers/storage/lockfile.go index 3a1befcbe..ed8753337 100644 --- a/vendor/github.com/containers/storage/lockfile.go +++ b/vendor/github.com/containers/storage/lockfile.go @@ -58,8 +58,17 @@ func GetROLockfile(path string) (Locker, error) { return getLockfile(path, true) } -// getLockfile is a helper for GetLockfile and GetROLockfile and returns Locker -// based on the path and read-only property. +// getLockfile returns a Locker object, possibly (depending on the platform) +// working inter-process, and associated with the specified path. +// +// If ro, the lock is a read-write lock and the returned Locker should correspond to the +// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, +// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation. +// +// WARNING: +// - The lock may or MAY NOT be inter-process. +// - There may or MAY NOT be an actual object on the filesystem created for the specified path. +// - Even if ro, the lock MAY be exclusive. 
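The overlay symlink recovery added above reduces to a stat-then-recreate step over plain standard-library calls. A minimal standalone sketch, assuming hypothetical home path, layer ID, and link name inputs (the real driver reads the name from each layer's "link" file and skips the link directory itself):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // ensureLink mirrors the recreateSymlinks idea: if the short-name symlink
    // for a layer is gone (for example after a reboot cleared it), re-create
    // it pointing at the layer's diff directory.
    func ensureLink(home, layerID, linkName string) error {
        linkPath := filepath.Join(home, "l", linkName) // "l" is the overlay link dir
        _, err := os.Stat(linkPath)
        if err != nil && os.IsNotExist(err) {
            return os.Symlink(filepath.Join("..", layerID, "diff"), linkPath)
        } else if err != nil {
            return fmt.Errorf("error trying to stat %q: %v", linkPath, err)
        }
        return nil
    }

    func main() {
        // Hypothetical inputs, for illustration only.
        if err := ensureLink("/var/lib/containers/storage/overlay", "abc123", "ABCDEF"); err != nil {
            fmt.Println(err)
        }
    }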
func getLockfile(path string, ro bool) (Locker, error) { lockfilesLock.Lock() defer lockfilesLock.Unlock() @@ -79,7 +88,7 @@ func getLockfile(path string, ro bool) (Locker, error) { } return locker, nil } - locker, err := getLockFile(path, ro) // platform dependent locker + locker, err := createLockerForPath(path, ro) // platform-dependent locker if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/lockfile_unix.go b/vendor/github.com/containers/storage/lockfile_unix.go index a9dc64122..8e0f22cb5 100644 --- a/vendor/github.com/containers/storage/lockfile_unix.go +++ b/vendor/github.com/containers/storage/lockfile_unix.go @@ -13,18 +13,51 @@ import ( "golang.org/x/sys/unix" ) -func getLockFile(path string, ro bool) (Locker, error) { - var fd int - var err error +type lockfile struct { + // rwMutex serializes concurrent reader-writer acquisitions in the same process space + rwMutex *sync.RWMutex + // stateMutex is used to synchronize concurrent accesses to the state below + stateMutex *sync.Mutex + counter int64 + file string + fd uintptr + lw string + locktype int16 + locked bool + ro bool +} + +// openLock opens the file at path and returns the corresponding file +// descriptor. Note that the path is opened read-only when ro is set. If ro +// is unset, openLock will open the path read-write and create the file if +// necessary. +func openLock(path string, ro bool) (int, error) { if ro { - fd, err = unix.Open(path, os.O_RDONLY, 0) - } else { - fd, err = unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR) + return unix.Open(path, os.O_RDONLY, 0) } + return unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR) +} + +// createLockerForPath returns a Locker object, possibly (depending on the platform) +// working inter-process and associated with the specified path. +// +// This function will be called at most once for each path value within a single process. +// +// If ro, the lock is a read-write lock and the returned Locker should correspond to the +// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, +// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation. +// +// WARNING: +// - The lock may or MAY NOT be inter-process. +// - There may or MAY NOT be an actual object on the filesystem created for the specified path. +// - Even if ro, the lock MAY be exclusive. +func createLockerForPath(path string, ro bool) (Locker, error) { + // Check if we can open the lock. + fd, err := openLock(path, ro) if err != nil { return nil, errors.Wrapf(err, "error opening %q", path) } - unix.CloseOnExec(fd) + unix.Close(fd) locktype := unix.F_WRLCK if ro { @@ -34,27 +67,12 @@ func getLockFile(path string, ro bool) (Locker, error) { stateMutex: &sync.Mutex{}, rwMutex: &sync.RWMutex{}, file: path, - fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: int16(locktype), locked: false, ro: ro}, nil } -type lockfile struct { - // rwMutex serializes concurrent reader-writer acquisitions in the same process space - rwMutex *sync.RWMutex - // stateMutex is used to synchronize concurrent accesses to the state below - stateMutex *sync.Mutex - counter int64 - file string - fd uintptr - lw string - locktype int16 - locked bool - ro bool -} - // lock locks the lockfile via FCTNL(2) based on the specified type and // command. 
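The Locker built below serializes processes via POSIX record locks. A minimal sketch of the underlying fcntl(2) calls that lock() wraps, using an arbitrary demo path:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Open the lock file the same way openLock does for the read-write case.
        fd, err := unix.Open("/tmp/example.lock", os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
        if err != nil {
            panic(err)
        }
        defer unix.Close(fd)

        lk := unix.Flock_t{
            Type:   unix.F_WRLCK, // exclusive; F_RDLCK is the shared flavor
            Whence: int16(os.SEEK_SET),
            Start:  0,
            Len:    0, // zero length locks through EOF, i.e. the whole file
        }
        // F_SETLKW blocks until the lock can be acquired.
        if err := unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk); err != nil {
            panic(err)
        }
        fmt.Println("locked")

        lk.Type = unix.F_UNLCK
        if err := unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk); err != nil {
            panic(err)
        }
        fmt.Println("unlocked")
    }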
func (l *lockfile) lock(l_type int16) { @@ -63,7 +81,6 @@ func (l *lockfile) lock(l_type int16) { Whence: int16(os.SEEK_SET), Start: 0, Len: 0, - Pid: int32(os.Getpid()), } switch l_type { case unix.F_RDLCK: @@ -74,7 +91,16 @@ func (l *lockfile) lock(l_type int16) { panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type)) } l.stateMutex.Lock() + defer l.stateMutex.Unlock() if l.counter == 0 { + // If we're the first reference on the lock, we need to open the file again. + fd, err := openLock(l.file, l.ro) + if err != nil { + panic(fmt.Sprintf("error opening %q", l.file)) + } + unix.CloseOnExec(fd) + l.fd = uintptr(fd) + // Optimization: only use the (expensive) fcntl syscall when // the counter is 0. In this case, we're either the first // reader lock or a writer lock. @@ -85,7 +111,6 @@ func (l *lockfile) lock(l_type int16) { l.locktype = l_type l.locked = true l.counter++ - l.stateMutex.Unlock() } // Lock locks the lockfile as a writer. Note that RLock() will be called if @@ -133,6 +158,8 @@ func (l *lockfile) Unlock() { for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil { time.Sleep(10 * time.Millisecond) } + // Close the file descriptor on the last unlock. + unix.Close(int(l.fd)) } if l.locktype == unix.F_RDLCK { l.rwMutex.RUnlock() diff --git a/vendor/github.com/containers/storage/lockfile_windows.go b/vendor/github.com/containers/storage/lockfile_windows.go index a3821bfeb..c02069495 100644 --- a/vendor/github.com/containers/storage/lockfile_windows.go +++ b/vendor/github.com/containers/storage/lockfile_windows.go @@ -8,7 +8,20 @@ import ( "time" ) -func getLockFile(path string, ro bool) (Locker, error) { +// createLockerForPath returns a Locker object, possibly (depending on the platform) +// working inter-process and associated with the specified path. +// +// This function will be called at most once for each path value within a single process. +// +// If ro, the lock is a read-write lock and the returned Locker should correspond to the +// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, +// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation. +// +// WARNING: +// - The lock may or MAY NOT be inter-process. +// - There may or MAY NOT be an actual object on the filesystem created for the specified path. +// - Even if ro, the lock MAY be exclusive. +func createLockerForPath(path string, ro bool) (Locker, error) { return &lockfile{locked: false}, nil } diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go index c56aa86a2..86f98f16e 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/parser.go +++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go @@ -2,6 +2,8 @@ package idtools import ( "fmt" + "math" + "math/bits" "strconv" "strings" ) @@ -31,10 +33,11 @@ func parseTriple(spec []string) (container, host, size uint32, err error) { // ParseIDMap parses idmap triples from string. 
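A usage sketch for ParseIDMap as documented above; the triple value is only an example, in the colon-separated container-ID:host-ID:size syntax the CLI uid-map flags use:

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/idtools"
    )

    func main() {
        // Parse one uid-map triple: container IDs 0..65535 map to host
        // IDs 100000..165535.
        idmap, err := idtools.ParseIDMap([]string{"0:100000:65536"}, "uidmap")
        if err != nil {
            panic(err)
        }
        for _, m := range idmap {
            fmt.Printf("container %d -> host %d (size %d)\n", m.ContainerID, m.HostID, m.Size)
        }
    }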
func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) { + stdErr := fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting) for _, idMapSpec := range mapSpec { idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec)) if len(idSpec)%3 != 0 { - return nil, fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting) + return nil, stdErr } for i := range idSpec { if i%3 != 0 { @@ -42,7 +45,11 @@ func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) } cid, hid, size, err := parseTriple(idSpec[i : i+3]) if err != nil { - return nil, fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting) + return nil, stdErr + } + // Avoid possible integer overflow on 32bit builds + if bits.UintSize == 32 && (cid > math.MaxInt32 || hid > math.MaxInt32 || size > math.MaxInt32) { + return nil, stdErr } mapping := IDMap{ ContainerID: int(cid), diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 7e39e3959..27b00f6fe 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -460,6 +460,9 @@ type Store interface { // Version returns version information, in the form of key-value pairs, from // the storage package. Version() ([][2]string, error) + + // GetDigestLock returns digest-specific Locker. + GetDigestLock(digest.Digest) (Locker, error) } // IDMappingOptions are used for specifying how ID mapping should be set up for @@ -529,6 +532,7 @@ type store struct { imageStore ImageStore roImageStores []ROImageStore containerStore ContainerStore + digestLockRoot string } // GetStore attempts to find an already-created Store object matching the @@ -698,9 +702,20 @@ func (s *store) load() error { return err } s.containerStore = rcs + + s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks") + if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil { + return err + } + return nil } +// GetDigestLock returns a digest-specific Locker. +func (s *store) GetDigestLock(d digest.Digest) (Locker, error) { + return GetLockfile(filepath.Join(s.digestLockRoot, d.String())) +} + func (s *store) getGraphDriver() (drivers.Driver, error) { if s.graphDriver != nil { return s.graphDriver, nil @@ -1023,8 +1038,9 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap) } var layer, parentLayer *Layer + allStores := append([]ROLayerStore{rlstore}, lstores...) // Locate the image's top layer and its parent, if it has one. - for _, s := range append([]ROLayerStore{rlstore}, lstores...) { + for _, s := range allStores { store := s if store != rlstore { store.Lock() @@ -1041,10 +1057,13 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea // We want the layer's parent, too, if it has one. var cParentLayer *Layer if cLayer.Parent != "" { - // Its parent should be around here, somewhere. - if cParentLayer, err = store.Get(cLayer.Parent); err != nil { - // Nope, couldn't find it. We're not going to be able - // to diff this one properly. + // Its parent should be in one of the stores, somewhere. 
+ for _, ps := range allStores { + if cParentLayer, err = ps.Get(cLayer.Parent); err == nil { + break + } + } + if cParentLayer == nil { continue } } diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index e74956c9e..6c9f163a3 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -6,6 +6,7 @@ import ( "os/exec" "os/user" "path/filepath" + "strconv" "strings" "github.com/BurntSushi/toml" @@ -73,7 +74,7 @@ func GetRootlessRuntimeDir(rootlessUid int) (string, error) { if runtimeDir == "" { tmpDir := fmt.Sprintf("/run/user/%d", rootlessUid) st, err := system.Stat(tmpDir) - if err == nil && int(st.UID()) == os.Getuid() && st.Mode() == 0700 { + if err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 { return tmpDir, nil } } @@ -158,6 +159,21 @@ func getTomlStorage(storeOptions *StoreOptions) *tomlConfig { return config } +func getRootlessUID() int { + uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Geteuid() +} + +// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers +func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) { + uid := getRootlessUID() + return DefaultStoreOptions(uid != 0, uid) +} + // DefaultStoreOptions returns the default storage ops for containers func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) { var ( @@ -166,14 +182,14 @@ func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) { err error ) storageOpts := defaultStoreOptions - if rootless { + if rootless && rootlessUid != 0 { storageOpts, err = getRootlessStorageOpts(rootlessUid) if err != nil { return storageOpts, err } } - storageConf, err := DefaultConfigFile(rootless) + storageConf, err := DefaultConfigFile(rootless && rootlessUid != 0) if err != nil { return storageOpts, err } @@ -188,7 +204,7 @@ func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) { return storageOpts, errors.Wrapf(err, "cannot stat %s", storageConf) } - if rootless { + if rootless && rootlessUid != 0 { if err == nil { // If the file did not specify a graphroot or runroot, // set sane defaults so we don't try and use root-owned diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go new file mode 100644 index 000000000..7f434990d --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/journal/journal.go @@ -0,0 +1,179 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal a C API. 
+// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "syscall" +) + +// Priority of a journal message +type Priority int + +const ( + PriEmerg Priority = iota + PriAlert + PriCrit + PriErr + PriWarning + PriNotice + PriInfo + PriDebug +) + +var conn net.Conn + +func init() { + var err error + conn, err = net.Dial("unixgram", "/run/systemd/journal/socket") + if err != nil { + conn = nil + } +} + +// Enabled returns true if the local systemd journal is available for logging +func Enabled() bool { + return conn != nil +} + +// Send a message to the local systemd journal. vars is a map of journald +// fields to values. Fields must be composed of uppercase letters, numbers, +// and underscores, but must not start with an underscore. Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil. +func Send(message string, priority Priority, vars map[string]string) error { + if conn == nil { + return journalError("could not connect to journald socket") + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, err := io.Copy(conn, data) + if err != nil && isSocketSpaceError(err) { + file, err := tempFd() + if err != nil { + return journalError(err.Error()) + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return journalError(err.Error()) + } + + rights := syscall.UnixRights(int(file.Fd())) + + /* this connection should always be a UnixConn, but better safe than sorry */ + unixConn, ok := conn.(*net.UnixConn) + if !ok { + return journalError("can't send file through non-Unix connection") + } + unixConn.WriteMsgUnix([]byte{}, rights, nil) + } else if err != nil { + return journalError(err.Error()) + } + return nil +} + +// Print prints a message to the local systemd journal using Send(). +func Print(priority Priority, format string, a ...interface{}) error { + return Send(fmt.Sprintf(format, a...), priority, nil) +} + +func appendVariable(w io.Writer, name, value string) { + if !validVarName(name) { + journalError("variable name contains invalid character, ignoring") + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +func validVarName(name string) bool { + /* The variable name must be in uppercase and consist only of characters, + * numbers and underscores, and may not begin with an underscore. 
(from the docs)
+	 */
+
+	// An empty name would make the name[0] check below panic.
+	if len(name) == 0 {
+		return false
+	}
+	valid := name[0] != '_'
+	for _, c := range name {
+		// The character test must be parenthesized as a whole: && binds
+		// tighter than ||, so without the parentheses a digit or '_' later
+		// in the name would flip valid back to true after a bad character.
+		valid = valid && (('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_')
+	}
+	return valid
+}
+
+func isSocketSpaceError(err error) bool {
+	opErr, ok := err.(*net.OpError)
+	if !ok {
+		return false
+	}
+
+	sysErr, ok := opErr.Err.(syscall.Errno)
+	if !ok {
+		return false
+	}
+
+	return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
+}
+
+func tempFd() (*os.File, error) {
+	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+	if err != nil {
+		return nil, err
+	}
+	// Assign the Unlink error; the original tested a stale err value here.
+	err = syscall.Unlink(file.Name())
+	if err != nil {
+		return nil, err
+	}
+	return file, nil
+}
+
+func journalError(s string) error {
+	s = "journal error: " + s
+	fmt.Fprintln(os.Stderr, s)
+	return errors.New(s)
+}
diff --git a/vendor/github.com/coreos/go-systemd/sdjournal/functions.go b/vendor/github.com/coreos/go-systemd/sdjournal/functions.go
new file mode 100644
index 000000000..e132369c1
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/sdjournal/functions.go
@@ -0,0 +1,66 @@
+// Copyright 2015 RedHat, Inc.
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sdjournal
+
+import (
+	"github.com/coreos/pkg/dlopen"
+	"sync"
+	"unsafe"
+)
+
+var (
+	// lazy initialized
+	libsystemdHandle *dlopen.LibHandle
+
+	libsystemdMutex     = &sync.Mutex{}
+	libsystemdFunctions = map[string]unsafe.Pointer{}
+	libsystemdNames     = []string{
+		// systemd < 209
+		"libsystemd-journal.so.0",
+		"libsystemd-journal.so",
+
+		// systemd >= 209 merged libsystemd-journal into libsystemd proper
+		"libsystemd.so.0",
+		"libsystemd.so",
+	}
+)
+
+func getFunction(name string) (unsafe.Pointer, error) {
+	libsystemdMutex.Lock()
+	defer libsystemdMutex.Unlock()
+
+	if libsystemdHandle == nil {
+		h, err := dlopen.GetHandle(libsystemdNames)
+		if err != nil {
+			return nil, err
+		}
+
+		libsystemdHandle = h
+	}
+
+	f, ok := libsystemdFunctions[name]
+	if !ok {
+		var err error
+		f, err = libsystemdHandle.GetSymbolPointer(name)
+		if err != nil {
+			return nil, err
+		}
+
+		libsystemdFunctions[name] = f
+	}
+
+	return f, nil
+}
diff --git a/vendor/github.com/coreos/go-systemd/sdjournal/journal.go b/vendor/github.com/coreos/go-systemd/sdjournal/journal.go
new file mode 100644
index 000000000..b00d606c1
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/sdjournal/journal.go
@@ -0,0 +1,1024 @@
+// Copyright 2015 RedHat, Inc.
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
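Back in the pure-Go journal package above, appendVariable frames multi-line values using journald's native datagram format: the field name, a newline, the value's length as a little-endian 64-bit integer, then the value and a trailing newline. A standalone sketch of that framing:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Frame one field the way appendVariable does when the value
        // contains a newline.
        var buf bytes.Buffer
        name, value := "MESSAGE", "first line\nsecond line"
        fmt.Fprintln(&buf, name)
        if err := binary.Write(&buf, binary.LittleEndian, uint64(len(value))); err != nil {
            panic(err)
        }
        fmt.Fprintln(&buf, value)
        fmt.Printf("%q\n", buf.String())
    }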
+ +// Package sdjournal provides a low-level Go interface to the +// systemd journal wrapped around the sd-journal C API. +// +// All public read methods map closely to the sd-journal API functions. See the +// sd-journal.h documentation[1] for information about each function. +// +// To write to the journal, see the pure-Go "journal" package +// +// [1] http://www.freedesktop.org/software/systemd/man/sd-journal.html +package sdjournal + +// #include <systemd/sd-journal.h> +// #include <systemd/sd-id128.h> +// #include <stdlib.h> +// #include <syslog.h> +// +// int +// my_sd_journal_open(void *f, sd_journal **ret, int flags) +// { +// int (*sd_journal_open)(sd_journal **, int); +// +// sd_journal_open = f; +// return sd_journal_open(ret, flags); +// } +// +// int +// my_sd_journal_open_directory(void *f, sd_journal **ret, const char *path, int flags) +// { +// int (*sd_journal_open_directory)(sd_journal **, const char *, int); +// +// sd_journal_open_directory = f; +// return sd_journal_open_directory(ret, path, flags); +// } +// +// void +// my_sd_journal_close(void *f, sd_journal *j) +// { +// int (*sd_journal_close)(sd_journal *); +// +// sd_journal_close = f; +// sd_journal_close(j); +// } +// +// int +// my_sd_journal_get_usage(void *f, sd_journal *j, uint64_t *bytes) +// { +// int (*sd_journal_get_usage)(sd_journal *, uint64_t *); +// +// sd_journal_get_usage = f; +// return sd_journal_get_usage(j, bytes); +// } +// +// int +// my_sd_journal_add_match(void *f, sd_journal *j, const void *data, size_t size) +// { +// int (*sd_journal_add_match)(sd_journal *, const void *, size_t); +// +// sd_journal_add_match = f; +// return sd_journal_add_match(j, data, size); +// } +// +// int +// my_sd_journal_add_disjunction(void *f, sd_journal *j) +// { +// int (*sd_journal_add_disjunction)(sd_journal *); +// +// sd_journal_add_disjunction = f; +// return sd_journal_add_disjunction(j); +// } +// +// int +// my_sd_journal_add_conjunction(void *f, sd_journal *j) +// { +// int (*sd_journal_add_conjunction)(sd_journal *); +// +// sd_journal_add_conjunction = f; +// return sd_journal_add_conjunction(j); +// } +// +// void +// my_sd_journal_flush_matches(void *f, sd_journal *j) +// { +// int (*sd_journal_flush_matches)(sd_journal *); +// +// sd_journal_flush_matches = f; +// sd_journal_flush_matches(j); +// } +// +// int +// my_sd_journal_next(void *f, sd_journal *j) +// { +// int (*sd_journal_next)(sd_journal *); +// +// sd_journal_next = f; +// return sd_journal_next(j); +// } +// +// int +// my_sd_journal_next_skip(void *f, sd_journal *j, uint64_t skip) +// { +// int (*sd_journal_next_skip)(sd_journal *, uint64_t); +// +// sd_journal_next_skip = f; +// return sd_journal_next_skip(j, skip); +// } +// +// int +// my_sd_journal_previous(void *f, sd_journal *j) +// { +// int (*sd_journal_previous)(sd_journal *); +// +// sd_journal_previous = f; +// return sd_journal_previous(j); +// } +// +// int +// my_sd_journal_previous_skip(void *f, sd_journal *j, uint64_t skip) +// { +// int (*sd_journal_previous_skip)(sd_journal *, uint64_t); +// +// sd_journal_previous_skip = f; +// return sd_journal_previous_skip(j, skip); +// } +// +// int +// my_sd_journal_get_data(void *f, sd_journal *j, const char *field, const void **data, size_t *length) +// { +// int (*sd_journal_get_data)(sd_journal *, const char *, const void **, size_t *); +// +// sd_journal_get_data = f; +// return sd_journal_get_data(j, field, data, length); +// } +// +// int +// my_sd_journal_set_data_threshold(void *f, sd_journal *j, size_t sz) +// 
{ +// int (*sd_journal_set_data_threshold)(sd_journal *, size_t); +// +// sd_journal_set_data_threshold = f; +// return sd_journal_set_data_threshold(j, sz); +// } +// +// int +// my_sd_journal_get_cursor(void *f, sd_journal *j, char **cursor) +// { +// int (*sd_journal_get_cursor)(sd_journal *, char **); +// +// sd_journal_get_cursor = f; +// return sd_journal_get_cursor(j, cursor); +// } +// +// int +// my_sd_journal_test_cursor(void *f, sd_journal *j, const char *cursor) +// { +// int (*sd_journal_test_cursor)(sd_journal *, const char *); +// +// sd_journal_test_cursor = f; +// return sd_journal_test_cursor(j, cursor); +// } +// +// int +// my_sd_journal_get_realtime_usec(void *f, sd_journal *j, uint64_t *usec) +// { +// int (*sd_journal_get_realtime_usec)(sd_journal *, uint64_t *); +// +// sd_journal_get_realtime_usec = f; +// return sd_journal_get_realtime_usec(j, usec); +// } +// +// int +// my_sd_journal_get_monotonic_usec(void *f, sd_journal *j, uint64_t *usec, sd_id128_t *boot_id) +// { +// int (*sd_journal_get_monotonic_usec)(sd_journal *, uint64_t *, sd_id128_t *); +// +// sd_journal_get_monotonic_usec = f; +// return sd_journal_get_monotonic_usec(j, usec, boot_id); +// } +// +// int +// my_sd_journal_seek_head(void *f, sd_journal *j) +// { +// int (*sd_journal_seek_head)(sd_journal *); +// +// sd_journal_seek_head = f; +// return sd_journal_seek_head(j); +// } +// +// int +// my_sd_journal_seek_tail(void *f, sd_journal *j) +// { +// int (*sd_journal_seek_tail)(sd_journal *); +// +// sd_journal_seek_tail = f; +// return sd_journal_seek_tail(j); +// } +// +// +// int +// my_sd_journal_seek_cursor(void *f, sd_journal *j, const char *cursor) +// { +// int (*sd_journal_seek_cursor)(sd_journal *, const char *); +// +// sd_journal_seek_cursor = f; +// return sd_journal_seek_cursor(j, cursor); +// } +// +// int +// my_sd_journal_seek_realtime_usec(void *f, sd_journal *j, uint64_t usec) +// { +// int (*sd_journal_seek_realtime_usec)(sd_journal *, uint64_t); +// +// sd_journal_seek_realtime_usec = f; +// return sd_journal_seek_realtime_usec(j, usec); +// } +// +// int +// my_sd_journal_wait(void *f, sd_journal *j, uint64_t timeout_usec) +// { +// int (*sd_journal_wait)(sd_journal *, uint64_t); +// +// sd_journal_wait = f; +// return sd_journal_wait(j, timeout_usec); +// } +// +// void +// my_sd_journal_restart_data(void *f, sd_journal *j) +// { +// void (*sd_journal_restart_data)(sd_journal *); +// +// sd_journal_restart_data = f; +// sd_journal_restart_data(j); +// } +// +// int +// my_sd_journal_enumerate_data(void *f, sd_journal *j, const void **data, size_t *length) +// { +// int (*sd_journal_enumerate_data)(sd_journal *, const void **, size_t *); +// +// sd_journal_enumerate_data = f; +// return sd_journal_enumerate_data(j, data, length); +// } +// +// int +// my_sd_journal_query_unique(void *f, sd_journal *j, const char *field) +// { +// int(*sd_journal_query_unique)(sd_journal *, const char *); +// +// sd_journal_query_unique = f; +// return sd_journal_query_unique(j, field); +// } +// +// int +// my_sd_journal_enumerate_unique(void *f, sd_journal *j, const void **data, size_t *length) +// { +// int(*sd_journal_enumerate_unique)(sd_journal *, const void **, size_t *); +// +// sd_journal_enumerate_unique = f; +// return sd_journal_enumerate_unique(j, data, length); +// } +// +// void +// my_sd_journal_restart_unique(void *f, sd_journal *j) +// { +// void(*sd_journal_restart_unique)(sd_journal *); +// +// sd_journal_restart_unique = f; +// sd_journal_restart_unique(j); +// } +// 
+import "C" +import ( + "bytes" + "fmt" + "strings" + "sync" + "syscall" + "time" + "unsafe" +) + +// Journal entry field strings which correspond to: +// http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html +const ( + // User Journal Fields + SD_JOURNAL_FIELD_MESSAGE = "MESSAGE" + SD_JOURNAL_FIELD_MESSAGE_ID = "MESSAGE_ID" + SD_JOURNAL_FIELD_PRIORITY = "PRIORITY" + SD_JOURNAL_FIELD_CODE_FILE = "CODE_FILE" + SD_JOURNAL_FIELD_CODE_LINE = "CODE_LINE" + SD_JOURNAL_FIELD_CODE_FUNC = "CODE_FUNC" + SD_JOURNAL_FIELD_ERRNO = "ERRNO" + SD_JOURNAL_FIELD_SYSLOG_FACILITY = "SYSLOG_FACILITY" + SD_JOURNAL_FIELD_SYSLOG_IDENTIFIER = "SYSLOG_IDENTIFIER" + SD_JOURNAL_FIELD_SYSLOG_PID = "SYSLOG_PID" + + // Trusted Journal Fields + SD_JOURNAL_FIELD_PID = "_PID" + SD_JOURNAL_FIELD_UID = "_UID" + SD_JOURNAL_FIELD_GID = "_GID" + SD_JOURNAL_FIELD_COMM = "_COMM" + SD_JOURNAL_FIELD_EXE = "_EXE" + SD_JOURNAL_FIELD_CMDLINE = "_CMDLINE" + SD_JOURNAL_FIELD_CAP_EFFECTIVE = "_CAP_EFFECTIVE" + SD_JOURNAL_FIELD_AUDIT_SESSION = "_AUDIT_SESSION" + SD_JOURNAL_FIELD_AUDIT_LOGINUID = "_AUDIT_LOGINUID" + SD_JOURNAL_FIELD_SYSTEMD_CGROUP = "_SYSTEMD_CGROUP" + SD_JOURNAL_FIELD_SYSTEMD_SESSION = "_SYSTEMD_SESSION" + SD_JOURNAL_FIELD_SYSTEMD_UNIT = "_SYSTEMD_UNIT" + SD_JOURNAL_FIELD_SYSTEMD_USER_UNIT = "_SYSTEMD_USER_UNIT" + SD_JOURNAL_FIELD_SYSTEMD_OWNER_UID = "_SYSTEMD_OWNER_UID" + SD_JOURNAL_FIELD_SYSTEMD_SLICE = "_SYSTEMD_SLICE" + SD_JOURNAL_FIELD_SELINUX_CONTEXT = "_SELINUX_CONTEXT" + SD_JOURNAL_FIELD_SOURCE_REALTIME_TIMESTAMP = "_SOURCE_REALTIME_TIMESTAMP" + SD_JOURNAL_FIELD_BOOT_ID = "_BOOT_ID" + SD_JOURNAL_FIELD_MACHINE_ID = "_MACHINE_ID" + SD_JOURNAL_FIELD_HOSTNAME = "_HOSTNAME" + SD_JOURNAL_FIELD_TRANSPORT = "_TRANSPORT" + + // Address Fields + SD_JOURNAL_FIELD_CURSOR = "__CURSOR" + SD_JOURNAL_FIELD_REALTIME_TIMESTAMP = "__REALTIME_TIMESTAMP" + SD_JOURNAL_FIELD_MONOTONIC_TIMESTAMP = "__MONOTONIC_TIMESTAMP" +) + +// Journal event constants +const ( + SD_JOURNAL_NOP = int(C.SD_JOURNAL_NOP) + SD_JOURNAL_APPEND = int(C.SD_JOURNAL_APPEND) + SD_JOURNAL_INVALIDATE = int(C.SD_JOURNAL_INVALIDATE) +) + +const ( + // IndefiniteWait is a sentinel value that can be passed to + // sdjournal.Wait() to signal an indefinite wait for new journal + // events. It is implemented as the maximum value for a time.Duration: + // https://github.com/golang/go/blob/e4dcf5c8c22d98ac9eac7b9b226596229624cb1d/src/time/time.go#L434 + IndefiniteWait time.Duration = 1<<63 - 1 +) + +// Journal is a Go wrapper of an sd_journal structure. +type Journal struct { + cjournal *C.sd_journal + mu sync.Mutex +} + +// JournalEntry represents all fields of a journal entry plus address fields. +type JournalEntry struct { + Fields map[string]string + Cursor string + RealtimeTimestamp uint64 + MonotonicTimestamp uint64 +} + +// Match is a convenience wrapper to describe filters supplied to AddMatch. +type Match struct { + Field string + Value string +} + +// String returns a string representation of a Match suitable for use with AddMatch. 
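Every wrapper method below resolves its libsystemd symbol lazily through getFunction from functions.go above, then calls it through one of the my_* C trampolines. A hedged sketch of that dlopen lookup in isolation, using the same soname list:

    package main

    import (
        "fmt"

        "github.com/coreos/pkg/dlopen"
    )

    func main() {
        // Try several sonames, then look up one symbol; the handle and
        // per-symbol caching that functions.go adds are omitted here.
        h, err := dlopen.GetHandle([]string{"libsystemd.so.0", "libsystemd.so"})
        if err != nil {
            panic(err)
        }
        defer h.Close()

        sym, err := h.GetSymbolPointer("sd_journal_open")
        if err != nil {
            panic(err)
        }
        fmt.Printf("sd_journal_open at %v\n", sym)
    }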
+func (m *Match) String() string { + return m.Field + "=" + m.Value +} + +// NewJournal returns a new Journal instance pointing to the local journal +func NewJournal() (j *Journal, err error) { + j = &Journal{} + + sd_journal_open, err := getFunction("sd_journal_open") + if err != nil { + return nil, err + } + + r := C.my_sd_journal_open(sd_journal_open, &j.cjournal, C.SD_JOURNAL_LOCAL_ONLY) + + if r < 0 { + return nil, fmt.Errorf("failed to open journal: %d", syscall.Errno(-r)) + } + + return j, nil +} + +// NewJournalFromDir returns a new Journal instance pointing to a journal residing +// in a given directory. The supplied path may be relative or absolute; if +// relative, it will be converted to an absolute path before being opened. +func NewJournalFromDir(path string) (j *Journal, err error) { + j = &Journal{} + + sd_journal_open_directory, err := getFunction("sd_journal_open_directory") + if err != nil { + return nil, err + } + + p := C.CString(path) + defer C.free(unsafe.Pointer(p)) + + r := C.my_sd_journal_open_directory(sd_journal_open_directory, &j.cjournal, p, 0) + if r < 0 { + return nil, fmt.Errorf("failed to open journal in directory %q: %d", path, syscall.Errno(-r)) + } + + return j, nil +} + +// Close closes a journal opened with NewJournal. +func (j *Journal) Close() error { + sd_journal_close, err := getFunction("sd_journal_close") + if err != nil { + return err + } + + j.mu.Lock() + C.my_sd_journal_close(sd_journal_close, j.cjournal) + j.mu.Unlock() + + return nil +} + +// AddMatch adds a match by which to filter the entries of the journal. +func (j *Journal) AddMatch(match string) error { + sd_journal_add_match, err := getFunction("sd_journal_add_match") + if err != nil { + return err + } + + m := C.CString(match) + defer C.free(unsafe.Pointer(m)) + + j.mu.Lock() + r := C.my_sd_journal_add_match(sd_journal_add_match, j.cjournal, unsafe.Pointer(m), C.size_t(len(match))) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to add match: %d", syscall.Errno(-r)) + } + + return nil +} + +// AddDisjunction inserts a logical OR in the match list. +func (j *Journal) AddDisjunction() error { + sd_journal_add_disjunction, err := getFunction("sd_journal_add_disjunction") + if err != nil { + return err + } + + j.mu.Lock() + r := C.my_sd_journal_add_disjunction(sd_journal_add_disjunction, j.cjournal) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to add a disjunction in the match list: %d", syscall.Errno(-r)) + } + + return nil +} + +// AddConjunction inserts a logical AND in the match list. +func (j *Journal) AddConjunction() error { + sd_journal_add_conjunction, err := getFunction("sd_journal_add_conjunction") + if err != nil { + return err + } + + j.mu.Lock() + r := C.my_sd_journal_add_conjunction(sd_journal_add_conjunction, j.cjournal) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to add a conjunction in the match list: %d", syscall.Errno(-r)) + } + + return nil +} + +// FlushMatches flushes all matches, disjunctions and conjunctions. +func (j *Journal) FlushMatches() { + sd_journal_flush_matches, err := getFunction("sd_journal_flush_matches") + if err != nil { + return + } + + j.mu.Lock() + C.my_sd_journal_flush_matches(sd_journal_flush_matches, j.cjournal) + j.mu.Unlock() +} + +// Next advances the read pointer into the journal by one entry. 
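A usage sketch for the match API above: matches on different fields combine as a logical AND (AddDisjunction inserts an OR where needed), and the unit name here is only an example value:

    package main

    import (
        "fmt"

        "github.com/coreos/go-systemd/sdjournal"
    )

    func main() {
        j, err := sdjournal.NewJournal()
        if err != nil {
            panic(err)
        }
        defer j.Close()

        // Keep only entries from one unit with priority err (3).
        m := sdjournal.Match{Field: sdjournal.SD_JOURNAL_FIELD_SYSTEMD_UNIT, Value: "sshd.service"}
        if err := j.AddMatch(m.String()); err != nil {
            panic(err)
        }
        if err := j.AddMatch(sdjournal.SD_JOURNAL_FIELD_PRIORITY + "=3"); err != nil {
            panic(err)
        }
        fmt.Println("filters installed")
    }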
+func (j *Journal) Next() (uint64, error) { + sd_journal_next, err := getFunction("sd_journal_next") + if err != nil { + return 0, err + } + + j.mu.Lock() + r := C.my_sd_journal_next(sd_journal_next, j.cjournal) + j.mu.Unlock() + + if r < 0 { + return 0, fmt.Errorf("failed to iterate journal: %d", syscall.Errno(-r)) + } + + return uint64(r), nil +} + +// NextSkip advances the read pointer by multiple entries at once, +// as specified by the skip parameter. +func (j *Journal) NextSkip(skip uint64) (uint64, error) { + sd_journal_next_skip, err := getFunction("sd_journal_next_skip") + if err != nil { + return 0, err + } + + j.mu.Lock() + r := C.my_sd_journal_next_skip(sd_journal_next_skip, j.cjournal, C.uint64_t(skip)) + j.mu.Unlock() + + if r < 0 { + return 0, fmt.Errorf("failed to iterate journal: %d", syscall.Errno(-r)) + } + + return uint64(r), nil +} + +// Previous sets the read pointer into the journal back by one entry. +func (j *Journal) Previous() (uint64, error) { + sd_journal_previous, err := getFunction("sd_journal_previous") + if err != nil { + return 0, err + } + + j.mu.Lock() + r := C.my_sd_journal_previous(sd_journal_previous, j.cjournal) + j.mu.Unlock() + + if r < 0 { + return 0, fmt.Errorf("failed to iterate journal: %d", syscall.Errno(-r)) + } + + return uint64(r), nil +} + +// PreviousSkip sets back the read pointer by multiple entries at once, +// as specified by the skip parameter. +func (j *Journal) PreviousSkip(skip uint64) (uint64, error) { + sd_journal_previous_skip, err := getFunction("sd_journal_previous_skip") + if err != nil { + return 0, err + } + + j.mu.Lock() + r := C.my_sd_journal_previous_skip(sd_journal_previous_skip, j.cjournal, C.uint64_t(skip)) + j.mu.Unlock() + + if r < 0 { + return 0, fmt.Errorf("failed to iterate journal: %d", syscall.Errno(-r)) + } + + return uint64(r), nil +} + +func (j *Journal) getData(field string) (unsafe.Pointer, C.int, error) { + sd_journal_get_data, err := getFunction("sd_journal_get_data") + if err != nil { + return nil, 0, err + } + + f := C.CString(field) + defer C.free(unsafe.Pointer(f)) + + var d unsafe.Pointer + var l C.size_t + + j.mu.Lock() + r := C.my_sd_journal_get_data(sd_journal_get_data, j.cjournal, f, &d, &l) + j.mu.Unlock() + + if r < 0 { + return nil, 0, fmt.Errorf("failed to read message: %d", syscall.Errno(-r)) + } + + return d, C.int(l), nil +} + +// GetData gets the data object associated with a specific field from the +// current journal entry. +func (j *Journal) GetData(field string) (string, error) { + d, l, err := j.getData(field) + if err != nil { + return "", err + } + + return C.GoStringN((*C.char)(d), l), nil +} + +// GetDataValue gets the data object associated with a specific field from the +// current journal entry, returning only the value of the object. +func (j *Journal) GetDataValue(field string) (string, error) { + val, err := j.GetData(field) + if err != nil { + return "", err + } + + return strings.SplitN(val, "=", 2)[1], nil +} + +// GetDataBytes gets the data object associated with a specific field from the +// current journal entry. +func (j *Journal) GetDataBytes(field string) ([]byte, error) { + d, l, err := j.getData(field) + if err != nil { + return nil, err + } + + return C.GoBytes(d, l), nil +} + +// GetDataValueBytes gets the data object associated with a specific field from the +// current journal entry, returning only the value of the object. 
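A minimal read loop built on Next and GetDataValue above; Next must be called once before the first read, and a return of 0 signals the end of the journal:

    package main

    import (
        "fmt"

        "github.com/coreos/go-systemd/sdjournal"
    )

    func main() {
        j, err := sdjournal.NewJournal()
        if err != nil {
            panic(err)
        }
        defer j.Close()

        // Print at most ten MESSAGE values from the start of the journal.
        for i := 0; i < 10; i++ {
            n, err := j.Next()
            if err != nil {
                panic(err)
            }
            if n == 0 {
                break // no more entries
            }
            msg, err := j.GetDataValue(sdjournal.SD_JOURNAL_FIELD_MESSAGE)
            if err != nil {
                continue // the entry may have no MESSAGE field
            }
            fmt.Println(msg)
        }
    }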
+func (j *Journal) GetDataValueBytes(field string) ([]byte, error) {
+	val, err := j.GetDataBytes(field)
+	if err != nil {
+		return nil, err
+	}
+
+	return bytes.SplitN(val, []byte("="), 2)[1], nil
+}
+
+// GetEntry returns a full representation of a journal entry with
+// all key-value pairs of data as well as address fields (cursor, realtime
+// timestamp and monotonic timestamp)
+func (j *Journal) GetEntry() (*JournalEntry, error) {
+	sd_journal_get_realtime_usec, err := getFunction("sd_journal_get_realtime_usec")
+	if err != nil {
+		return nil, err
+	}
+
+	sd_journal_get_monotonic_usec, err := getFunction("sd_journal_get_monotonic_usec")
+	if err != nil {
+		return nil, err
+	}
+
+	sd_journal_get_cursor, err := getFunction("sd_journal_get_cursor")
+	if err != nil {
+		return nil, err
+	}
+
+	sd_journal_restart_data, err := getFunction("sd_journal_restart_data")
+	if err != nil {
+		return nil, err
+	}
+
+	sd_journal_enumerate_data, err := getFunction("sd_journal_enumerate_data")
+	if err != nil {
+		return nil, err
+	}
+
+	j.mu.Lock()
+	defer j.mu.Unlock()
+
+	var r C.int
+	entry := &JournalEntry{Fields: make(map[string]string)}
+
+	var realtimeUsec C.uint64_t
+	r = C.my_sd_journal_get_realtime_usec(sd_journal_get_realtime_usec, j.cjournal, &realtimeUsec)
+	if r < 0 {
+		return nil, fmt.Errorf("failed to get realtime timestamp: %d", syscall.Errno(-r))
+	}
+
+	entry.RealtimeTimestamp = uint64(realtimeUsec)
+
+	var monotonicUsec C.uint64_t
+	var boot_id C.sd_id128_t
+
+	r = C.my_sd_journal_get_monotonic_usec(sd_journal_get_monotonic_usec, j.cjournal, &monotonicUsec, &boot_id)
+	if r < 0 {
+		return nil, fmt.Errorf("failed to get monotonic timestamp: %d", syscall.Errno(-r))
+	}
+
+	entry.MonotonicTimestamp = uint64(monotonicUsec)
+
+	var c *C.char
+	// since the pointer is mutated by sd_journal_get_cursor, need to wait
+	// until after the call to free the memory
+	r = C.my_sd_journal_get_cursor(sd_journal_get_cursor, j.cjournal, &c)
+	defer C.free(unsafe.Pointer(c))
+	if r < 0 {
+		return nil, fmt.Errorf("failed to get cursor: %d", syscall.Errno(-r))
+	}
+
+	entry.Cursor = C.GoString(c)
+
+	// Implements the JOURNAL_FOREACH_DATA_RETVAL macro from journal-internal.h
+	var d unsafe.Pointer
+	var l C.size_t
+	C.my_sd_journal_restart_data(sd_journal_restart_data, j.cjournal)
+	for {
+		r = C.my_sd_journal_enumerate_data(sd_journal_enumerate_data, j.cjournal, &d, &l)
+		if r == 0 {
+			break
+		}
+
+		if r < 0 {
+			return nil, fmt.Errorf("failed to read message field: %d", syscall.Errno(-r))
+		}
+
+		msg := C.GoStringN((*C.char)(d), C.int(l))
+		kv := strings.SplitN(msg, "=", 2)
+		if len(kv) < 2 {
+			return nil, fmt.Errorf("failed to parse field")
+		}
+
+		entry.Fields[kv[0]] = kv[1]
+	}
+
+	return entry, nil
+}
+
+// SetDataThreshold sets the data field size threshold for data returned by
+// GetData. To retrieve the complete data fields this threshold should be
+// turned off by setting it to 0, so that the library always returns the
+// complete data objects.
+func (j *Journal) SetDataThreshold(threshold uint64) error {
+	sd_journal_set_data_threshold, err := getFunction("sd_journal_set_data_threshold")
+	if err != nil {
+		return err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_set_data_threshold(sd_journal_set_data_threshold, j.cjournal, C.size_t(threshold))
+	j.mu.Unlock()
+
+	if r < 0 {
+		return fmt.Errorf("failed to set data threshold: %d", syscall.Errno(-r))
+	}
+
+	return nil
+}
+
+// GetRealtimeUsec gets the realtime (wallclock) timestamp of the current
+// journal entry.
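A usage sketch for GetEntry above, showing the address fields that come back alongside the full field map (it assumes the journal has at least one entry):

    package main

    import (
        "fmt"
        "time"

        "github.com/coreos/go-systemd/sdjournal"
    )

    func main() {
        j, err := sdjournal.NewJournal()
        if err != nil {
            panic(err)
        }
        defer j.Close()

        if n, err := j.Next(); err != nil || n == 0 {
            return // nothing to show
        }
        entry, err := j.GetEntry()
        if err != nil {
            panic(err)
        }
        fmt.Println("cursor:", entry.Cursor)
        fmt.Println("when:", time.Unix(0, int64(entry.RealtimeTimestamp)*int64(time.Microsecond)))
        fmt.Println("message:", entry.Fields[sdjournal.SD_JOURNAL_FIELD_MESSAGE])
    }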
+func (j *Journal) GetRealtimeUsec() (uint64, error) { + var usec C.uint64_t + + sd_journal_get_realtime_usec, err := getFunction("sd_journal_get_realtime_usec") + if err != nil { + return 0, err + } + + j.mu.Lock() + r := C.my_sd_journal_get_realtime_usec(sd_journal_get_realtime_usec, j.cjournal, &usec) + j.mu.Unlock() + + if r < 0 { + return 0, fmt.Errorf("failed to get realtime timestamp: %d", syscall.Errno(-r)) + } + + return uint64(usec), nil +} + +// GetMonotonicUsec gets the monotonic timestamp of the current journal entry. +func (j *Journal) GetMonotonicUsec() (uint64, error) { + var usec C.uint64_t + var boot_id C.sd_id128_t + + sd_journal_get_monotonic_usec, err := getFunction("sd_journal_get_monotonic_usec") + if err != nil { + return 0, err + } + + j.mu.Lock() + r := C.my_sd_journal_get_monotonic_usec(sd_journal_get_monotonic_usec, j.cjournal, &usec, &boot_id) + j.mu.Unlock() + + if r < 0 { + return 0, fmt.Errorf("failed to get monotonic timestamp: %d", syscall.Errno(-r)) + } + + return uint64(usec), nil +} + +// GetCursor gets the cursor of the current journal entry. +func (j *Journal) GetCursor() (string, error) { + sd_journal_get_cursor, err := getFunction("sd_journal_get_cursor") + if err != nil { + return "", err + } + + var d *C.char + // since the pointer is mutated by sd_journal_get_cursor, need to wait + // until after the call to free the memory + + j.mu.Lock() + r := C.my_sd_journal_get_cursor(sd_journal_get_cursor, j.cjournal, &d) + j.mu.Unlock() + defer C.free(unsafe.Pointer(d)) + + if r < 0 { + return "", fmt.Errorf("failed to get cursor: %d", syscall.Errno(-r)) + } + + cursor := C.GoString(d) + + return cursor, nil +} + +// TestCursor checks whether the current position in the journal matches the +// specified cursor +func (j *Journal) TestCursor(cursor string) error { + sd_journal_test_cursor, err := getFunction("sd_journal_test_cursor") + if err != nil { + return err + } + + c := C.CString(cursor) + defer C.free(unsafe.Pointer(c)) + + j.mu.Lock() + r := C.my_sd_journal_test_cursor(sd_journal_test_cursor, j.cjournal, c) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to test to cursor %q: %d", cursor, syscall.Errno(-r)) + } + + return nil +} + +// SeekHead seeks to the beginning of the journal, i.e. the oldest available +// entry. +func (j *Journal) SeekHead() error { + sd_journal_seek_head, err := getFunction("sd_journal_seek_head") + if err != nil { + return err + } + + j.mu.Lock() + r := C.my_sd_journal_seek_head(sd_journal_seek_head, j.cjournal) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to seek to head of journal: %d", syscall.Errno(-r)) + } + + return nil +} + +// SeekTail may be used to seek to the end of the journal, i.e. the most recent +// available entry. +func (j *Journal) SeekTail() error { + sd_journal_seek_tail, err := getFunction("sd_journal_seek_tail") + if err != nil { + return err + } + + j.mu.Lock() + r := C.my_sd_journal_seek_tail(sd_journal_seek_tail, j.cjournal) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to seek to tail of journal: %d", syscall.Errno(-r)) + } + + return nil +} + +// SeekRealtimeUsec seeks to the entry with the specified realtime (wallclock) +// timestamp, i.e. CLOCK_REALTIME. 
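GetCursor and TestCursor above pair with SeekCursor (defined just below) to build resume-after-restart readers. A hedged sketch; in practice the cursor string would be saved to and loaded from persistent storage, and the journal is assumed non-empty:

    package main

    import (
        "fmt"

        "github.com/coreos/go-systemd/sdjournal"
    )

    // resumeFrom seeks back to a previously saved cursor and verifies the
    // position. Seeking only takes effect on the next iteration step, so
    // Next is called before TestCursor.
    func resumeFrom(j *sdjournal.Journal, cursor string) error {
        if err := j.SeekCursor(cursor); err != nil {
            return err
        }
        if _, err := j.Next(); err != nil {
            return err
        }
        return j.TestCursor(cursor)
    }

    func main() {
        j, err := sdjournal.NewJournal()
        if err != nil {
            panic(err)
        }
        defer j.Close()

        if _, err := j.Next(); err != nil {
            panic(err)
        }
        saved, err := j.GetCursor() // persist this string somewhere durable
        if err != nil {
            panic(err)
        }
        fmt.Println("resume ok:", resumeFrom(j, saved) == nil)
    }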
+func (j *Journal) SeekRealtimeUsec(usec uint64) error {
+	sd_journal_seek_realtime_usec, err := getFunction("sd_journal_seek_realtime_usec")
+	if err != nil {
+		return err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_seek_realtime_usec(sd_journal_seek_realtime_usec, j.cjournal, C.uint64_t(usec))
+	j.mu.Unlock()
+
+	if r < 0 {
+		return fmt.Errorf("failed to seek to %d: %d", usec, syscall.Errno(-r))
+	}
+
+	return nil
+}
+
+// SeekCursor seeks to a concrete journal cursor.
+func (j *Journal) SeekCursor(cursor string) error {
+	sd_journal_seek_cursor, err := getFunction("sd_journal_seek_cursor")
+	if err != nil {
+		return err
+	}
+
+	c := C.CString(cursor)
+	defer C.free(unsafe.Pointer(c))
+
+	j.mu.Lock()
+	r := C.my_sd_journal_seek_cursor(sd_journal_seek_cursor, j.cjournal, c)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return fmt.Errorf("failed to seek to cursor %q: %d", cursor, syscall.Errno(-r))
+	}
+
+	return nil
+}
+
+// Wait will synchronously wait until the journal gets changed. The maximum time
+// this call sleeps may be controlled with the timeout parameter. If
+// sdjournal.IndefiniteWait is passed as the timeout parameter, Wait will
+// wait indefinitely for a journal change.
+func (j *Journal) Wait(timeout time.Duration) int {
+	var to uint64
+
+	sd_journal_wait, err := getFunction("sd_journal_wait")
+	if err != nil {
+		return -1
+	}
+
+	if timeout == IndefiniteWait {
+		// sd_journal_wait(3) calls for a (uint64_t) -1 to be passed to signify
+		// indefinite wait, but using a -1 overflows our C.uint64_t, so we use an
+		// equivalent hex value.
+		to = 0xffffffffffffffff
+	} else {
+		// sd_journal_wait(3) takes a relative timeout in microseconds; the
+		// previous conversion (Unix()/1000) divided an absolute epoch-seconds
+		// value by 1000 and produced a meaningless timeout.
+		to = uint64(timeout / time.Microsecond)
+	}
+	j.mu.Lock()
+	r := C.my_sd_journal_wait(sd_journal_wait, j.cjournal, C.uint64_t(to))
+	j.mu.Unlock()
+
+	return int(r)
+}
+
+// GetUsage returns the journal disk space usage, in bytes.
+func (j *Journal) GetUsage() (uint64, error) {
+	var out C.uint64_t
+
+	sd_journal_get_usage, err := getFunction("sd_journal_get_usage")
+	if err != nil {
+		return 0, err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_get_usage(sd_journal_get_usage, j.cjournal, &out)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return 0, fmt.Errorf("failed to get journal disk space usage: %d", syscall.Errno(-r))
+	}
+
+	return uint64(out), nil
+}
+
+// GetUniqueValues returns all unique values for a given field.
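Together, SeekTail, Next, and Wait above support a tail -f style loop: drain to the end of the journal, then block until journald signals new entries. A minimal sketch:

    package main

    import (
        "fmt"

        "github.com/coreos/go-systemd/sdjournal"
    )

    func main() {
        j, err := sdjournal.NewJournal()
        if err != nil {
            panic(err)
        }
        defer j.Close()

        if err := j.SeekTail(); err != nil {
            panic(err)
        }
        for {
            n, err := j.Next()
            if err != nil {
                panic(err)
            }
            if n == 0 {
                // At the tail: block until journald reports a change.
                j.Wait(sdjournal.IndefiniteWait)
                continue
            }
            msg, err := j.GetDataValue(sdjournal.SD_JOURNAL_FIELD_MESSAGE)
            if err == nil {
                fmt.Println(msg)
            }
        }
    }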
+func (j *Journal) GetUniqueValues(field string) ([]string, error) { + var result []string + + sd_journal_query_unique, err := getFunction("sd_journal_query_unique") + if err != nil { + return nil, err + } + + sd_journal_enumerate_unique, err := getFunction("sd_journal_enumerate_unique") + if err != nil { + return nil, err + } + + sd_journal_restart_unique, err := getFunction("sd_journal_restart_unique") + if err != nil { + return nil, err + } + + j.mu.Lock() + defer j.mu.Unlock() + + f := C.CString(field) + defer C.free(unsafe.Pointer(f)) + + r := C.my_sd_journal_query_unique(sd_journal_query_unique, j.cjournal, f) + + if r < 0 { + return nil, fmt.Errorf("failed to query journal: %d", syscall.Errno(-r)) + } + + // Implements the SD_JOURNAL_FOREACH_UNIQUE macro from sd-journal.h + var d unsafe.Pointer + var l C.size_t + C.my_sd_journal_restart_unique(sd_journal_restart_unique, j.cjournal) + for { + r = C.my_sd_journal_enumerate_unique(sd_journal_enumerate_unique, j.cjournal, &d, &l) + if r == 0 { + break + } + + if r < 0 { + return nil, fmt.Errorf("failed to read message field: %d", syscall.Errno(-r)) + } + + msg := C.GoStringN((*C.char)(d), C.int(l)) + kv := strings.SplitN(msg, "=", 2) + if len(kv) < 2 { + return nil, fmt.Errorf("failed to parse field") + } + + result = append(result, kv[1]) + } + + return result, nil +} diff --git a/vendor/github.com/coreos/go-systemd/sdjournal/read.go b/vendor/github.com/coreos/go-systemd/sdjournal/read.go new file mode 100644 index 000000000..b581f03b4 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/sdjournal/read.go @@ -0,0 +1,260 @@ +// Copyright 2015 RedHat, Inc. +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sdjournal + +import ( + "errors" + "fmt" + "io" + "log" + "strings" + "time" +) + +var ( + ErrExpired = errors.New("Timeout expired") +) + +// JournalReaderConfig represents options to drive the behavior of a JournalReader. +type JournalReaderConfig struct { + // The Since, NumFromTail and Cursor options are mutually exclusive and + // determine where the reading begins within the journal. The order in which + // options are written is exactly the order of precedence. + Since time.Duration // start relative to a Duration from now + NumFromTail uint64 // start relative to the tail + Cursor string // start relative to the cursor + + // Show only journal entries whose fields match the supplied values. If + // the array is empty, entries will not be filtered. + Matches []Match + + // If not empty, the journal instance will point to a journal residing + // in this directory. The supplied path may be relative or absolute. + Path string +} + +// JournalReader is an io.ReadCloser which provides a simple interface for iterating through the +// systemd journal. A JournalReader is not safe for concurrent use by multiple goroutines. 
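A usage sketch for JournalReaderConfig as documented above (NewJournalReader and Follow are defined just below); the unit name is only an example:

    package main

    import (
        "os"
        "time"

        "github.com/coreos/go-systemd/sdjournal"
    )

    func main() {
        // Start ten entries back from the tail, filtered to one unit,
        // then stream new entries to stdout for a minute.
        r, err := sdjournal.NewJournalReader(sdjournal.JournalReaderConfig{
            NumFromTail: 10,
            Matches: []sdjournal.Match{
                {Field: sdjournal.SD_JOURNAL_FIELD_SYSTEMD_UNIT, Value: "sshd.service"},
            },
        })
        if err != nil {
            panic(err)
        }
        defer r.Close()

        // Follow returns ErrExpired when the until channel fires.
        if err := r.Follow(time.After(time.Minute), os.Stdout); err != nil && err != sdjournal.ErrExpired {
            panic(err)
        }
    }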
+type JournalReader struct { + journal *Journal + msgReader *strings.Reader +} + +// NewJournalReader creates a new JournalReader with configuration options that are similar to the +// systemd journalctl tool's iteration and filtering features. +func NewJournalReader(config JournalReaderConfig) (*JournalReader, error) { + r := &JournalReader{} + + // Open the journal + var err error + if config.Path != "" { + r.journal, err = NewJournalFromDir(config.Path) + } else { + r.journal, err = NewJournal() + } + if err != nil { + return nil, err + } + + // Add any supplied matches + for _, m := range config.Matches { + r.journal.AddMatch(m.String()) + } + + // Set the start position based on options + if config.Since != 0 { + // Start based on a relative time + start := time.Now().Add(config.Since) + if err := r.journal.SeekRealtimeUsec(uint64(start.UnixNano() / 1000)); err != nil { + return nil, err + } + } else if config.NumFromTail != 0 { + // Start based on a number of lines before the tail + if err := r.journal.SeekTail(); err != nil { + return nil, err + } + + // Move the read pointer into position near the tail. Go one further than + // the option so that the initial cursor advancement positions us at the + // correct starting point. + skip, err := r.journal.PreviousSkip(config.NumFromTail + 1) + if err != nil { + return nil, err + } + // If we skipped fewer lines than expected, we have reached journal start. + // Thus, we seek to head so that next invocation can read the first line. + if skip != config.NumFromTail+1 { + if err := r.journal.SeekHead(); err != nil { + return nil, err + } + } + } else if config.Cursor != "" { + // Start based on a custom cursor + if err := r.journal.SeekCursor(config.Cursor); err != nil { + return nil, err + } + } + + return r, nil +} + +// Read reads entries from the journal. Read follows the Reader interface so +// it must be able to read a specific amount of bytes. Journald on the other +// hand only allows us to read full entries of arbitrary size (without byte +// granularity). JournalReader is therefore internally buffering entries that +// don't fit in the read buffer. Callers should keep calling until 0 and/or an +// error is returned. +func (r *JournalReader) Read(b []byte) (int, error) { + var err error + + if r.msgReader == nil { + var c uint64 + + // Advance the journal cursor. It has to be called at least one time + // before reading + c, err = r.journal.Next() + + // An unexpected error + if err != nil { + return 0, err + } + + // EOF detection + if c == 0 { + return 0, io.EOF + } + + // Build a message + var msg string + msg, err = r.buildMessage() + + if err != nil { + return 0, err + } + r.msgReader = strings.NewReader(msg) + } + + // Copy and return the message + var sz int + sz, err = r.msgReader.Read(b) + if err == io.EOF { + // The current entry has been fully read. Don't propagate this + // EOF, so the next entry can be read at the next Read() + // iteration. + r.msgReader = nil + return sz, nil + } + if err != nil { + return sz, err + } + if r.msgReader.Len() == 0 { + r.msgReader = nil + } + + return sz, nil +} + +// Close closes the JournalReader's handle to the journal. +func (r *JournalReader) Close() error { + return r.journal.Close() +} + +// Rewind attempts to rewind the JournalReader to the first entry. +func (r *JournalReader) Rewind() error { + r.msgReader = nil + return r.journal.SeekHead() +} + +// Follow synchronously follows the JournalReader, writing each new journal entry to writer. 
+// Follow synchronously follows the JournalReader, writing each new journal entry to writer. The
+// follow will continue until a single time.Time is received on the until channel.
+func (r *JournalReader) Follow(until <-chan time.Time, writer io.Writer) (err error) {
+
+	// Process journal entries and events. Entries are flushed until the tail or
+	// timeout is reached, and then we wait for new events or the timeout.
+	msg := make([]byte, 64<<10) // 64 KiB entry buffer
+process:
+	for {
+		var c int
+		c, err = r.Read(msg)
+		if err != nil && err != io.EOF {
+			break process
+		}
+
+		select {
+		case <-until:
+			return ErrExpired
+		default:
+			if c > 0 {
+				if _, err = writer.Write(msg[:c]); err != nil {
+					break process
+				}
+				continue process
+			}
+		}
+
+		// We're at the tail, so wait for new events or time out.
+		// Holds journal events to process. Tightly bounded for now unless there's a
+		// reason to unblock the journal watch routine more quickly.
+		events := make(chan int, 1)
+		pollDone := make(chan bool, 1)
+		go func() {
+			for {
+				select {
+				case <-pollDone:
+					return
+				default:
+					events <- r.journal.Wait(time.Second)
+				}
+			}
+		}()
+
+		select {
+		case <-until:
+			pollDone <- true
+			return ErrExpired
+		case e := <-events:
+			pollDone <- true
+			switch e {
+			case SD_JOURNAL_NOP, SD_JOURNAL_APPEND, SD_JOURNAL_INVALIDATE:
+				// TODO: need to account for any of these?
+			default:
+				log.Printf("Received unknown event: %d\n", e)
+			}
+			continue process
+		}
+	}
+
+	return
+}
+
+// buildMessage returns a string representing the current journal entry in a simple format which
+// includes the entry timestamp and MESSAGE field.
+func (r *JournalReader) buildMessage() (string, error) {
+	var msg string
+	var usec uint64
+	var err error
+
+	if msg, err = r.journal.GetData("MESSAGE"); err != nil {
+		return "", err
+	}
+
+	if usec, err = r.journal.GetRealtimeUsec(); err != nil {
+		return "", err
+	}
+
+	timestamp := time.Unix(0, int64(usec)*int64(time.Microsecond))
+
+	return fmt.Sprintf("%s %s\n", timestamp, msg), nil
+}
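Tying read.go together, one plausible way to tail the journal for a bounded time; the ten-entry backlog and the thirty-second cutoff are arbitrary choices, not anything this file prescribes (assumed imports: log, os, time, and this package as sdjournal):

	r, err := sdjournal.NewJournalReader(sdjournal.JournalReaderConfig{
		NumFromTail: 10, // replay the last ten entries before following
	})
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	// Follow reports a reached deadline via ErrExpired; anything else is a
	// real read or write error.
	if err := r.Follow(time.After(30*time.Second), os.Stdout); err != nil && err != sdjournal.ErrExpired {
		log.Print(err)
	}
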
diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE
new file mode 100644
index 000000000..e06d20818
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE
new file mode 100644
index 000000000..b39ddfa5c
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/pkg/README.md b/vendor/github.com/coreos/pkg/README.md
new file mode 100644
index 000000000..ca68a07f0
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/README.md
@@ -0,0 +1,4 @@
+a collection of go utility packages
+
+[![Build Status](https://travis-ci.org/coreos/pkg.png?branch=master)](https://travis-ci.org/coreos/pkg)
+[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/coreos/pkg)
diff --git a/vendor/github.com/coreos/pkg/dlopen/dlopen.go b/vendor/github.com/coreos/pkg/dlopen/dlopen.go
new file mode 100644
index 000000000..23774f612
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/dlopen/dlopen.go
@@ -0,0 +1,83 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package dlopen provides some convenience functions to dlopen a library and
+// get its symbols.
+package dlopen
+
+// #cgo LDFLAGS: -ldl
+// #include <stdlib.h>
+// #include <dlfcn.h>
+import "C"
+import (
+	"errors"
+	"fmt"
+	"unsafe"
+)
+
+// ErrSoNotFound is returned when none of the candidate library names can be opened.
+var ErrSoNotFound = errors.New("unable to open a handle to the library")
+
+// LibHandle represents an open handle to a library (.so)
+type LibHandle struct {
+	Handle  unsafe.Pointer
+	Libname string
+}
+
+// GetHandle tries to get a handle to a library (.so), attempting to access it
+// by the names specified in libs and returning the first that is successfully
+// opened. Callers are responsible for closing the handle. If no library can
+// be successfully opened, an error is returned.
+func GetHandle(libs []string) (*LibHandle, error) {
+	for _, name := range libs {
+		libname := C.CString(name)
+		defer C.free(unsafe.Pointer(libname))
+		handle := C.dlopen(libname, C.RTLD_LAZY)
+		if handle != nil {
+			h := &LibHandle{
+				Handle:  handle,
+				Libname: name,
+			}
+			return h, nil
+		}
+	}
+	return nil, ErrSoNotFound
+}
+
+// GetSymbolPointer takes a symbol name and returns a pointer to the symbol.
+func (l *LibHandle) GetSymbolPointer(symbol string) (unsafe.Pointer, error) {
+	sym := C.CString(symbol)
+	defer C.free(unsafe.Pointer(sym))
+
+	C.dlerror()
+	p := C.dlsym(l.Handle, sym)
+	e := C.dlerror()
+	if e != nil {
+		return nil, fmt.Errorf("error resolving symbol %q: %v", symbol, errors.New(C.GoString(e)))
+	}
+
+	return p, nil
+}
+
+// Close closes a LibHandle.
+func (l *LibHandle) Close() error {
+	C.dlerror()
+	C.dlclose(l.Handle)
+	e := C.dlerror()
+	if e != nil {
+		return fmt.Errorf("error closing %v: %v", l.Libname, errors.New(C.GoString(e)))
+	}
+
+	return nil
+}
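This package is the mechanism the sdjournal code earlier in this diff builds on: journal symbols are resolved from libsystemd at runtime instead of being linked at build time. A sketch of that pattern; the soname list and the symbol name are assumptions chosen to mirror common libsystemd installs (assumed imports: log and this package as dlopen):

	h, err := dlopen.GetHandle([]string{"libsystemd.so.0", "libsystemd.so"})
	if err != nil {
		log.Fatal(err) // none of the candidate sonames could be opened
	}
	defer h.Close()

	// The raw pointer is what a cgo trampoline, like my_sd_journal_query_unique
	// above, would take as its function argument.
	p, err := h.GetSymbolPointer("sd_journal_open")
	if err != nil {
		log.Fatal(err)
	}
	_ = p
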
diff --git a/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go b/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go
new file mode 100644
index 000000000..48a660104
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go
@@ -0,0 +1,56 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build linux
+
+package dlopen
+
+// #include <string.h>
+// #include <stdlib.h>
+//
+// int
+// my_strlen(void *f, const char *s)
+// {
+//   size_t (*strlen)(const char *);
+//
+//   strlen = (size_t (*)(const char *))f;
+//   return strlen(s);
+// }
+import "C"
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+func strlen(libs []string, s string) (int, error) {
+	h, err := GetHandle(libs)
+	if err != nil {
+		return -1, fmt.Errorf(`couldn't get a handle to the library: %v`, err)
+	}
+	defer h.Close()
+
+	f := "strlen"
+	cs := C.CString(s)
+	defer C.free(unsafe.Pointer(cs))
+
+	strlen, err := h.GetSymbolPointer(f)
+	if err != nil {
+		return -1, fmt.Errorf(`couldn't get symbol %q: %v`, f, err)
+	}
+
+	n := C.my_strlen(strlen, cs)
+
+	return int(n), nil
+}
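For completeness, a sketch of how the example's strlen helper might be called; the libc soname is an assumption that matches typical glibc systems (assumed imports: fmt and log):

	n, err := strlen([]string{"libc.so.6"}, "hello, world!")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n) // prints 13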