39 files changed, 601 insertions, 199 deletions
@@ -152,6 +152,12 @@ libpodimage: ## Build the libpod image
 dbuild: libpodimage
 	${CONTAINER_RUNTIME} run --name=${LIBPOD_INSTANCE} --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${LIBPOD_IMAGE} make all
 
+dbuild-podman-remote: libpodimage
+	${CONTAINER_RUNTIME} run --name=${LIBPOD_INSTANCE} --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${LIBPOD_IMAGE} go build -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS) remoteclient" -o bin/podman-remote $(PROJECT)/cmd/podman
+
+dbuild-podman-remote-darwin: libpodimage
+	${CONTAINER_RUNTIME} run --name=${LIBPOD_INSTANCE} --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${LIBPOD_IMAGE} env GOOS=darwin go build -ldflags '$(LDFLAGS_PODMAN)' -tags "remoteclient containers_image_openpgp exclude_graphdriver_devicemapper" -o bin/podman-remote-darwin $(PROJECT)/cmd/podman
+
 test: libpodimage ## Run tests on built image
 	${CONTAINER_RUNTIME} run -e STORAGE_OPTIONS="--storage-driver=vfs" -e TESTFLAGS -e OCI_RUNTIME -e CGROUP_MANAGER=cgroupfs -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${LIBPOD_IMAGE} make clean all localunit install.catatonit localintegration
@@ -45,7 +45,11 @@ This project tests all builds against each supported version of Fedora, the late
 Podman can also generate Kubernetes YAML based on a container or Pod (see
 [podman-generate-kube](https://github.com/containers/libpod/blob/master/docs/podman-generate-kube.1.md)),
 which allows for an easy transition from a local development environment
-to a production Kubernetes cluster.
+to a production Kubernetes cluster. If Kubernetes does not fit your requirements,
+there are other third-party tools that support the docker-compose format such as
+[kompose](https://github.com/kubernetes/kompose/) and
+[podman-compose](https://github.com/muayyad-alsadi/podman-compose)
+that might be appropriate for your environment.
 
 ## OCI Projects Plans
diff --git a/cmd/podman/commands.go b/cmd/podman/commands.go
index c36452cfe..7680d6df2 100644
--- a/cmd/podman/commands.go
+++ b/cmd/podman/commands.go
@@ -19,7 +19,6 @@ func getMainCommands() []*cobra.Command {
 		_mountCommand,
 		_portCommand,
 		_refreshCommand,
-		_restartCommand,
 		_searchCommand,
 		_statsCommand,
 		_topCommand,
@@ -50,7 +49,6 @@ func getContainerSubCommands() []*cobra.Command {
 		_portCommand,
 		_pruneContainersCommand,
 		_refreshCommand,
-		_restartCommand,
 		_restoreCommand,
 		_runlabelCommand,
 		_statsCommand,
diff --git a/cmd/podman/container.go b/cmd/podman/container.go
index 7733c8eef..28e0f0e4a 100644
--- a/cmd/podman/container.go
+++ b/cmd/podman/container.go
@@ -60,6 +60,7 @@ var (
 		_listSubCommand,
 		_logsCommand,
 		_pauseCommand,
+		_restartCommand,
 		_runCommand,
 		_rmCommand,
 		_startCommand,
diff --git a/cmd/podman/images.go b/cmd/podman/images.go
index f584c1131..41aa213a8 100644
--- a/cmd/podman/images.go
+++ b/cmd/podman/images.go
@@ -243,7 +243,7 @@ func getImagesTemplateOutput(ctx context.Context, images []*adapter.ContainerIma
 		// If all is false and the image doesn't have a name, check to see if the top layer of the image is a parent
 		// to another image's top layer. If it is, then it is an intermediate image so don't print out if the --all flag
 		// is not set.
-		isParent, err := img.IsParent()
+		isParent, err := img.IsParent(ctx)
 		if err != nil {
 			logrus.Errorf("error checking if image is a parent %q: %v", img.ID(), err)
 		}
diff --git a/cmd/podman/images_prune.go b/cmd/podman/images_prune.go
index 84181d0a2..c522c8b15 100644
--- a/cmd/podman/images_prune.go
+++ b/cmd/podman/images_prune.go
@@ -45,7 +45,7 @@ func pruneImagesCmd(c *cliconfig.PruneImagesValues) error {
 
 	// Call prune; if any cids are returned, print them and then
 	// return err in case an error also came up
-	pruneCids, err := runtime.PruneImages(c.All)
+	pruneCids, err := runtime.PruneImages(getContext(), c.All)
 	if len(pruneCids) > 0 {
 		for _, cid := range pruneCids {
 			fmt.Println(cid)
diff --git a/cmd/podman/main.go b/cmd/podman/main.go
index 15f4a5d71..392dfe542 100644
--- a/cmd/podman/main.go
+++ b/cmd/podman/main.go
@@ -50,6 +50,7 @@ var mainCommands = []*cobra.Command{
 	&_psCommand,
 	_pullCommand,
 	_pushCommand,
+	_restartCommand,
 	_rmCommand,
 	&_rmiCommand,
 	_runCommand,
diff --git a/cmd/podman/restart.go b/cmd/podman/restart.go
index 5a9f3043a..9ab2dd528 100644
--- a/cmd/podman/restart.go
+++ b/cmd/podman/restart.go
@@ -2,11 +2,9 @@ package main
 
 import (
 	"github.com/containers/libpod/cmd/podman/cliconfig"
-	"github.com/containers/libpod/cmd/podman/libpodruntime"
-	"github.com/containers/libpod/cmd/podman/shared"
 	"github.com/containers/libpod/libpod"
+	"github.com/containers/libpod/pkg/adapter"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
 
@@ -22,7 +20,6 @@ var (
 		RunE: func(cmd *cobra.Command, args []string) error {
 			restartCommand.InputArgs = args
 			restartCommand.GlobalFlags = MainGlobalOpts
-			restartCommand.Remote = remoteclient
 			return restartCmd(&restartCommand)
 		},
 		Args: func(cmd *cobra.Command, args []string) error {
@@ -49,83 +46,30 @@ func init() {
 }
 
 func restartCmd(c *cliconfig.RestartValues) error {
-	var (
-		restartFuncs      []shared.ParallelWorkerInput
-		containers        []*libpod.Container
-		restartContainers []*libpod.Container
-	)
-
-	args := c.InputArgs
-	runOnly := c.Running
 	all := c.All
-	if len(args) < 1 && !c.Latest && !all {
+	if len(c.InputArgs) < 1 && !c.Latest && !all {
 		return errors.Wrapf(libpod.ErrInvalidArg, "you must provide at least one container name or ID")
 	}
 
-	runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
+	runtime, err := adapter.GetRuntime(&c.PodmanCommand)
 	if err != nil {
 		return errors.Wrapf(err, "error creating libpod runtime")
 	}
 	defer runtime.Shutdown(false)
 
-	timeout := c.Timeout
-	useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed
-
-	// Handle --latest
-	if c.Latest {
-		lastCtr, err := runtime.GetLatestContainer()
-		if err != nil {
-			return errors.Wrapf(err, "unable to get latest container")
-		}
-		restartContainers = append(restartContainers, lastCtr)
-	} else if runOnly {
-		containers, err = getAllOrLatestContainers(&c.PodmanCommand, runtime, libpod.ContainerStateRunning, "running")
-		if err != nil {
-			return err
-		}
-		restartContainers = append(restartContainers, containers...)
-	} else if all {
-		containers, err = runtime.GetAllContainers()
-		if err != nil {
-			return err
-		}
-		restartContainers = append(restartContainers, containers...)
-	} else {
-		for _, id := range args {
-			ctr, err := runtime.LookupContainer(id)
-			if err != nil {
-				return err
+	ok, failures, err := runtime.Restart(getContext(), c)
+	if err != nil {
+		if errors.Cause(err) == libpod.ErrNoSuchCtr {
+			if len(c.InputArgs) > 1 {
+				exitCode = 125
+			} else {
+				exitCode = 1
 			}
-			restartContainers = append(restartContainers, ctr)
 		}
+		return err
 	}
-
-	maxWorkers := shared.Parallelize("restart")
-	if c.GlobalIsSet("max-workers") {
-		maxWorkers = c.GlobalFlags.MaxWorks
+	if len(failures) > 0 {
+		exitCode = 125
 	}
-
-	logrus.Debugf("Setting maximum workers to %d", maxWorkers)
-
-	// We now have a slice of all the containers to be restarted. Iterate them to
-	// create restart Funcs with a timeout as needed
-	for _, ctr := range restartContainers {
-		con := ctr
-		ctrTimeout := ctr.StopTimeout()
-		if useTimeout {
-			ctrTimeout = timeout
-		}
-
-		f := func() error {
-			return con.RestartWithTimeout(getContext(), ctrTimeout)
-		}
-
-		restartFuncs = append(restartFuncs, shared.ParallelWorkerInput{
-			ContainerID:  con.ID(),
-			ParallelFunc: f,
-		})
-	}
-
-	restartErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, restartFuncs)
-	return printParallelOutput(restartErrors, errCount)
+	return printCmdResults(ok, failures)
 }
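The reworked restartCmd above no longer walks containers itself; it delegates to the adapter and only maps the outcome onto an exit code. A minimal sketch of that convention, with hypothetical stand-ins (restartAll for the adapter's Restart, inline printing for printCmdResults):

```go
package main

import (
	"fmt"
	"os"
)

// restartAll stands in for the adapter's Restart: it returns the IDs that
// succeeded and a map of per-container failures.
func restartAll(ids []string) (ok []string, failures map[string]error) {
	failures = map[string]error{}
	for _, id := range ids {
		if id == "bad" { // hypothetical failure
			failures[id] = fmt.Errorf("container %s not running", id)
			continue
		}
		ok = append(ok, id)
	}
	return ok, failures
}

func main() {
	exitCode := 0
	ok, failures := restartAll([]string{"web", "bad"})
	for _, id := range ok {
		fmt.Println(id) // successes print their ID, as printCmdResults does
	}
	for id, err := range failures {
		fmt.Fprintf(os.Stderr, "error restarting %s: %v\n", id, err)
	}
	if len(failures) > 0 {
		exitCode = 125 // mirror the patch: any per-container failure yields 125
	}
	os.Exit(exitCode)
}
```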
diff --git a/cmd/podman/rmi.go b/cmd/podman/rmi.go
index 7ec875d5b..be7c81dab 100644
--- a/cmd/podman/rmi.go
+++ b/cmd/podman/rmi.go
@@ -97,7 +97,7 @@ func rmiCmd(c *cliconfig.RmiValues) error {
 			return errors.New("unable to delete all images; re-run the rmi command again.")
 		}
 		for _, i := range imagesToDelete {
-			isParent, err := i.IsParent()
+			isParent, err := i.IsParent(ctx)
 			if err != nil {
 				return err
 			}
diff --git a/cmd/podman/system_df.go b/cmd/podman/system_df.go
index 16a8ad120..aa0ead022 100644
--- a/cmd/podman/system_df.go
+++ b/cmd/podman/system_df.go
@@ -201,7 +201,7 @@ func imageUniqueSize(ctx context.Context, images []*image.Image) (map[string]uin
 	for _, img := range images {
 		parentImg := img
 		for {
-			next, err := parentImg.GetParent()
+			next, err := parentImg.GetParent(ctx)
 			if err != nil {
 				return nil, errors.Wrapf(err, "error getting parent of image %s", parentImg.ID())
 			}
@@ -246,11 +246,11 @@ func getImageDiskUsage(ctx context.Context, images []*image.Image, imageUsedbyCi
 
 		unreclaimableSize += imageUsedSize(img, imgUniqueSizeMap, imageUsedbyCintainerMap, imageUsedbyActiveContainerMap)
 
-		isParent, err := img.IsParent()
+		isParent, err := img.IsParent(ctx)
 		if err != nil {
 			return imageDiskUsage, err
 		}
-		parent, err := img.GetParent()
+		parent, err := img.GetParent(ctx)
 		if err != nil {
 			return imageDiskUsage, errors.Wrapf(err, "error getting parent of image %s", img.ID())
 		}
@@ -437,11 +437,11 @@ func getImageVerboseDiskUsage(ctx context.Context, images []*image.Image, images
 		return imagesVerboseDiskUsage, errors.Wrapf(err, "error getting unique size of images")
 	}
 	for _, img := range images {
-		isParent, err := img.IsParent()
+		isParent, err := img.IsParent(ctx)
 		if err != nil {
 			return imagesVerboseDiskUsage, errors.Wrapf(err, "error checking if %s is a parent image", img.ID())
 		}
-		parent, err := img.GetParent()
+		parent, err := img.GetParent(ctx)
 		if err != nil {
 			return imagesVerboseDiskUsage, errors.Wrapf(err, "error getting parent of image %s", img.ID())
 		}
diff --git a/cmd/podman/system_prune.go b/cmd/podman/system_prune.go
index 8900e2644..2c1c5607a 100644
--- a/cmd/podman/system_prune.go
+++ b/cmd/podman/system_prune.go
@@ -110,7 +110,7 @@ Are you sure you want to continue? [y/N] `, volumeString)
 
 	// Call prune; if any cids are returned, print them and then
 	// return err in case an error also came up
-	pruneCids, err := runtime.PruneImages(c.All)
+	pruneCids, err := runtime.PruneImages(ctx, c.All)
 	if len(pruneCids) > 0 {
 		fmt.Println("Deleted Images")
 		for _, cid := range pruneCids {
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 27773edcf..b965a4640 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -355,8 +355,8 @@ func (i *Image) TopLayer() string {
 // outside the context of images
 // TODO: the force param does nothing as of now. Need to move container
 // handling logic here eventually.
-func (i *Image) Remove(force bool) error {
-	parent, err := i.GetParent()
+func (i *Image) Remove(ctx context.Context, force bool) error {
+	parent, err := i.GetParent(ctx)
 	if err != nil {
 		return err
 	}
@@ -365,11 +365,11 @@ func (i *Image) Remove(force bool) error {
 	}
 	i.newImageEvent(events.Remove)
 	for parent != nil {
-		nextParent, err := parent.GetParent()
+		nextParent, err := parent.GetParent(ctx)
 		if err != nil {
 			return err
 		}
-		children, err := parent.GetChildren()
+		children, err := parent.GetChildren(ctx)
 		if err != nil {
 			return err
 		}
@@ -681,7 +681,8 @@ type History struct {
 	Comment   string `json:"comment"`
 }
 
-// History gets the history of an image and information about its layers
+// History gets the history of an image and the IDs of images that are part of
+// its history
 func (i *Image) History(ctx context.Context) ([]*History, error) {
 	img, err := i.toImageRef(ctx)
 	if err != nil {
@@ -692,31 +693,92 @@ func (i *Image) History(ctx context.Context) ([]*History, error) {
 		return nil, err
 	}
 
-	// Get the IDs of the images making up the history layers
-	// if the images exist locally in the store
+	// Use our layers list to find images that use one of them as its
+	// topmost layer.
+	interestingLayers := make(map[string]bool)
+	layer, err := i.imageruntime.store.Layer(i.TopLayer())
+	if err != nil {
+		return nil, err
+	}
+	for layer != nil {
+		interestingLayers[layer.ID] = true
+		if layer.Parent == "" {
+			break
+		}
+		layer, err = i.imageruntime.store.Layer(layer.Parent)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Get the IDs of the images that share some of our layers. Hopefully
+	// this step means that we'll be able to avoid reading the
+	// configuration of every single image in local storage later on.
 	images, err := i.imageruntime.GetImages()
 	if err != nil {
 		return nil, errors.Wrapf(err, "error getting images from store")
 	}
-	imageIDs := []string{i.ID()}
-	if err := i.historyLayerIDs(i.TopLayer(), images, &imageIDs); err != nil {
-		return nil, errors.Wrap(err, "error getting image IDs for layers in history")
+	interestingImages := make([]*Image, 0, len(images))
+	for i := range images {
+		if interestingLayers[images[i].TopLayer()] {
+			interestingImages = append(interestingImages, images[i])
+		}
+	}
+
+	// Build a list of image IDs that correspond to our history entries.
+	historyImages := make([]*Image, len(oci.History))
+	if len(oci.History) > 0 {
+		// The starting image shares its whole history with itself.
+		historyImages[len(historyImages)-1] = i
+		for i := range interestingImages {
+			image, err := interestingImages[i].ociv1Image(ctx)
+			if err != nil {
+				return nil, errors.Wrapf(err, "error getting image configuration for image %q", interestingImages[i].ID())
+			}
+			// If the candidate has a longer history or no history
+			// at all, then it doesn't share the portion of our
+			// history that we're interested in matching with other
+			// images.
+			if len(image.History) == 0 || len(image.History) > len(historyImages) {
+				continue
+			}
+			// If we don't include all of the layers that the
+			// candidate image does (i.e., our rootfs didn't look
+			// like its rootfs at any point), then it can't be part
+			// of our history.
+			if len(image.RootFS.DiffIDs) > len(oci.RootFS.DiffIDs) {
+				continue
+			}
+			candidateLayersAreUsed := true
+			for i := range image.RootFS.DiffIDs {
+				if image.RootFS.DiffIDs[i] != oci.RootFS.DiffIDs[i] {
+					candidateLayersAreUsed = false
+					break
+				}
+			}
+			if !candidateLayersAreUsed {
+				continue
+			}
+			// If the candidate's entire history is an initial
+			// portion of our history, then we're based on it,
+			// either directly or indirectly.
+			sharedHistory := historiesMatch(oci.History, image.History)
+			if sharedHistory == len(image.History) {
+				historyImages[sharedHistory-1] = interestingImages[i]
+			}
+		}
+	}
 
 	var (
-		imageID    string
-		imgIDCount = 0
 		size       int64
 		sizeCount  = 1
 		allHistory []*History
 	)
 	for i := len(oci.History) - 1; i >= 0; i-- {
-		if imgIDCount < len(imageIDs) {
-			imageID = imageIDs[imgIDCount]
-			imgIDCount++
-		} else {
-			imageID = "<missing>"
+		imageID := "<missing>"
+		if historyImages[i] != nil {
+			imageID = historyImages[i].ID()
 		}
 		if !oci.History[i].EmptyLayer {
 			size = img.LayerInfos()[len(img.LayerInfos())-sizeCount].Size
@@ -1008,26 +1070,110 @@ func splitString(input string) string {
 // IsParent goes through the layers in the store and checks if i.TopLayer is
 // the parent of any other layer in store. Double check that image with that
 // layer exists as well.
-func (i *Image) IsParent() (bool, error) {
-	children, err := i.GetChildren()
+func (i *Image) IsParent(ctx context.Context) (bool, error) {
+	children, err := i.getChildren(ctx, 1)
 	if err != nil {
 		return false, err
 	}
 	return len(children) > 0, nil
 }
 
+// historiesMatch returns the number of entries in the histories which have the
+// same contents
+func historiesMatch(a, b []imgspecv1.History) int {
+	i := 0
+	for i < len(a) && i < len(b) {
+		if a[i].Created != nil && b[i].Created == nil {
+			return i
+		}
+		if a[i].Created == nil && b[i].Created != nil {
+			return i
+		}
+		if a[i].Created != nil && b[i].Created != nil {
+			if !a[i].Created.Equal(*(b[i].Created)) {
+				return i
+			}
+		}
+		if a[i].CreatedBy != b[i].CreatedBy {
+			return i
+		}
+		if a[i].Author != b[i].Author {
+			return i
+		}
+		if a[i].Comment != b[i].Comment {
+			return i
+		}
+		if a[i].EmptyLayer != b[i].EmptyLayer {
+			return i
+		}
+		i++
+	}
+	return i
+}
+
+// areParentAndChild checks diff IDs and history in the two images and returns
+// true if the second should be considered to be directly based on the first
+func areParentAndChild(parent, child *imgspecv1.Image) bool {
+	// the child and candidate parent should share all of the
+	// candidate parent's diff IDs, which together would have
+	// controlled which layers were used
+	if len(parent.RootFS.DiffIDs) > len(child.RootFS.DiffIDs) {
+		return false
+	}
+	childUsesCandidateDiffs := true
+	for i := range parent.RootFS.DiffIDs {
+		if child.RootFS.DiffIDs[i] != parent.RootFS.DiffIDs[i] {
+			childUsesCandidateDiffs = false
+			break
+		}
+	}
+	if !childUsesCandidateDiffs {
+		return false
+	}
+	// the child should have the same history as the parent, plus
+	// one more entry
+	if len(parent.History)+1 != len(child.History) {
+		return false
+	}
+	if historiesMatch(parent.History, child.History) != len(parent.History) {
+		return false
+	}
+	return true
+}
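historiesMatch returns how many leading entries of two OCI histories agree, and that prefix length doubles as the parenthood test used throughout the patch. A trimmed-down copy against the real github.com/opencontainers/image-spec Go types (the Created timestamp comparison is omitted here for brevity):

```go
package main

import (
	"fmt"

	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// historiesMatch counts how many leading history entries agree; a simplified
// version of the function added by this patch.
func historiesMatch(a, b []imgspecv1.History) int {
	i := 0
	for i < len(a) && i < len(b) {
		if a[i].CreatedBy != b[i].CreatedBy || a[i].Author != b[i].Author ||
			a[i].Comment != b[i].Comment || a[i].EmptyLayer != b[i].EmptyLayer {
			return i
		}
		i++
	}
	return i
}

func main() {
	parent := []imgspecv1.History{
		{CreatedBy: "/bin/sh -c #(nop) ADD file:abc in /"},
		{CreatedBy: "/bin/sh -c #(nop) CMD [\"sh\"]", EmptyLayer: true},
	}
	// The child extends the parent's history by exactly one entry.
	child := append(append([]imgspecv1.History{}, parent...),
		imgspecv1.History{CreatedBy: "/bin/sh -c apk add curl"})

	shared := historiesMatch(parent, child)
	// A candidate is "the parent" when its entire history is a prefix of
	// the child's, i.e. shared == len(parent).
	fmt.Println(shared == len(parent)) // true
}
```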
// GetParent returns the image ID of the parent. Return nil if a parent is not found.
-func (i *Image) GetParent() (*Image, error) {
+func (i *Image) GetParent(ctx context.Context) (*Image, error) {
 	images, err := i.imageruntime.GetImages()
 	if err != nil {
 		return nil, err
 	}
-	layer, err := i.imageruntime.store.Layer(i.TopLayer())
+	childLayer, err := i.imageruntime.store.Layer(i.TopLayer())
+	if err != nil {
+		return nil, err
+	}
+	// fetch the configuration for the child image
+	child, err := i.ociv1Image(ctx)
 	if err != nil {
 		return nil, err
 	}
 	for _, img := range images {
-		if img.TopLayer() == layer.Parent {
+		if img.ID() == i.ID() {
+			continue
+		}
+		candidateLayer := img.TopLayer()
+		// as a child, our top layer is either the candidate parent's
+		// layer, or one that's derived from it, so skip over any
+		// candidate image where we know that isn't the case
+		if candidateLayer != childLayer.Parent && candidateLayer != childLayer.ID {
+			continue
+		}
+		// fetch the configuration for the candidate image
+		candidate, err := img.ociv1Image(ctx)
+		if err != nil {
+			return nil, err
+		}
+		// compare them
+		if areParentAndChild(candidate, child) {
 			return img, nil
 		}
 	}
@@ -1035,36 +1181,53 @@
 }
 
 // GetChildren returns a list of the imageIDs that depend on the image
-func (i *Image) GetChildren() ([]string, error) {
+func (i *Image) GetChildren(ctx context.Context) ([]string, error) {
+	return i.getChildren(ctx, 0)
+}
+
+// getChildren returns a list of at most "max" imageIDs that depend on the image
+func (i *Image) getChildren(ctx context.Context, max int) ([]string, error) {
 	var children []string
 	images, err := i.imageruntime.GetImages()
 	if err != nil {
 		return nil, err
 	}
-	layers, err := i.imageruntime.store.Layers()
+
+	// fetch the configuration for the parent image
+	parent, err := i.ociv1Image(ctx)
 	if err != nil {
 		return nil, err
 	}
+	parentLayer := i.TopLayer()
 
-	for _, layer := range layers {
-		if layer.Parent == i.TopLayer() {
-			if imageID := getImageOfTopLayer(images, layer.ID); len(imageID) > 0 {
-				children = append(children, imageID...)
-			}
-		}
-	}
-	return children, nil
-}
-
-// getImageOfTopLayer returns the image ID where layer is the top layer of the image
-func getImageOfTopLayer(images []*Image, layer string) []string {
-	var matches []string
 	for _, img := range images {
-		if img.TopLayer() == layer {
-			matches = append(matches, img.ID())
+		if img.ID() == i.ID() {
+			continue
+		}
+		candidateLayer, err := img.Layer()
+		if err != nil {
+			return nil, err
+		}
+		// if this image's top layer is not our top layer, and is not
+		// based on our top layer, we can skip it
+		if candidateLayer.Parent != parentLayer && candidateLayer.ID != parentLayer {
+			continue
+		}
+		// fetch the configuration for the candidate image
+		candidate, err := img.ociv1Image(ctx)
+		if err != nil {
+			return nil, err
+		}
+		// compare them
+		if areParentAndChild(parent, candidate) {
+			children = append(children, img.ID())
+		}
+		// if we're not building an exhaustive list, maybe we're done?
+		if max > 0 && len(children) >= max {
+			break
 		}
 	}
-	return matches
+	return children, nil
 }
 
 // InputIsID returns a bool if the user input for an image
diff --git a/libpod/image/image_test.go b/libpod/image/image_test.go
index dfe817c90..e93ebf797 100644
--- a/libpod/image/image_test.go
+++ b/libpod/image/image_test.go
@@ -142,7 +142,7 @@ func TestImage_New(t *testing.T) {
 		newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, false, nil)
 		assert.NoError(t, err)
 		assert.NotEqual(t, newImage.ID(), "")
-		err = newImage.Remove(false)
+		err = newImage.Remove(context.Background(), false)
 		assert.NoError(t, err)
 	}
 }
diff --git a/libpod/image/prune.go b/libpod/image/prune.go
index 5bd3c2c99..a4f8a0c9f 100644
--- a/libpod/image/prune.go
+++ b/libpod/image/prune.go
@@ -1,6 +1,8 @@
 package image
 
 import (
+	"context"
+
 	"github.com/containers/libpod/libpod/events"
 	"github.com/pkg/errors"
 )
@@ -34,14 +36,14 @@ func (ir *Runtime) GetPruneImages(all bool) ([]*Image, error) {
 
 // PruneImages prunes dangling and optionally all unused images from the local
 // image store
-func (ir *Runtime) PruneImages(all bool) ([]string, error) {
+func (ir *Runtime) PruneImages(ctx context.Context, all bool) ([]string, error) {
 	var prunedCids []string
 	pruneImages, err := ir.GetPruneImages(all)
 	if err != nil {
 		return nil, errors.Wrap(err, "unable to get images to prune")
 	}
 	for _, p := range pruneImages {
-		if err := p.Remove(true); err != nil {
+		if err := p.Remove(ctx, true); err != nil {
 			return nil, errors.Wrap(err, "failed to prune image")
 		}
 		defer p.newImageEvent(events.Prune)
- err = img.Remove(force) + err = img.Remove(ctx, force) } else { err = errStorage } diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go index a5b911da1..8481a0cec 100644 --- a/pkg/adapter/containers.go +++ b/pkg/adapter/containers.go @@ -697,3 +697,72 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp } return pool.Run() } + +// Restart containers without or without a timeout +func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) { + var ( + containers []*libpod.Container + restartContainers []*libpod.Container + err error + ) + useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed + inputTimeout := c.Timeout + + // Handle --latest + if c.Latest { + lastCtr, err := r.Runtime.GetLatestContainer() + if err != nil { + return nil, nil, errors.Wrapf(err, "unable to get latest container") + } + restartContainers = append(restartContainers, lastCtr) + } else if c.Running { + containers, err = r.GetRunningContainers() + if err != nil { + return nil, nil, err + } + restartContainers = append(restartContainers, containers...) + } else if c.All { + containers, err = r.Runtime.GetAllContainers() + if err != nil { + return nil, nil, err + } + restartContainers = append(restartContainers, containers...) + } else { + for _, id := range c.InputArgs { + ctr, err := r.Runtime.LookupContainer(id) + if err != nil { + return nil, nil, err + } + restartContainers = append(restartContainers, ctr) + } + } + + maxWorkers := shared.DefaultPoolSize("restart") + if c.GlobalIsSet("max-workers") { + maxWorkers = c.GlobalFlags.MaxWorks + } + + logrus.Debugf("Setting maximum workers to %d", maxWorkers) + + // We now have a slice of all the containers to be restarted. 
Iterate them to + // create restart Funcs with a timeout as needed + pool := shared.NewPool("restart", maxWorkers, len(restartContainers)) + for _, c := range restartContainers { + ctr := c + timeout := ctr.StopTimeout() + if useTimeout { + timeout = inputTimeout + } + pool.Add(shared.Job{ + ID: ctr.ID(), + Fn: func() error { + err := ctr.RestartWithTimeout(ctx, timeout) + if err != nil { + logrus.Debugf("Failed to restart container %s: %s", ctr.ID(), err.Error()) + } + return err + }, + }) + } + return pool.Run() +} diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go index cb61871bf..e8f221eaf 100644 --- a/pkg/adapter/containers_remote.go +++ b/pkg/adapter/containers_remote.go @@ -45,6 +45,12 @@ func (c *Container) ID() string { return c.config.ID } +// Restart a single container +func (c *Container) Restart(timeout int64) error { + _, err := iopodman.RestartContainer().Call(c.Runtime.Conn, c.ID(), timeout) + return err +} + // Pause a container func (c *Container) Pause() error { _, err := iopodman.PauseContainer().Call(c.Runtime.Conn, c.ID()) @@ -132,6 +138,23 @@ func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) { }, nil } +// GetAllContainers returns all containers in a slice +func (r *LocalRuntime) GetAllContainers() ([]*Container, error) { + var containers []*Container + ctrs, err := iopodman.GetContainersByContext().Call(r.Conn, true, false, []string{}) + if err != nil { + return nil, err + } + for _, ctr := range ctrs { + container, err := r.LookupContainer(ctr) + if err != nil { + return nil, err + } + containers = append(containers, container) + } + return containers, nil +} + func (r *LocalRuntime) LookupContainersWithStatus(filters []string) ([]*Container, error) { var containers []*Container ctrs, err := iopodman.GetContainersByStatus().Call(r.Conn, filters) @@ -753,3 +776,59 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp } return ok, failures, nil } + +// Restart restarts a container over varlink +func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) { + var ( + containers []*Container + restartContainers []*Container + err error + ok = []string{} + failures = map[string]error{} + ) + useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed + inputTimeout := c.Timeout + + if c.Latest { + lastCtr, err := r.GetLatestContainer() + if err != nil { + return nil, nil, errors.Wrapf(err, "unable to get latest container") + } + restartContainers = append(restartContainers, lastCtr) + } else if c.Running { + containers, err = r.LookupContainersWithStatus([]string{libpod.ContainerStateRunning.String()}) + if err != nil { + return nil, nil, err + } + restartContainers = append(restartContainers, containers...) + } else if c.All { + containers, err = r.GetAllContainers() + if err != nil { + return nil, nil, err + } + restartContainers = append(restartContainers, containers...) 
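shared.NewPool and shared.Job are libpod-internal types; the restart fan-out boils down to a bounded worker pool. A rough self-contained equivalent, using hypothetical names (Job, run) rather than the library's API, built on a counting semaphore and sync.WaitGroup:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// Job pairs an ID with the work to run, mirroring the shape of shared.Job.
type Job struct {
	ID string
	Fn func() error
}

// run executes jobs with at most maxWorkers running concurrently and returns
// the IDs that succeeded plus a map of per-job failures.
func run(maxWorkers int, jobs []Job) ([]string, map[string]error) {
	var (
		mu       sync.Mutex
		wg       sync.WaitGroup
		ok       []string
		failures = map[string]error{}
	)
	sem := make(chan struct{}, maxWorkers) // counting semaphore
	for _, job := range jobs {
		job := job // capture a per-iteration copy, as the patch does with ctr := c
		wg.Add(1)
		sem <- struct{}{} // blocks once maxWorkers goroutines are in flight
		go func() {
			defer wg.Done()
			defer func() { <-sem }()
			err := job.Fn()
			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				failures[job.ID] = err
				return
			}
			ok = append(ok, job.ID)
		}()
	}
	wg.Wait()
	return ok, failures
}

func main() {
	jobs := []Job{
		{ID: "ctr1", Fn: func() error { time.Sleep(10 * time.Millisecond); return nil }},
		{ID: "ctr2", Fn: func() error { return fmt.Errorf("restart failed") }},
	}
	ok, failures := run(2, jobs)
	fmt.Println(ok, failures)
}
```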
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index cb61871bf..e8f221eaf 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -45,6 +45,12 @@ func (c *Container) ID() string {
 	return c.config.ID
 }
 
+// Restart a single container
+func (c *Container) Restart(timeout int64) error {
+	_, err := iopodman.RestartContainer().Call(c.Runtime.Conn, c.ID(), timeout)
+	return err
+}
+
 // Pause a container
 func (c *Container) Pause() error {
 	_, err := iopodman.PauseContainer().Call(c.Runtime.Conn, c.ID())
@@ -132,6 +138,23 @@ func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
 	}, nil
 }
 
+// GetAllContainers returns all containers in a slice
+func (r *LocalRuntime) GetAllContainers() ([]*Container, error) {
+	var containers []*Container
+	ctrs, err := iopodman.GetContainersByContext().Call(r.Conn, true, false, []string{})
+	if err != nil {
+		return nil, err
+	}
+	for _, ctr := range ctrs {
+		container, err := r.LookupContainer(ctr)
+		if err != nil {
+			return nil, err
+		}
+		containers = append(containers, container)
+	}
+	return containers, nil
+}
+
 func (r *LocalRuntime) LookupContainersWithStatus(filters []string) ([]*Container, error) {
 	var containers []*Container
 	ctrs, err := iopodman.GetContainersByStatus().Call(r.Conn, filters)
@@ -753,3 +776,59 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
 	}
 	return ok, failures, nil
 }
+
+// Restart restarts containers over varlink
+func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues) ([]string, map[string]error, error) {
+	var (
+		containers        []*Container
+		restartContainers []*Container
+		err               error
+		ok                = []string{}
+		failures          = map[string]error{}
+	)
+	useTimeout := c.Flag("timeout").Changed || c.Flag("time").Changed
+	inputTimeout := c.Timeout
+
+	if c.Latest {
+		lastCtr, err := r.GetLatestContainer()
+		if err != nil {
+			return nil, nil, errors.Wrapf(err, "unable to get latest container")
+		}
+		restartContainers = append(restartContainers, lastCtr)
+	} else if c.Running {
+		containers, err = r.LookupContainersWithStatus([]string{libpod.ContainerStateRunning.String()})
+		if err != nil {
+			return nil, nil, err
+		}
+		restartContainers = append(restartContainers, containers...)
+	} else if c.All {
+		containers, err = r.GetAllContainers()
+		if err != nil {
+			return nil, nil, err
+		}
+		restartContainers = append(restartContainers, containers...)
+	} else {
+		for _, id := range c.InputArgs {
+			ctr, err := r.LookupContainer(id)
+			if err != nil {
+				return nil, nil, err
+			}
+			restartContainers = append(restartContainers, ctr)
+		}
+	}
+
+	for _, c := range restartContainers {
+		c := c
+		timeout := c.config.StopTimeout
+		if useTimeout {
+			timeout = inputTimeout
+		}
+		err := c.Restart(int64(timeout))
+		if err != nil {
+			failures[c.ID()] = err
+		} else {
+			ok = append(ok, c.ID())
+		}
+	}
+	return ok, failures, nil
+}
diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go
index 255f844d2..790ed5c89 100644
--- a/pkg/adapter/runtime.go
+++ b/pkg/adapter/runtime.go
@@ -119,8 +119,8 @@ func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, for
 }
 
 // PruneImages is wrapper into PruneImages within the image pkg
-func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
-	return r.ImageRuntime().PruneImages(all)
+func (r *LocalRuntime) PruneImages(ctx context.Context, all bool) ([]string, error) {
+	return r.ImageRuntime().PruneImages(ctx, all)
 }
 
 // Export is a wrapper to container export to a tarfile
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
index dcb0924ce..29ee821e0 100644
--- a/pkg/adapter/runtime_remote.go
+++ b/pkg/adapter/runtime_remote.go
@@ -256,7 +256,7 @@ func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authf
 // IsParent goes through the layers in the store and checks if i.TopLayer is
 // the parent of any other layer in store. Double check that image with that
 // layer exists as well.
-func (ci *ContainerImage) IsParent() (bool, error) {
+func (ci *ContainerImage) IsParent(context.Context) (bool, error) {
 	return ci.remoteImage.isParent, nil
 }
 
@@ -338,7 +338,7 @@ func (ci *ContainerImage) History(ctx context.Context) ([]*image.History, error)
 }
 
 // PruneImages is the wrapper call for a remote-client to prune images
-func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
+func (r *LocalRuntime) PruneImages(ctx context.Context, all bool) ([]string, error) {
 	return iopodman.ImagesPrune().Call(r.Conn, all)
 }
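The recurring theme in these hunks is threading context.Context through as the first parameter, with context.TODO() left at entry points that do not carry a context yet (the varlink handlers below). A small sketch of that plumbing pattern, using hypothetical names:

```go
package main

import (
	"context"
	"fmt"
)

// remove stands in for image.Remove after the patch: ctx comes first, so
// cancellation can propagate through the whole parent/child walk.
func remove(ctx context.Context, id string) error {
	if err := ctx.Err(); err != nil {
		return err // give up promptly if the caller cancelled
	}
	fmt.Println("removed", id)
	return nil
}

// pruneImages mirrors the reworked PruneImages(ctx, all) signature.
func pruneImages(ctx context.Context, all bool) ([]string, error) {
	var pruned []string
	for _, id := range []string{"aaa", "bbb"} {
		if err := remove(ctx, id); err != nil {
			return pruned, err
		}
		pruned = append(pruned, id)
	}
	return pruned, nil
}

func main() {
	// CLI entry points pass a real context; entry points without one use
	// context.TODO() as an explicit marker that plumbing is unfinished.
	if _, err := pruneImages(context.TODO(), true); err != nil {
		fmt.Println("prune:", err)
	}
}
```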
diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go
index 470eadaeb..cecddf6b3 100644
--- a/pkg/varlinkapi/images.go
+++ b/pkg/varlinkapi/images.go
@@ -4,6 +4,7 @@ package varlinkapi
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -49,7 +50,7 @@ func (i *LibpodAPI) ListImages(call iopodman.VarlinkCall) error {
 		}
 
 		size, _ := image.Size(getContext())
-		isParent, err := image.IsParent()
+		isParent, err := image.IsParent(context.TODO())
 		if err != nil {
 			return call.ReplyErrorOccurred(err.Error())
 		}
@@ -503,7 +504,7 @@ func (i *LibpodAPI) DeleteUnusedImages(call iopodman.VarlinkCall) error {
 			return call.ReplyErrorOccurred(err.Error())
 		}
 		if len(containers) == 0 {
-			if err := img.Remove(false); err != nil {
+			if err := img.Remove(context.TODO(), false); err != nil {
 				return call.ReplyErrorOccurred(err.Error())
 			}
 			deletedImages = append(deletedImages, img.ID())
@@ -739,7 +740,7 @@ func (i *LibpodAPI) ContainerRunlabel(call iopodman.VarlinkCall, input iopodman.
 
 // ImagesPrune ....
 func (i *LibpodAPI) ImagesPrune(call iopodman.VarlinkCall, all bool) error {
-	prunedImages, err := i.Runtime.ImageRuntime().PruneImages(all)
+	prunedImages, err := i.Runtime.ImageRuntime().PruneImages(context.TODO(), all)
 	if err != nil {
 		return call.ReplyErrorOccurred(err.Error())
 	}
diff --git a/test/e2e/restart_test.go b/test/e2e/restart_test.go
index 1daf63a0e..7a9a466d8 100644
--- a/test/e2e/restart_test.go
+++ b/test/e2e/restart_test.go
@@ -1,5 +1,3 @@
-// +build !remoteclient
-
 package integration
 
 import (
diff --git a/test/system/005-info.bats b/test/system/005-info.bats
index c64b011bd..47c7a52fc 100644
--- a/test/system/005-info.bats
+++ b/test/system/005-info.bats
@@ -3,7 +3,7 @@ load helpers
 
 @test "podman info - basic test" {
-    skip_if_remote
+    skip_if_remote "capitalization inconsistencies"
 
     run_podman info
 
@@ -28,7 +28,7 @@ RunRoot:
 }
 
 @test "podman info - json" {
-    skip_if_remote
+    skip_if_remote "capitalization inconsistencies"
 
     run_podman info --format=json
diff --git a/test/system/030-run.bats b/test/system/030-run.bats
index bdbe724ef..a29b1adc3 100644
--- a/test/system/030-run.bats
+++ b/test/system/030-run.bats
@@ -3,8 +3,6 @@ load helpers
 
 @test "podman run - basic tests" {
-    skip_if_remote
-
     rand=$(random_string 30)
 
     tests="
 true | 0 |
diff --git a/test/system/035-logs.bats b/test/system/035-logs.bats
index 5736e0939..055865c8d 100644
--- a/test/system/035-logs.bats
+++ b/test/system/035-logs.bats
@@ -6,8 +6,6 @@ load helpers
 
 @test "podman logs - basic test" {
-    skip_if_remote
-
     rand_string=$(random_string 40)
 
     run_podman create $IMAGE echo $rand_string
diff --git a/test/system/070-build.bats b/test/system/070-build.bats
index c6a25093f..53acf6edd 100644
--- a/test/system/070-build.bats
+++ b/test/system/070-build.bats
@@ -6,7 +6,11 @@ load helpers
 
 @test "podman build - basic test" {
-    skip_if_remote
+    if [[ "$PODMAN" =~ -remote ]]; then
+        if [ "$(id -u)" -ne 0 ]; then
+            skip "unreliable with podman-remote and rootless; #2972"
+        fi
+    fi
 
     rand_filename=$(random_string 20)
     rand_content=$(random_string 50)
diff --git a/test/system/400-unprivileged-access.bats b/test/system/400-unprivileged-access.bats
index 0358b3beb..738d8d87b 100644
--- a/test/system/400-unprivileged-access.bats
+++ b/test/system/400-unprivileged-access.bats
@@ -31,6 +31,12 @@ die() {
     echo "#| FAIL: $*" >&2
     echo "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^" >&2
 
+    # Show permissions of directories from here on up
+    while expr "$path" : "/var/lib/containers" >/dev/null; do
+        echo "#|  $(ls -ld $path)"
+        path=$(dirname $path)
+    done
+
     exit 1
 }
 
@@ -65,8 +71,10 @@ EOF
 
     # get podman image and container storage directories
     run_podman info --format '{{.store.GraphRoot}}'
+    is "$output" "/var/lib/containers/storage" "GraphRoot in expected place"
     GRAPH_ROOT="$output"
     run_podman info --format '{{.store.RunRoot}}'
+    is "$output" "/var/run/containers/storage" "RunRoot in expected place"
    RUN_ROOT="$output"
 
     # The main test: find all world-writable files or directories underneath
diff --git a/vendor.conf b/vendor.conf
index 7c56de377..3abb01114 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -19,7 +19,7 @@ github.com/containers/image v1.5.1
 github.com/vbauerster/mpb v3.3.4
 github.com/mattn/go-isatty v0.0.4
 github.com/VividCortex/ewma v1.1.1
-github.com/containers/storage v1.12.1
+github.com/containers/storage v1.12.3
 github.com/containers/psgo v1.2
 github.com/coreos/go-systemd v14
 github.com/coreos/pkg v4
diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go
index 40b912bb3..aef6becfe 100644
--- a/vendor/github.com/containers/storage/containers_ffjson.go
+++ b/vendor/github.com/containers/storage/containers_ffjson.go
@@ -1,5 +1,5 @@
 // Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./containers.go
+// source: containers.go
 
 package storage
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
index bcbc61284..d614b78fc 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -1,4 +1,4 @@
-// +build linux
+// +build cgo
 
 package copy
 
@@ -153,8 +153,8 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 
 		isHardlink := false
 
-		switch f.Mode() & os.ModeType {
-		case 0: // Regular file
+		switch mode := f.Mode(); {
+		case mode.IsRegular():
 			id := fileID{dev: stat.Dev, ino: stat.Ino}
 			if copyMode == Hardlink {
 				isHardlink = true
@@ -172,12 +172,12 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 				copiedFiles[id] = dstPath
 			}
 
-		case os.ModeDir:
+		case mode.IsDir():
 			if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
 				return err
 			}
 
-		case os.ModeSymlink:
+		case mode&os.ModeSymlink != 0:
 			link, err := os.Readlink(srcPath)
 			if err != nil {
 				return err
@@ -187,14 +187,15 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 				return err
 			}
 
-		case os.ModeNamedPipe:
+		case mode&os.ModeNamedPipe != 0:
 			fallthrough
-		case os.ModeSocket:
+
+		case mode&os.ModeSocket != 0:
 			if err := unix.Mkfifo(dstPath, stat.Mode); err != nil {
 				return err
 			}
 
-		case os.ModeDevice:
+		case mode&os.ModeDevice != 0:
 			if rsystem.RunningInUserNS() {
 				// cannot create a device if running in user namespace
 				return nil
@@ -204,7 +205,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 			}
 
 		default:
-			return fmt.Errorf("unknown file type for %s", srcPath)
+			return fmt.Errorf("unknown file type with mode %v for %s", mode, srcPath)
 		}
 
 		// Everything below is copying metadata from src to dst. All this metadata
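The switch rewrite in DirCopy matters because f.Mode()&os.ModeType can carry more than one type bit: a character device is ModeDevice|ModeCharDevice, so an equality case like `case os.ModeDevice:` never matched it and the copy fell through to the "unknown file type" error. A self-contained illustration of the new predicate style:

```go
package main

import (
	"fmt"
	"os"
)

// classify mirrors the reworked switch: boolean cases on the mode instead of
// matching f.Mode()&os.ModeType against single bit values.
func classify(mode os.FileMode) string {
	switch {
	case mode.IsRegular():
		return "regular file"
	case mode.IsDir():
		return "directory"
	case mode&os.ModeSymlink != 0:
		return "symlink"
	case mode&os.ModeNamedPipe != 0, mode&os.ModeSocket != 0:
		return "fifo or socket"
	case mode&os.ModeDevice != 0:
		// matches both block devices (ModeDevice) and char devices
		// (ModeDevice|ModeCharDevice), which equality matching missed
		return "device"
	default:
		return fmt.Sprintf("unknown file type with mode %v", mode)
	}
}

func main() {
	fi, err := os.Lstat(os.Args[0]) // the binary itself: a regular file
	if err != nil {
		panic(err)
	}
	fmt.Println(classify(fi.Mode()))
}
```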
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
new file mode 100644
index 000000000..4d44f2f35
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
@@ -0,0 +1,19 @@
+// +build !linux !cgo
+
+package copy
+
+import "github.com/containers/storage/pkg/chrootarchive"
+
+// Mode indicates whether to use hardlink or copy content
+type Mode int
+
+const (
+	// Content creates a new file, and copies the content of the file
+	Content Mode = iota
+)
+
+// DirCopy copies or hardlinks the contents of one directory to another,
+// properly handling soft links
+func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error {
+	return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir)
+}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
index 58abca477..f63845252 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
@@ -119,10 +119,17 @@ func checkDevHasFS(dev string) error {
 }
 
 func verifyBlockDevice(dev string, force bool) error {
-	if err := checkDevAvailable(dev); err != nil {
+	realPath, err := filepath.Abs(dev)
+	if err != nil {
+		return errors.Errorf("unable to get absolute path for %s: %s", dev, err)
+	}
+	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+		return errors.Errorf("failed to canonicalise path for %s: %s", dev, err)
+	}
+	if err := checkDevAvailable(realPath); err != nil {
 		return err
 	}
-	if err := checkDevInVG(dev); err != nil {
+	if err := checkDevInVG(realPath); err != nil {
 		return err
 	}
 
@@ -130,7 +137,7 @@ func verifyBlockDevice(dev string, force bool) error {
 		return nil
 	}
 
-	if err := checkDevHasFS(dev); err != nil {
+	if err := checkDevHasFS(realPath); err != nil {
 		return err
 	}
 	return nil
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 657d9b3ce..69036a5c1 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -796,7 +796,17 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 			mountProgram := exec.Command(d.options.mountProgram, "-o", label, target)
 			mountProgram.Dir = d.home
-			return mountProgram.Run()
+			var b bytes.Buffer
+			mountProgram.Stderr = &b
+			err := mountProgram.Run()
+			if err != nil {
+				output := b.String()
+				if output == "" {
+					output = "<stderr empty>"
+				}
+				return errors.Wrapf(err, "using mount program %s: %s", d.options.mountProgram, output)
+			}
+			return nil
 		}
 	} else if len(mountData) > pageSize {
 		//FIXME: We need to figure out to get this to work with additional stores
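Capturing the mount helper's stderr turns an opaque "exit status 1" into an actionable error. The same pattern, as a generic sketch (runWithStderr is a hypothetical helper, not part of the library):

```go
package main

import (
	"bytes"
	"fmt"
	"os/exec"

	"github.com/pkg/errors"
)

// runWithStderr runs cmd and, on failure, folds whatever the program wrote to
// stderr into the returned error, the way the overlay driver now reports
// mount-program failures.
func runWithStderr(cmd *exec.Cmd) error {
	var b bytes.Buffer
	cmd.Stderr = &b
	if err := cmd.Run(); err != nil {
		output := b.String()
		if output == "" {
			output = "<stderr empty>"
		}
		return errors.Wrapf(err, "running %s: %s", cmd.Path, output)
	}
	return nil
}

func main() {
	// "false" exits non-zero without output; the error still says so clearly.
	if err := runWithStderr(exec.Command("false")); err != nil {
		fmt.Println(err)
	}
}
```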
diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go
index 539acfe93..6b40ebd59 100644
--- a/vendor/github.com/containers/storage/images_ffjson.go
+++ b/vendor/github.com/containers/storage/images_ffjson.go
@@ -1,5 +1,5 @@
 // Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./images.go
+// source: images.go
 
 package storage
diff --git a/vendor/github.com/containers/storage/lockfile.go b/vendor/github.com/containers/storage/lockfile.go
index 3a1befcbe..ed8753337 100644
--- a/vendor/github.com/containers/storage/lockfile.go
+++ b/vendor/github.com/containers/storage/lockfile.go
@@ -58,8 +58,17 @@ func GetROLockfile(path string) (Locker, error) {
 	return getLockfile(path, true)
 }
 
-// getLockfile is a helper for GetLockfile and GetROLockfile and returns Locker
-// based on the path and read-only property.
+// getLockfile returns a Locker object, possibly (depending on the platform)
+// working inter-process, and associated with the specified path.
+//
+// If ro, the lock is a read-write lock and the returned Locker should correspond to the
+// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
+// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
+//
+// WARNING:
+// - The lock may or MAY NOT be inter-process.
+// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
+// - Even if ro, the lock MAY be exclusive.
 func getLockfile(path string, ro bool) (Locker, error) {
 	lockfilesLock.Lock()
 	defer lockfilesLock.Unlock()
@@ -79,7 +88,7 @@ func getLockfile(path string, ro bool) (Locker, error) {
 		}
 		return locker, nil
 	}
-	locker, err := getLockFile(path, ro) // platform dependent locker
+	locker, err := createLockerForPath(path, ro) // platform-dependent locker
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/containers/storage/lockfile_unix.go b/vendor/github.com/containers/storage/lockfile_unix.go
index a9dc64122..8e0f22cb5 100644
--- a/vendor/github.com/containers/storage/lockfile_unix.go
+++ b/vendor/github.com/containers/storage/lockfile_unix.go
@@ -13,18 +13,51 @@ import (
 	"golang.org/x/sys/unix"
 )
 
-func getLockFile(path string, ro bool) (Locker, error) {
-	var fd int
-	var err error
+type lockfile struct {
+	// rwMutex serializes concurrent reader-writer acquisitions in the same process space
+	rwMutex *sync.RWMutex
+	// stateMutex is used to synchronize concurrent accesses to the state below
+	stateMutex *sync.Mutex
+	counter    int64
+	file       string
+	fd         uintptr
+	lw         string
+	locktype   int16
+	locked     bool
+	ro         bool
+}
+
+// openLock opens the file at path and returns the corresponding file
+// descriptor. Note that the path is opened read-only when ro is set. If ro
+// is unset, openLock will open the path read-write and create the file if
+// necessary.
+func openLock(path string, ro bool) (int, error) {
 	if ro {
-		fd, err = unix.Open(path, os.O_RDONLY, 0)
-	} else {
-		fd, err = unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
+		return unix.Open(path, os.O_RDONLY, 0)
 	}
+	return unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
+}
+
+// createLockerForPath returns a Locker object, possibly (depending on the platform)
+// working inter-process and associated with the specified path.
+//
+// This function will be called at most once for each path value within a single process.
+//
+// If ro, the lock is a read-write lock and the returned Locker should correspond to the
+// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
+// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
+//
+// WARNING:
+// - The lock may or MAY NOT be inter-process.
+// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
+// - Even if ro, the lock MAY be exclusive.
+func createLockerForPath(path string, ro bool) (Locker, error) {
+	// Check if we can open the lock.
+	fd, err := openLock(path, ro)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error opening %q", path)
 	}
-	unix.CloseOnExec(fd)
+	unix.Close(fd)
 
 	locktype := unix.F_WRLCK
 	if ro {
@@ -34,27 +67,12 @@ func getLockFile(path string, ro bool) (Locker, error) {
 		stateMutex: &sync.Mutex{},
 		rwMutex:    &sync.RWMutex{},
 		file:       path,
-		fd:         uintptr(fd),
 		lw:         stringid.GenerateRandomID(),
 		locktype:   int16(locktype),
 		locked:     false,
 		ro:         ro}, nil
 }
 
-type lockfile struct {
-	// rwMutex serializes concurrent reader-writer acquisitions in the same process space
-	rwMutex *sync.RWMutex
-	// stateMutex is used to synchronize concurrent accesses to the state below
-	stateMutex *sync.Mutex
-	counter    int64
-	file       string
-	fd         uintptr
-	lw         string
-	locktype   int16
-	locked     bool
-	ro         bool
-}
-
 // lock locks the lockfile via fcntl(2) based on the specified type and
 // command.
 func (l *lockfile) lock(l_type int16) {
@@ -63,7 +81,6 @@ func (l *lockfile) lock(l_type int16) {
 		Whence: int16(os.SEEK_SET),
 		Start:  0,
 		Len:    0,
-		Pid:    int32(os.Getpid()),
 	}
 	switch l_type {
 	case unix.F_RDLCK:
@@ -74,7 +91,16 @@ func (l *lockfile) lock(l_type int16) {
 		panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type))
 	}
 	l.stateMutex.Lock()
+	defer l.stateMutex.Unlock()
 	if l.counter == 0 {
+		// If we're the first reference on the lock, we need to open the file again.
+		fd, err := openLock(l.file, l.ro)
+		if err != nil {
+			panic(fmt.Sprintf("error opening %q", l.file))
+		}
+		unix.CloseOnExec(fd)
+		l.fd = uintptr(fd)
+
 		// Optimization: only use the (expensive) fcntl syscall when
 		// the counter is 0. In this case, we're either the first
 		// reader lock or a writer lock.
@@ -85,7 +111,6 @@ func (l *lockfile) lock(l_type int16) {
 	l.locktype = l_type
 	l.locked = true
 	l.counter++
-	l.stateMutex.Unlock()
 }
 
 // Lock locks the lockfile as a writer. Note that RLock() will be called if
@@ -133,6 +158,8 @@ func (l *lockfile) Unlock() {
 		for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
 			time.Sleep(10 * time.Millisecond)
 		}
+		// Close the file descriptor on the last unlock.
+		unix.Close(int(l.fd))
 	}
 	if l.locktype == unix.F_RDLCK {
 		l.rwMutex.RUnlock()
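The net effect of the lockfile rework is that the descriptor is opened on the first Lock and closed on the last Unlock, instead of living for the lifetime of the Locker, so idle lock objects no longer pin one open fd each. A stripped-down, Linux-only sketch with a hypothetical refLock type (read locks and the lw/locktype bookkeeping are omitted):

```go
package main

import (
	"fmt"
	"os"
	"sync"

	"golang.org/x/sys/unix"
)

// refLock holds the fcntl lock's file descriptor only while the reference
// counter is non-zero.
type refLock struct {
	mu      sync.Mutex
	path    string
	fd      int
	counter int64
}

func (l *refLock) Lock() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.counter == 0 { // first reference: open and lock the file
		fd, err := unix.Open(l.path, os.O_RDWR|os.O_CREATE, 0600)
		if err != nil {
			return err
		}
		l.fd = fd
		lk := unix.Flock_t{Type: unix.F_WRLCK, Whence: int16(os.SEEK_SET)}
		if err := unix.FcntlFlock(uintptr(l.fd), unix.F_SETLKW, &lk); err != nil {
			unix.Close(fd)
			return err
		}
	}
	l.counter++
	return nil
}

func (l *refLock) Unlock() {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.counter--
	if l.counter == 0 { // last reference: drop the lock and the descriptor
		lk := unix.Flock_t{Type: unix.F_UNLCK, Whence: int16(os.SEEK_SET)}
		unix.FcntlFlock(uintptr(l.fd), unix.F_SETLKW, &lk)
		unix.Close(l.fd)
	}
}

func main() {
	l := &refLock{path: "/tmp/example.lock"}
	if err := l.Lock(); err != nil {
		panic(err)
	}
	fmt.Println("locked; fd is open only while the lock is held")
	l.Unlock()
}
```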
diff --git a/vendor/github.com/containers/storage/lockfile_windows.go b/vendor/github.com/containers/storage/lockfile_windows.go
index a3821bfeb..c02069495 100644
--- a/vendor/github.com/containers/storage/lockfile_windows.go
+++ b/vendor/github.com/containers/storage/lockfile_windows.go
@@ -8,7 +8,20 @@ import (
 	"time"
 )
 
-func getLockFile(path string, ro bool) (Locker, error) {
+// createLockerForPath returns a Locker object, possibly (depending on the platform)
+// working inter-process and associated with the specified path.
+//
+// This function will be called at most once for each path value within a single process.
+//
+// If ro, the lock is a read-write lock and the returned Locker should correspond to the
+// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
+// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
+//
+// WARNING:
+// - The lock may or MAY NOT be inter-process.
+// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
+// - Even if ro, the lock MAY be exclusive.
+func createLockerForPath(path string, ro bool) (Locker, error) {
 	return &lockfile{locked: false}, nil
 }
diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go
index c56aa86a2..86f98f16e 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/parser.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go
@@ -2,6 +2,8 @@ package idtools
 
 import (
 	"fmt"
+	"math"
+	"math/bits"
 	"strconv"
 	"strings"
 )
@@ -31,10 +33,11 @@ func parseTriple(spec []string) (container, host, size uint32, err error) {
 
 // ParseIDMap parses idmap triples from string.
 func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) {
+	stdErr := fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
 	for _, idMapSpec := range mapSpec {
 		idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec))
 		if len(idSpec)%3 != 0 {
-			return nil, fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
+			return nil, stdErr
 		}
 		for i := range idSpec {
 			if i%3 != 0 {
@@ -42,7 +45,11 @@ func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error)
 			}
 			cid, hid, size, err := parseTriple(idSpec[i : i+3])
 			if err != nil {
-				return nil, fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
+				return nil, stdErr
 			}
+			// Avoid possible integer overflow on 32bit builds
+			if bits.UintSize == 32 && (cid > math.MaxInt32 || hid > math.MaxInt32 || size > math.MaxInt32) {
+				return nil, stdErr
+			}
 			mapping := IDMap{
 				ContainerID: int(cid),
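The overflow guard exists because IDMap stores plain ints: on a 32-bit build, int is 32 bits wide, so converting a uint32 above MaxInt32 would wrap to a negative ID. A small illustration (toInt is a hypothetical helper mirroring the same check):

```go
package main

import (
	"fmt"
	"math"
	"math/bits"
)

// toInt converts a parsed uint32 ID to int the way the patched ParseIDMap
// guards the conversion: values that cannot fit in a 32-bit int are rejected
// rather than silently wrapping.
func toInt(v uint32) (int, error) {
	if bits.UintSize == 32 && v > math.MaxInt32 {
		return 0, fmt.Errorf("ID %d overflows int on a 32-bit build", v)
	}
	return int(v), nil
}

func main() {
	for _, v := range []uint32{100000, math.MaxUint32} {
		n, err := toInt(v)
		if err != nil {
			fmt.Println(err) // only triggers on 32-bit builds
			continue
		}
		fmt.Println(n)
	}
}
```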
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 7e39e3959..27b00f6fe 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -460,6 +460,9 @@ type Store interface {
 	// Version returns version information, in the form of key-value pairs, from
 	// the storage package.
 	Version() ([][2]string, error)
+
+	// GetDigestLock returns digest-specific Locker.
+	GetDigestLock(digest.Digest) (Locker, error)
 }
 
 // IDMappingOptions are used for specifying how ID mapping should be set up for
@@ -529,6 +532,7 @@ type store struct {
 	imageStore     ImageStore
 	roImageStores  []ROImageStore
 	containerStore ContainerStore
+	digestLockRoot string
 }
 
 // GetStore attempts to find an already-created Store object matching the
@@ -698,9 +702,20 @@ func (s *store) load() error {
 		return err
 	}
 	s.containerStore = rcs
+
+	s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks")
+	if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil {
+		return err
+	}
+
 	return nil
 }
 
+// GetDigestLock returns a digest-specific Locker.
+func (s *store) GetDigestLock(d digest.Digest) (Locker, error) {
+	return GetLockfile(filepath.Join(s.digestLockRoot, d.String()))
+}
+
 func (s *store) getGraphDriver() (drivers.Driver, error) {
 	if s.graphDriver != nil {
 		return s.graphDriver, nil
@@ -1023,8 +1038,9 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
 		return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap)
 	}
 	var layer, parentLayer *Layer
+	allStores := append([]ROLayerStore{rlstore}, lstores...)
 	// Locate the image's top layer and its parent, if it has one.
-	for _, s := range append([]ROLayerStore{rlstore}, lstores...) {
+	for _, s := range allStores {
 		store := s
 		if store != rlstore {
 			store.Lock()
@@ -1041,10 +1057,13 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
 			// We want the layer's parent, too, if it has one.
 			var cParentLayer *Layer
 			if cLayer.Parent != "" {
-				// Its parent should be around here, somewhere.
-				if cParentLayer, err = store.Get(cLayer.Parent); err != nil {
-					// Nope, couldn't find it. We're not going to be able
-					// to diff this one properly.
+				// Its parent should be in one of the stores, somewhere.
+				for _, ps := range allStores {
+					if cParentLayer, err = ps.Get(cLayer.Parent); err == nil {
+						break
+					}
+				}
+				if cParentLayer == nil {
 					continue
 				}
 			}
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index e74956c9e..6c9f163a3 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -6,6 +6,7 @@ import (
 	"os/exec"
 	"os/user"
 	"path/filepath"
+	"strconv"
 	"strings"
 
 	"github.com/BurntSushi/toml"
@@ -73,7 +74,7 @@ func GetRootlessRuntimeDir(rootlessUid int) (string, error) {
 	if runtimeDir == "" {
 		tmpDir := fmt.Sprintf("/run/user/%d", rootlessUid)
 		st, err := system.Stat(tmpDir)
-		if err == nil && int(st.UID()) == os.Getuid() && st.Mode() == 0700 {
+		if err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 {
 			return tmpDir, nil
 		}
 	}
@@ -158,6 +159,21 @@ func getTomlStorage(storeOptions *StoreOptions) *tomlConfig {
 	return config
 }
 
+func getRootlessUID() int {
+	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
+	if uidEnv != "" {
+		u, _ := strconv.Atoi(uidEnv)
+		return u
+	}
+	return os.Geteuid()
+}
+
+// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers
+func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) {
+	uid := getRootlessUID()
+	return DefaultStoreOptions(uid != 0, uid)
+}
+
 // DefaultStoreOptions returns the default storage ops for containers
 func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) {
 	var (
@@ -166,14 +182,14 @@ func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) {
 		err error
 	)
 	storageOpts := defaultStoreOptions
-	if rootless {
+	if rootless && rootlessUid != 0 {
 		storageOpts, err = getRootlessStorageOpts(rootlessUid)
 		if err != nil {
 			return storageOpts, err
 		}
 	}
 
-	storageConf, err := DefaultConfigFile(rootless)
+	storageConf, err := DefaultConfigFile(rootless && rootlessUid != 0)
 	if err != nil {
 		return storageOpts, err
 	}
@@ -188,7 +204,7 @@ func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) {
 		return storageOpts, errors.Wrapf(err, "cannot stat %s", storageConf)
 	}
 
-	if rootless {
+	if rootless && rootlessUid != 0 {
 		if err == nil {
 			// If the file did not specify a graphroot or runroot,
 			// set sane defaults so we don't try and use root-owned
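The relaxed permission test in GetRootlessRuntimeDir accepts any mode whose owner bits include rwx and whose group/other read and write bits are clear, rather than demanding exactly 0700 (which rejected directories with harmless extra bits set). A small demonstration (usableRuntimeDir is a hypothetical wrapper around the same bit tests):

```go
package main

import (
	"fmt"
	"os"
)

// usableRuntimeDir applies the patched condition: the owner must have full
// access (mode&0700 == 0700) and group/other must have neither read nor
// write access (mode&0066 == 0).
func usableRuntimeDir(fi os.FileInfo) bool {
	mode := fi.Mode().Perm()
	return mode&0700 == 0700 && mode&0066 == 0
}

func main() {
	dir, err := os.MkdirTemp("", "runtimedir")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	os.Chmod(dir, 0710) // group execute bit set: still acceptable now
	fi, _ := os.Stat(dir)
	fmt.Println(usableRuntimeDir(fi)) // true

	os.Chmod(dir, 0770) // group write bit set: rejected
	fi, _ = os.Stat(dir)
	fmt.Println(usableRuntimeDir(fi)) // false
}
```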