Diffstat (limited to 'libpod')
-rw-r--r--  libpod/adapter/client.go  47
-rw-r--r--  libpod/adapter/containers_remote.go  50
-rw-r--r--  libpod/adapter/images_remote.go  24
-rw-r--r--  libpod/adapter/info_remote.go  56
-rw-r--r--  libpod/adapter/runtime.go  312
-rw-r--r--  libpod/adapter/runtime_remote.go  674
-rw-r--r--  libpod/adapter/volumes_remote.go  33
-rw-r--r--  libpod/boltdb_state.go  92
-rw-r--r--  libpod/boltdb_state_internal.go  7
-rw-r--r--  libpod/container_commit.go  2
-rw-r--r--  libpod/container_internal.go  23
-rw-r--r--  libpod/container_internal_linux.go  43
-rw-r--r--  libpod/image/image.go  69
-rw-r--r--  libpod/image/search.go  277
-rw-r--r--  libpod/image/utils.go  26
-rw-r--r--  libpod/in_memory_state.go  64
-rw-r--r--  libpod/lock/in_memory_locks.go  11
-rw-r--r--  libpod/lock/lock.go  14
-rw-r--r--  libpod/lock/shm/shm_lock.c  36
-rw-r--r--  libpod/lock/shm/shm_lock.go  16
-rw-r--r--  libpod/lock/shm/shm_lock.h  1
-rw-r--r--  libpod/lock/shm/shm_lock_test.go  28
-rw-r--r--  libpod/lock/shm_lock_manager_linux.go  7
-rw-r--r--  libpod/lock/shm_lock_manager_unsupported.go  5
-rw-r--r--  libpod/options.go  21
-rw-r--r--  libpod/runtime.go  43
-rw-r--r--  libpod/runtime_ctr.go  2
-rw-r--r--  libpod/runtime_img.go  44
-rw-r--r--  libpod/runtime_renumber.go  57
-rw-r--r--  libpod/runtime_volume.go  13
-rw-r--r--  libpod/runtime_volume_linux.go  31
-rw-r--r--  libpod/state.go  24
-rw-r--r--  libpod/state_test.go  72
-rw-r--r--  libpod/volume.go  5
-rw-r--r--  libpod/volume_internal.go  3
35 files changed, 941 insertions, 1291 deletions
diff --git a/libpod/adapter/client.go b/libpod/adapter/client.go
deleted file mode 100644
index 6512a5952..000000000
--- a/libpod/adapter/client.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// +build remoteclient
-
-package adapter
-
-import (
- "os"
-
- "github.com/sirupsen/logrus"
- "github.com/varlink/go/varlink"
-)
-
-// DefaultAddress is the default address of the varlink socket
-const DefaultAddress = "unix:/run/podman/io.podman"
-
-// Connect provides a varlink connection
-func (r RemoteRuntime) Connect() (*varlink.Connection, error) {
- var err error
- var connection *varlink.Connection
- if bridge := os.Getenv("PODMAN_VARLINK_BRIDGE"); bridge != "" {
- logrus.Infof("Connecting with varlink bridge")
- logrus.Debugf("%s", bridge)
- connection, err = varlink.NewBridge(bridge)
- } else {
- address := os.Getenv("PODMAN_VARLINK_ADDRESS")
- if address == "" {
- address = DefaultAddress
- }
- logrus.Infof("Connecting with varlink address")
- logrus.Debugf("%s", address)
- connection, err = varlink.NewConnection(address)
- }
- if err != nil {
- return nil, err
- }
- return connection, nil
-}
-
-// RefreshConnection is used to replace the current r.Conn after things like
-// using an upgraded varlink connection
-func (r RemoteRuntime) RefreshConnection() error {
- newConn, err := r.Connect()
- if err != nil {
- return err
- }
- r.Conn = newConn
- return nil
-}
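
The removed Connect method resolves the varlink endpoint from PODMAN_VARLINK_BRIDGE or PODMAN_VARLINK_ADDRESS, falling back to the default unix socket. A minimal standalone sketch of that lookup, assuming the same varlink Go bindings vendored in this tree; the connect helper and main are illustrative, not podman code:

package main

import (
	"log"
	"os"

	"github.com/varlink/go/varlink"
)

const defaultAddress = "unix:/run/podman/io.podman"

// connect mirrors the env-var precedence of the removed Connect method:
// a bridge command wins over an explicit address, which wins over the default socket.
func connect() (*varlink.Connection, error) {
	if bridge := os.Getenv("PODMAN_VARLINK_BRIDGE"); bridge != "" {
		return varlink.NewBridge(bridge)
	}
	address := os.Getenv("PODMAN_VARLINK_ADDRESS")
	if address == "" {
		address = defaultAddress
	}
	return varlink.NewConnection(address)
}

func main() {
	conn, err := connect()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected to varlink endpoint")
}
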
diff --git a/libpod/adapter/containers_remote.go b/libpod/adapter/containers_remote.go
deleted file mode 100644
index 9623304e5..000000000
--- a/libpod/adapter/containers_remote.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// +build remoteclient
-
-package adapter
-
-import (
- "encoding/json"
-
- iopodman "github.com/containers/libpod/cmd/podman/varlink"
- "github.com/containers/libpod/libpod"
- "github.com/containers/libpod/pkg/inspect"
-)
-
-// Inspect returns an inspect struct from varlink
-func (c *Container) Inspect(size bool) (*inspect.ContainerInspectData, error) {
- reply, err := iopodman.ContainerInspectData().Call(c.Runtime.Conn, c.ID())
- if err != nil {
- return nil, err
- }
- data := inspect.ContainerInspectData{}
- if err := json.Unmarshal([]byte(reply), &data); err != nil {
- return nil, err
- }
- return &data, err
-}
-
-// ID returns the ID of the container
-func (c *Container) ID() string {
- return c.config.ID
-}
-
-// GetArtifact returns a container's artifacts
-func (c *Container) GetArtifact(name string) ([]byte, error) {
- var data []byte
- reply, err := iopodman.ContainerArtifacts().Call(c.Runtime.Conn, c.ID(), name)
- if err != nil {
- return nil, err
- }
- if err := json.Unmarshal([]byte(reply), &data); err != nil {
- return nil, err
- }
- return data, err
-}
-
-// Config returns a container's Config ... same as ctr.Config()
-func (c *Container) Config() *libpod.ContainerConfig {
- if c.config != nil {
- return c.config
- }
- return c.Runtime.Config(c.ID())
-}
diff --git a/libpod/adapter/images_remote.go b/libpod/adapter/images_remote.go
deleted file mode 100644
index e7b38dccc..000000000
--- a/libpod/adapter/images_remote.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build remoteclient
-
-package adapter
-
-import (
- "context"
- "encoding/json"
-
- iopodman "github.com/containers/libpod/cmd/podman/varlink"
- "github.com/containers/libpod/pkg/inspect"
-)
-
-// Inspect returns returns an ImageData struct from over a varlink connection
-func (i *ContainerImage) Inspect(ctx context.Context) (*inspect.ImageData, error) {
- reply, err := iopodman.InspectImage().Call(i.Runtime.Conn, i.ID())
- if err != nil {
- return nil, err
- }
- data := inspect.ImageData{}
- if err := json.Unmarshal([]byte(reply), &data); err != nil {
- return nil, err
- }
- return &data, nil
-}
diff --git a/libpod/adapter/info_remote.go b/libpod/adapter/info_remote.go
deleted file mode 100644
index 3b691ed17..000000000
--- a/libpod/adapter/info_remote.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// +build remoteclient
-
-package adapter
-
-import (
- "encoding/json"
-
- "github.com/containers/libpod/cmd/podman/varlink"
- "github.com/containers/libpod/libpod"
-)
-
-// Info returns information for the host system and its components
-func (r RemoteRuntime) Info() ([]libpod.InfoData, error) {
- // TODO the varlink implementation for info should be updated to match the output for regular info
- var (
- reply []libpod.InfoData
- hostInfo map[string]interface{}
- store map[string]interface{}
- )
-
- registries := make(map[string]interface{})
- insecureRegistries := make(map[string]interface{})
- conn, err := r.Connect()
- if err != nil {
- return nil, err
- }
- defer conn.Close()
- info, err := iopodman.GetInfo().Call(conn)
- if err != nil {
- return nil, err
- }
-
- // info.host -> map[string]interface{}
- h, err := json.Marshal(info.Host)
- if err != nil {
- return nil, err
- }
- json.Unmarshal(h, &hostInfo)
-
- // info.store -> map[string]interface{}
- s, err := json.Marshal(info.Store)
- if err != nil {
- return nil, err
- }
- json.Unmarshal(s, &store)
-
- registries["registries"] = info.Registries
- insecureRegistries["registries"] = info.Insecure_registries
-
- // Add everything to the reply
- reply = append(reply, libpod.InfoData{Type: "host", Data: hostInfo})
- reply = append(reply, libpod.InfoData{Type: "registries", Data: registries})
- reply = append(reply, libpod.InfoData{Type: "insecure registries", Data: insecureRegistries})
- reply = append(reply, libpod.InfoData{Type: "store", Data: store})
- return reply, nil
-}
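
The removed Info method converts the typed varlink reply into the map[string]interface{} payloads that libpod.InfoData carries by marshalling and unmarshalling through JSON (the original silently drops the json.Unmarshal errors). A standalone sketch of that round-trip, with an illustrative hostInfo type standing in for the varlink reply:

package main

import (
	"encoding/json"
	"fmt"
)

// hostInfo stands in for the typed host section of the varlink GetInfo reply.
type hostInfo struct {
	OS       string `json:"os"`
	MemTotal int64  `json:"mem_total"`
}

// structToMap performs the JSON round-trip used above to turn a typed struct
// into the generic map that libpod.InfoData expects, but checks the unmarshal error.
func structToMap(v interface{}) (map[string]interface{}, error) {
	raw, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	out := map[string]interface{}{}
	if err := json.Unmarshal(raw, &out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	m, err := structToMap(hostInfo{OS: "linux", MemTotal: 1 << 30})
	if err != nil {
		panic(err)
	}
	fmt.Println(m["os"], m["mem_total"])
}
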
diff --git a/libpod/adapter/runtime.go b/libpod/adapter/runtime.go
deleted file mode 100644
index 02ef9af07..000000000
--- a/libpod/adapter/runtime.go
+++ /dev/null
@@ -1,312 +0,0 @@
-// +build !remoteclient
-
-package adapter
-
-import (
- "context"
- "io"
- "io/ioutil"
- "os"
- "strconv"
-
- "github.com/containers/buildah"
- "github.com/containers/buildah/imagebuildah"
- "github.com/containers/buildah/pkg/parse"
- "github.com/containers/image/docker/reference"
- "github.com/containers/image/types"
- "github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/libpod"
- "github.com/containers/libpod/libpod/image"
- "github.com/containers/libpod/pkg/rootless"
- "github.com/pkg/errors"
-)
-
-// LocalRuntime describes a typical libpod runtime
-type LocalRuntime struct {
- *libpod.Runtime
- Remote bool
-}
-
-// ContainerImage ...
-type ContainerImage struct {
- *image.Image
-}
-
-// Container ...
-type Container struct {
- *libpod.Container
-}
-
-// Volume ...
-type Volume struct {
- *libpod.Volume
-}
-
-// VolumeFilter is for filtering volumes on the client
-type VolumeFilter func(*Volume) bool
-
-// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
-func GetRuntime(c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
- runtime, err := libpodruntime.GetRuntime(c)
- if err != nil {
- return nil, err
- }
- return &LocalRuntime{
- Runtime: runtime,
- }, nil
-}
-
-// GetImages returns a slice of images in containerimages
-func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) {
- var containerImages []*ContainerImage
- images, err := r.Runtime.ImageRuntime().GetImages()
- if err != nil {
- return nil, err
- }
- for _, i := range images {
- containerImages = append(containerImages, &ContainerImage{i})
- }
- return containerImages, nil
-
-}
-
-// NewImageFromLocal returns a containerimage representation of a image from local storage
-func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) {
- img, err := r.Runtime.ImageRuntime().NewFromLocal(name)
- if err != nil {
- return nil, err
- }
- return &ContainerImage{img}, nil
-}
-
-// LoadFromArchiveReference calls into local storage to load an image from an archive
-func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) {
- var containerImages []*ContainerImage
- imgs, err := r.Runtime.ImageRuntime().LoadFromArchiveReference(ctx, srcRef, signaturePolicyPath, writer)
- if err != nil {
- return nil, err
- }
- for _, i := range imgs {
- ci := ContainerImage{i}
- containerImages = append(containerImages, &ci)
- }
- return containerImages, nil
-}
-
-// New calls into local storage to look for an image in local storage or to pull it
-func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, forcePull bool, label *string) (*ContainerImage, error) {
- img, err := r.Runtime.ImageRuntime().New(ctx, name, signaturePolicyPath, authfile, writer, dockeroptions, signingoptions, forcePull, label)
- if err != nil {
- return nil, err
- }
- return &ContainerImage{img}, nil
-}
-
-// RemoveImage calls into local storage and removes an image
-func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (string, error) {
- return r.Runtime.RemoveImage(ctx, img.Image, force)
-}
-
-// LookupContainer ...
-func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
- ctr, err := r.Runtime.LookupContainer(idOrName)
- if err != nil {
- return nil, err
- }
- return &Container{ctr}, nil
-}
-
-// PruneImages is wrapper into PruneImages within the image pkg
-func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
- return r.ImageRuntime().PruneImages(all)
-}
-
-// Export is a wrapper to container export to a tarfile
-func (r *LocalRuntime) Export(name string, path string) error {
- ctr, err := r.Runtime.LookupContainer(name)
- if err != nil {
- return errors.Wrapf(err, "error looking up container %q", name)
- }
- if os.Geteuid() != 0 {
- state, err := ctr.State()
- if err != nil {
- return errors.Wrapf(err, "cannot read container state %q", ctr.ID())
- }
- if state == libpod.ContainerStateRunning || state == libpod.ContainerStatePaused {
- data, err := ioutil.ReadFile(ctr.Config().ConmonPidFile)
- if err != nil {
- return errors.Wrapf(err, "cannot read conmon PID file %q", ctr.Config().ConmonPidFile)
- }
- conmonPid, err := strconv.Atoi(string(data))
- if err != nil {
- return errors.Wrapf(err, "cannot parse PID %q", data)
- }
- became, ret, err := rootless.JoinDirectUserAndMountNS(uint(conmonPid))
- if err != nil {
- return err
- }
- if became {
- os.Exit(ret)
- }
- } else {
- became, ret, err := rootless.BecomeRootInUserNS()
- if err != nil {
- return err
- }
- if became {
- os.Exit(ret)
- }
- }
- }
-
- return ctr.Export(path)
-}
-
-// Import is a wrapper to import a container image
-func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) {
- return r.Runtime.Import(ctx, source, reference, changes, history, quiet)
-}
-
-// CreateVolume is a wrapper to create volumes
-func (r *LocalRuntime) CreateVolume(ctx context.Context, c *cliconfig.VolumeCreateValues, labels, opts map[string]string) (string, error) {
- var (
- options []libpod.VolumeCreateOption
- volName string
- )
-
- if len(c.InputArgs) > 0 {
- volName = c.InputArgs[0]
- options = append(options, libpod.WithVolumeName(volName))
- }
-
- if c.Flag("driver").Changed {
- options = append(options, libpod.WithVolumeDriver(c.Driver))
- }
-
- if len(labels) != 0 {
- options = append(options, libpod.WithVolumeLabels(labels))
- }
-
- if len(options) != 0 {
- options = append(options, libpod.WithVolumeOptions(opts))
- }
- newVolume, err := r.NewVolume(ctx, options...)
- if err != nil {
- return "", err
- }
- return newVolume.Name(), nil
-}
-
-// RemoveVolumes is a wrapper to remove volumes
-func (r *LocalRuntime) RemoveVolumes(ctx context.Context, c *cliconfig.VolumeRmValues) ([]string, error) {
- return r.Runtime.RemoveVolumes(ctx, c.InputArgs, c.All, c.Force)
-}
-
-// Push is a wrapper to push an image to a registry
-func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error {
- newImage, err := r.ImageRuntime().NewFromLocal(srcName)
- if err != nil {
- return err
- }
- return newImage.PushImageToHeuristicDestination(ctx, destination, manifestMIMEType, authfile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, nil)
-}
-
-// InspectVolumes returns a slice of volumes based on an arg list or --all
-func (r *LocalRuntime) InspectVolumes(ctx context.Context, c *cliconfig.VolumeInspectValues) ([]*Volume, error) {
- var (
- volumes []*libpod.Volume
- err error
- )
-
- if c.All {
- volumes, err = r.GetAllVolumes()
- } else {
- for _, v := range c.InputArgs {
- vol, err := r.GetVolume(v)
- if err != nil {
- return nil, err
- }
- volumes = append(volumes, vol)
- }
- }
- if err != nil {
- return nil, err
- }
- return libpodVolumeToVolume(volumes), nil
-}
-
-// Volumes returns a slice of localruntime volumes
-func (r *LocalRuntime) Volumes(ctx context.Context) ([]*Volume, error) {
- vols, err := r.GetAllVolumes()
- if err != nil {
- return nil, err
- }
- return libpodVolumeToVolume(vols), nil
-}
-
-// libpodVolumeToVolume converts a slice of libpod volumes to a slice
-// of localruntime volumes (same as libpod)
-func libpodVolumeToVolume(volumes []*libpod.Volume) []*Volume {
- var vols []*Volume
- for _, v := range volumes {
- newVol := Volume{
- v,
- }
- vols = append(vols, &newVol)
- }
- return vols
-}
-
-// Build is the wrapper to build images
-func (r *LocalRuntime) Build(ctx context.Context, c *cliconfig.BuildValues, options imagebuildah.BuildOptions, dockerfiles []string) error {
- namespaceOptions, networkPolicy, err := parse.NamespaceOptions(c.PodmanCommand.Command)
- if err != nil {
- return errors.Wrapf(err, "error parsing namespace-related options")
- }
- usernsOption, idmappingOptions, err := parse.IDMappingOptions(c.PodmanCommand.Command)
- if err != nil {
- return errors.Wrapf(err, "error parsing ID mapping options")
- }
- namespaceOptions.AddOrReplace(usernsOption...)
-
- systemContext, err := parse.SystemContextFromOptions(c.PodmanCommand.Command)
- if err != nil {
- return errors.Wrapf(err, "error building system context")
- }
-
- authfile := c.Authfile
- if len(c.Authfile) == 0 {
- authfile = os.Getenv("REGISTRY_AUTH_FILE")
- }
-
- systemContext.AuthFilePath = authfile
- commonOpts, err := parse.CommonBuildOptions(c.PodmanCommand.Command)
- if err != nil {
- return err
- }
-
- options.NamespaceOptions = namespaceOptions
- options.ConfigureNetwork = networkPolicy
- options.IDMappingOptions = idmappingOptions
- options.CommonBuildOpts = commonOpts
- options.SystemContext = systemContext
-
- if c.Flag("runtime").Changed {
- options.Runtime = r.GetOCIRuntimePath()
- }
- if c.Quiet {
- options.ReportWriter = ioutil.Discard
- }
-
- if rootless.IsRootless() {
- options.Isolation = buildah.IsolationOCIRootless
- }
-
- return r.Runtime.Build(ctx, options, dockerfiles...)
-}
-
-// PruneVolumes is a wrapper function for libpod PruneVolumes
-func (r *LocalRuntime) PruneVolumes(ctx context.Context) ([]string, []error) {
- return r.Runtime.PruneVolumes(ctx)
-}
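
CreateVolume above assembles libpod's functional options (WithVolumeName, WithVolumeDriver, WithVolumeLabels, WithVolumeOptions) before calling NewVolume. A self-contained sketch of that option-building pattern with illustrative types; the real options live in libpod/options.go:

package main

import "fmt"

// volume and volumeOption are stand-ins for libpod.Volume and libpod.VolumeCreateOption.
type volume struct {
	name   string
	driver string
	labels map[string]string
}

type volumeOption func(*volume)

func withName(n string) volumeOption              { return func(v *volume) { v.name = n } }
func withDriver(d string) volumeOption            { return func(v *volume) { v.driver = d } }
func withLabels(l map[string]string) volumeOption { return func(v *volume) { v.labels = l } }

// newVolume applies the collected options, as r.NewVolume does above.
func newVolume(opts ...volumeOption) *volume {
	v := &volume{driver: "local"}
	for _, o := range opts {
		o(v)
	}
	return v
}

func main() {
	v := newVolume(withName("myvol"), withLabels(map[string]string{"app": "web"}))
	fmt.Println(v.name, v.driver, v.labels)
}
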
diff --git a/libpod/adapter/runtime_remote.go b/libpod/adapter/runtime_remote.go
deleted file mode 100644
index f63b5875d..000000000
--- a/libpod/adapter/runtime_remote.go
+++ /dev/null
@@ -1,674 +0,0 @@
-// +build remoteclient
-
-package adapter
-
-import (
- "bufio"
- "context"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "strings"
- "time"
-
- "github.com/containers/buildah/imagebuildah"
- "github.com/containers/image/docker/reference"
- "github.com/containers/image/types"
- "github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/varlink"
- "github.com/containers/libpod/libpod"
- "github.com/containers/libpod/libpod/image"
- "github.com/containers/storage/pkg/archive"
- "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "github.com/varlink/go/varlink"
-)
-
-// ImageRuntime is wrapper for image runtime
-type RemoteImageRuntime struct{}
-
-// RemoteRuntime describes a wrapper runtime struct
-type RemoteRuntime struct {
- Conn *varlink.Connection
- Remote bool
-}
-
-// LocalRuntime describes a typical libpod runtime
-type LocalRuntime struct {
- *RemoteRuntime
-}
-
-// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
-func GetRuntime(c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
- runtime := RemoteRuntime{}
- conn, err := runtime.Connect()
- if err != nil {
- return nil, err
- }
- rr := RemoteRuntime{
- Conn: conn,
- Remote: true,
- }
- foo := LocalRuntime{
- &rr,
- }
- return &foo, nil
-}
-
-// Shutdown is a bogus wrapper for compat with the libpod runtime
-func (r RemoteRuntime) Shutdown(force bool) error {
- return nil
-}
-
-// ContainerImage
-type ContainerImage struct {
- remoteImage
-}
-
-type remoteImage struct {
- ID string
- Labels map[string]string
- RepoTags []string
- RepoDigests []string
- Parent string
- Size int64
- Created time.Time
- InputName string
- Names []string
- Digest digest.Digest
- isParent bool
- Runtime *LocalRuntime
-}
-
-// Container ...
-type Container struct {
- remoteContainer
-}
-
-// remoteContainer ....
-type remoteContainer struct {
- Runtime *LocalRuntime
- config *libpod.ContainerConfig
- state *libpod.ContainerState
-}
-
-type VolumeFilter func(*Volume) bool
-
-// Volume is embed for libpod volumes
-type Volume struct {
- remoteVolume
-}
-
-type remoteVolume struct {
- Runtime *LocalRuntime
- config *libpod.VolumeConfig
-}
-
-// GetImages returns a slice of containerimages over a varlink connection
-func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) {
- var newImages []*ContainerImage
- images, err := iopodman.ListImages().Call(r.Conn)
- if err != nil {
- return nil, err
- }
- for _, i := range images {
- name := i.Id
- if len(i.RepoTags) > 1 {
- name = i.RepoTags[0]
- }
- newImage, err := imageInListToContainerImage(i, name, r)
- if err != nil {
- return nil, err
- }
- newImages = append(newImages, newImage)
- }
- return newImages, nil
-}
-
-func imageInListToContainerImage(i iopodman.Image, name string, runtime *LocalRuntime) (*ContainerImage, error) {
- created, err := time.ParseInLocation(time.RFC3339, i.Created, time.UTC)
- if err != nil {
- return nil, err
- }
- ri := remoteImage{
- InputName: name,
- ID: i.Id,
- Labels: i.Labels,
- RepoTags: i.RepoTags,
- RepoDigests: i.RepoTags,
- Parent: i.ParentId,
- Size: i.Size,
- Created: created,
- Names: i.RepoTags,
- isParent: i.IsParent,
- Runtime: runtime,
- }
- return &ContainerImage{ri}, nil
-}
-
-// NewImageFromLocal returns a container image representation of a image over varlink
-func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) {
- img, err := iopodman.GetImage().Call(r.Conn, name)
- if err != nil {
- return nil, err
- }
- return imageInListToContainerImage(img, name, r)
-
-}
-
-// LoadFromArchiveReference creates an image from a local archive
-func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) {
- // TODO We need to find a way to leak certDir, creds, and the tlsverify into this function, normally this would
- // come from cli options but we don't want want those in here either.
- tlsverify := true
- imageID, err := iopodman.PullImage().Call(r.Conn, srcRef.DockerReference().String(), "", "", signaturePolicyPath, &tlsverify)
- if err != nil {
- return nil, err
- }
- newImage, err := r.NewImageFromLocal(imageID)
- if err != nil {
- return nil, err
- }
- return []*ContainerImage{newImage}, nil
-}
-
-// New calls into local storage to look for an image in local storage or to pull it
-func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, forcePull bool, label *string) (*ContainerImage, error) {
- if label != nil {
- return nil, errors.New("the remote client function does not support checking a remote image for a label")
- }
- var (
- tlsVerify bool
- tlsVerifyPtr *bool
- )
- if dockeroptions.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
- tlsVerify = true
- tlsVerifyPtr = &tlsVerify
-
- }
- if dockeroptions.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue {
- tlsVerify = false
- tlsVerifyPtr = &tlsVerify
- }
-
- imageID, err := iopodman.PullImage().Call(r.Conn, name, dockeroptions.DockerCertPath, "", signaturePolicyPath, tlsVerifyPtr)
- if err != nil {
- return nil, err
- }
- newImage, err := r.NewImageFromLocal(imageID)
- if err != nil {
- return nil, err
- }
- return newImage, nil
-}
-
-// IsParent goes through the layers in the store and checks if i.TopLayer is
-// the parent of any other layer in store. Double check that image with that
-// layer exists as well.
-func (ci *ContainerImage) IsParent() (bool, error) {
- return ci.remoteImage.isParent, nil
-}
-
-// ID returns the image ID as a string
-func (ci *ContainerImage) ID() string {
- return ci.remoteImage.ID
-}
-
-// Names returns a string array of names associated with the image
-func (ci *ContainerImage) Names() []string {
- return ci.remoteImage.Names
-}
-
-// Created returns the time the image was created
-func (ci *ContainerImage) Created() time.Time {
- return ci.remoteImage.Created
-}
-
-// Size returns the size of the image
-func (ci *ContainerImage) Size(ctx context.Context) (*uint64, error) {
- usize := uint64(ci.remoteImage.Size)
- return &usize, nil
-}
-
-// Digest returns the image's digest
-func (ci *ContainerImage) Digest() digest.Digest {
- return ci.remoteImage.Digest
-}
-
-// Labels returns a map of the image's labels
-func (ci *ContainerImage) Labels(ctx context.Context) (map[string]string, error) {
- return ci.remoteImage.Labels, nil
-}
-
-// Dangling returns a bool if the image is "dangling"
-func (ci *ContainerImage) Dangling() bool {
- return len(ci.Names()) == 0
-}
-
-// TagImage ...
-func (ci *ContainerImage) TagImage(tag string) error {
- _, err := iopodman.TagImage().Call(ci.Runtime.Conn, ci.ID(), tag)
- return err
-}
-
-// RemoveImage calls varlink to remove an image
-func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (string, error) {
- return iopodman.RemoveImage().Call(r.Conn, img.InputName, force)
-}
-
-// History returns the history of an image and its layers
-func (ci *ContainerImage) History(ctx context.Context) ([]*image.History, error) {
- var imageHistories []*image.History
-
- reply, err := iopodman.HistoryImage().Call(ci.Runtime.Conn, ci.InputName)
- if err != nil {
- return nil, err
- }
- for _, h := range reply {
- created, err := time.ParseInLocation(time.RFC3339, h.Created, time.UTC)
- if err != nil {
- return nil, err
- }
- ih := image.History{
- ID: h.Id,
- Created: &created,
- CreatedBy: h.CreatedBy,
- Size: h.Size,
- Comment: h.Comment,
- }
- imageHistories = append(imageHistories, &ih)
- }
- return imageHistories, nil
-}
-
-// LookupContainer gets basic information about container over a varlink
-// connection and then translates it to a *Container
-func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
- state, err := r.ContainerState(idOrName)
- if err != nil {
- return nil, err
- }
- config := r.Config(idOrName)
- if err != nil {
- return nil, err
- }
-
- rc := remoteContainer{
- r,
- config,
- state,
- }
-
- c := Container{
- rc,
- }
- return &c, nil
-}
-
-func (r *LocalRuntime) GetLatestContainer() (*Container, error) {
- return nil, libpod.ErrNotImplemented
-}
-
-// ContainerState returns the "state" of the container.
-func (r *LocalRuntime) ContainerState(name string) (*libpod.ContainerState, error) { //no-lint
- reply, err := iopodman.ContainerStateData().Call(r.Conn, name)
- if err != nil {
- return nil, err
- }
- data := libpod.ContainerState{}
- if err := json.Unmarshal([]byte(reply), &data); err != nil {
- return nil, err
- }
- return &data, err
-
-}
-
-// Config returns a container config
-func (r *LocalRuntime) Config(name string) *libpod.ContainerConfig {
- // TODO the Spec being returned is not populated. Matt and I could not figure out why. Will defer
- // further looking into it for after devconf.
- // The libpod function for this has no errors so we are kind of in a tough
- // spot here. Logging the errors for now.
- reply, err := iopodman.ContainerConfig().Call(r.Conn, name)
- if err != nil {
- logrus.Error("call to container.config failed")
- }
- data := libpod.ContainerConfig{}
- if err := json.Unmarshal([]byte(reply), &data); err != nil {
- logrus.Error("failed to unmarshal container inspect data")
- }
- return &data
-
-}
-
-// PruneImages is the wrapper call for a remote-client to prune images
-func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
- return iopodman.ImagesPrune().Call(r.Conn, all)
-}
-
-// Export is a wrapper to container export to a tarfile
-func (r *LocalRuntime) Export(name string, path string) error {
- tempPath, err := iopodman.ExportContainer().Call(r.Conn, name, "")
- if err != nil {
- return err
- }
-
- outputFile, err := os.Create(path)
- if err != nil {
- return err
- }
- defer outputFile.Close()
-
- writer := bufio.NewWriter(outputFile)
- defer writer.Flush()
-
- reply, err := iopodman.ReceiveFile().Send(r.Conn, varlink.Upgrade, tempPath, true)
- if err != nil {
- return err
- }
-
- length, _, err := reply()
- if err != nil {
- return errors.Wrap(err, "unable to get file length for transfer")
- }
-
- reader := r.Conn.Reader
- if _, err := io.CopyN(writer, reader, length); err != nil {
- return errors.Wrap(err, "file transer failed")
- }
-
- return nil
-}
-
-// Import implements the remote calls required to import a container image to the store
-func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) {
- // First we send the file to the host
- tempFile, err := r.SendFileOverVarlink(source)
- if err != nil {
- return "", err
- }
- return iopodman.ImportImage().Call(r.Conn, strings.TrimRight(tempFile, ":"), reference, history, changes, true)
-}
-
-func (r *LocalRuntime) Build(ctx context.Context, c *cliconfig.BuildValues, options imagebuildah.BuildOptions, dockerfiles []string) error {
- buildOptions := iopodman.BuildOptions{
- AddHosts: options.CommonBuildOpts.AddHost,
- CgroupParent: options.CommonBuildOpts.CgroupParent,
- CpuPeriod: int64(options.CommonBuildOpts.CPUPeriod),
- CpuQuota: options.CommonBuildOpts.CPUQuota,
- CpuShares: int64(options.CommonBuildOpts.CPUShares),
- CpusetCpus: options.CommonBuildOpts.CPUSetMems,
- CpusetMems: options.CommonBuildOpts.CPUSetMems,
- Memory: options.CommonBuildOpts.Memory,
- MemorySwap: options.CommonBuildOpts.MemorySwap,
- ShmSize: options.CommonBuildOpts.ShmSize,
- Ulimit: options.CommonBuildOpts.Ulimit,
- Volume: options.CommonBuildOpts.Volumes,
- }
-
- buildinfo := iopodman.BuildInfo{
- AdditionalTags: options.AdditionalTags,
- Annotations: options.Annotations,
- BuildArgs: options.Args,
- BuildOptions: buildOptions,
- CniConfigDir: options.CNIConfigDir,
- CniPluginDir: options.CNIPluginPath,
- Compression: string(options.Compression),
- DefaultsMountFilePath: options.DefaultMountsFilePath,
- Dockerfiles: dockerfiles,
- //Err: string(options.Err),
- ForceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
- Iidfile: options.IIDFile,
- Label: options.Labels,
- Layers: options.Layers,
- Nocache: options.NoCache,
- //Out:
- Output: options.Output,
- OutputFormat: options.OutputFormat,
- PullPolicy: options.PullPolicy.String(),
- Quiet: options.Quiet,
- RemoteIntermediateCtrs: options.RemoveIntermediateCtrs,
- //ReportWriter:
- RuntimeArgs: options.RuntimeArgs,
- SignaturePolicyPath: options.SignaturePolicyPath,
- Squash: options.Squash,
- }
- // tar the file
- logrus.Debugf("creating tarball of context dir %s", options.ContextDirectory)
- input, err := archive.Tar(options.ContextDirectory, archive.Uncompressed)
- if err != nil {
- return errors.Wrapf(err, "unable to create tarball of context dir %s", options.ContextDirectory)
- }
-
- // Write the tarball to the fs
- // TODO we might considering sending this without writing to the fs for the sake of performance
- // under given conditions like memory availability.
- outputFile, err := ioutil.TempFile("", "varlink_tar_send")
- if err != nil {
- return err
- }
- defer outputFile.Close()
- logrus.Debugf("writing context dir tarball to %s", outputFile.Name())
-
- _, err = io.Copy(outputFile, input)
- if err != nil {
- return err
- }
-
- logrus.Debugf("completed writing context dir tarball %s", outputFile.Name())
- // Send the context dir tarball over varlink.
- tempFile, err := r.SendFileOverVarlink(outputFile.Name())
- if err != nil {
- return err
- }
- buildinfo.ContextDir = strings.Replace(tempFile, ":", "", -1)
-
- reply, err := iopodman.BuildImage().Send(r.Conn, varlink.More, buildinfo)
- if err != nil {
- return err
- }
-
- for {
- responses, flags, err := reply()
- if err != nil {
- return err
- }
- for _, line := range responses.Logs {
- fmt.Print(line)
- }
- if flags&varlink.Continues == 0 {
- break
- }
- }
- return err
-}
-
-// SendFileOverVarlink sends a file over varlink in an upgraded connection
-func (r *LocalRuntime) SendFileOverVarlink(source string) (string, error) {
- fs, err := os.Open(source)
- if err != nil {
- return "", err
- }
-
- fileInfo, err := fs.Stat()
- if err != nil {
- return "", err
- }
- logrus.Debugf("sending %s over varlink connection", source)
- reply, err := iopodman.SendFile().Send(r.Conn, varlink.Upgrade, "", int64(fileInfo.Size()))
- if err != nil {
- return "", err
- }
- _, _, err = reply()
- if err != nil {
- return "", err
- }
-
- reader := bufio.NewReader(fs)
- _, err = reader.WriteTo(r.Conn.Writer)
- if err != nil {
- return "", err
- }
- logrus.Debugf("file transfer complete for %s", source)
- r.Conn.Writer.Flush()
-
- // All was sent, wait for the ACK from the server
- tempFile, err := r.Conn.Reader.ReadString(':')
- if err != nil {
- return "", err
- }
-
- // r.Conn is kaput at this point due to the upgrade
- if err := r.RemoteRuntime.RefreshConnection(); err != nil {
- return "", err
-
- }
-
- return tempFile, nil
-}
-
-// GetAllVolumes retrieves all the volumes
-func (r *LocalRuntime) GetAllVolumes() ([]*libpod.Volume, error) {
- return nil, libpod.ErrNotImplemented
-}
-
-// RemoveVolume removes a volumes
-func (r *LocalRuntime) RemoveVolume(ctx context.Context, v *libpod.Volume, force, prune bool) error {
- return libpod.ErrNotImplemented
-}
-
-// GetContainers retrieves all containers from the state
-// Filters can be provided which will determine what containers are included in
-// the output. Multiple filters are handled by ANDing their output, so only
-// containers matching all filters are returned
-func (r *LocalRuntime) GetContainers(filters ...libpod.ContainerFilter) ([]*libpod.Container, error) {
- return nil, libpod.ErrNotImplemented
-}
-
-// RemoveContainer removes the given container
-// If force is specified, the container will be stopped first
-// Otherwise, RemoveContainer will return an error if the container is running
-func (r *LocalRuntime) RemoveContainer(ctx context.Context, c *libpod.Container, force, volumes bool) error {
- return libpod.ErrNotImplemented
-}
-
-// CreateVolume creates a volume over a varlink connection for the remote client
-func (r *LocalRuntime) CreateVolume(ctx context.Context, c *cliconfig.VolumeCreateValues, labels, opts map[string]string) (string, error) {
- cvOpts := iopodman.VolumeCreateOpts{
- Options: opts,
- Labels: labels,
- }
- if len(c.InputArgs) > 0 {
- cvOpts.VolumeName = c.InputArgs[0]
- }
-
- if c.Flag("driver").Changed {
- cvOpts.Driver = c.Driver
- }
-
- return iopodman.VolumeCreate().Call(r.Conn, cvOpts)
-}
-
-// RemoveVolumes removes volumes over a varlink connection for the remote client
-func (r *LocalRuntime) RemoveVolumes(ctx context.Context, c *cliconfig.VolumeRmValues) ([]string, error) {
- rmOpts := iopodman.VolumeRemoveOpts{
- All: c.All,
- Force: c.Force,
- Volumes: c.InputArgs,
- }
- return iopodman.VolumeRemove().Call(r.Conn, rmOpts)
-}
-
-func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error {
-
- var (
- tls *bool
- tlsVerify bool
- )
- if dockerRegistryOptions.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue {
- tlsVerify = false
- tls = &tlsVerify
- }
- if dockerRegistryOptions.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
- tlsVerify = true
- tls = &tlsVerify
- }
-
- reply, err := iopodman.PushImage().Send(r.Conn, varlink.More, srcName, destination, tls, signaturePolicyPath, "", dockerRegistryOptions.DockerCertPath, forceCompress, manifestMIMEType, signingOptions.RemoveSignatures, signingOptions.SignBy)
- if err != nil {
- return err
- }
- for {
- responses, flags, err := reply()
- if err != nil {
- return err
- }
- for _, line := range responses.Logs {
- fmt.Print(line)
- }
- if flags&varlink.Continues == 0 {
- break
- }
- }
-
- return err
-}
-
-// InspectVolumes returns a slice of volumes based on an arg list or --all
-func (r *LocalRuntime) InspectVolumes(ctx context.Context, c *cliconfig.VolumeInspectValues) ([]*Volume, error) {
- reply, err := iopodman.GetVolumes().Call(r.Conn, c.InputArgs, c.All)
- if err != nil {
- return nil, err
- }
- return varlinkVolumeToVolume(r, reply), nil
-}
-
-//Volumes returns a slice of adapter.volumes based on information about libpod
-// volumes over a varlink connection
-func (r *LocalRuntime) Volumes(ctx context.Context) ([]*Volume, error) {
- reply, err := iopodman.GetVolumes().Call(r.Conn, []string{}, true)
- if err != nil {
- return nil, err
- }
- return varlinkVolumeToVolume(r, reply), nil
-}
-
-func varlinkVolumeToVolume(r *LocalRuntime, volumes []iopodman.Volume) []*Volume {
- var vols []*Volume
- for _, v := range volumes {
- volumeConfig := libpod.VolumeConfig{
- Name: v.Name,
- Labels: v.Labels,
- MountPoint: v.MountPoint,
- Driver: v.Driver,
- Options: v.Options,
- Scope: v.Scope,
- }
- n := remoteVolume{
- Runtime: r,
- config: &volumeConfig,
- }
- newVol := Volume{
- n,
- }
- vols = append(vols, &newVol)
- }
- return vols
-}
-
-// PruneVolumes removes all unused volumes from the remote system
-func (r *LocalRuntime) PruneVolumes(ctx context.Context) ([]string, []error) {
- var errs []error
- prunedNames, prunedErrors, err := iopodman.VolumesPrune().Call(r.Conn)
- if err != nil {
- return []string{}, []error{err}
- }
- // We need to transform the string results of the error into actual error types
- for _, e := range prunedErrors {
- errs = append(errs, errors.New(e))
- }
- return prunedNames, errs
-}
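
Both New and Push above repeat the same tri-state conversion: a types.OptionalBool from the CLI becomes a *bool for the varlink call, with nil meaning "unset, let the server decide". A small sketch of that conversion factored into a helper; the helper itself is illustrative, while the import path matches the vendored containers/image used in this tree:

package main

import (
	"fmt"

	"github.com/containers/image/types"
)

// tlsVerifyPtr converts DockerInsecureSkipTLSVerify into the *bool the varlink
// calls expect: nil leaves the decision to the server, otherwise the value is
// the inverse of "skip verification".
func tlsVerifyPtr(skip types.OptionalBool) *bool {
	switch skip {
	case types.OptionalBoolTrue:
		v := false
		return &v
	case types.OptionalBoolFalse:
		v := true
		return &v
	default: // types.OptionalBoolUndefined
		return nil
	}
}

func main() {
	fmt.Println(tlsVerifyPtr(types.OptionalBoolUndefined) == nil) // true: let the server decide
	fmt.Println(*tlsVerifyPtr(types.OptionalBoolFalse))           // true: verify TLS
}
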
diff --git a/libpod/adapter/volumes_remote.go b/libpod/adapter/volumes_remote.go
deleted file mode 100644
index beacd943a..000000000
--- a/libpod/adapter/volumes_remote.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// +build remoteclient
-
-package adapter
-
-// Name returns the name of the volume
-func (v *Volume) Name() string {
- return v.config.Name
-}
-
-//Labels returns the labels for a volume
-func (v *Volume) Labels() map[string]string {
- return v.config.Labels
-}
-
-// Driver returns the driver for the volume
-func (v *Volume) Driver() string {
- return v.config.Driver
-}
-
-// Options returns the options a volume was created with
-func (v *Volume) Options() map[string]string {
- return v.config.Options
-}
-
-// MountPath returns the path the volume is mounted to
-func (v *Volume) MountPoint() string {
- return v.config.MountPoint
-}
-
-// Scope returns the scope for an adapter.volume
-func (v *Volume) Scope() string {
- return v.config.Scope
-}
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 5bc15dd7f..25ef5cd0e 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -783,6 +783,94 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
return ctrs, nil
}
+// RewriteContainerConfig rewrites a container's configuration.
+// WARNING: This function is DANGEROUS. Do not use without reading the full
+// comment on this function in state.go.
+func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return ErrCtrRemoved
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ ctrBkt, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
+ if ctrDB == nil {
+ ctr.valid = false
+ return errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+ }
+
+ if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID())
+ }
+
+ return nil
+ })
+ return err
+}
+
+// RewritePodConfig rewrites a pod's configuration.
+// WARNING: This function is DANGEROUS. Do not use without reading the full
+// comment on this function in state.go.
+func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ if !pod.valid {
+ return ErrPodRemoved
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", pod.ID())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ podBkt, err := getPodBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ podDB := podBkt.Bucket([]byte(pod.ID()))
+ if podDB == nil {
+ pod.valid = false
+ return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
+ }
+
+ if err := podDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating pod %s config JSON", pod.ID())
+ }
+
+ return nil
+ })
+ return err
+}
+
// Pod retrieves a pod given its full ID
func (s *BoltState) Pod(id string) (*Pod, error) {
if id == "" {
@@ -1281,10 +1369,6 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
return ErrDBClosed
}
- if !volume.valid {
- return ErrVolumeRemoved
- }
-
volName := []byte(volume.Name())
db, err := s.getDBCon()
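
RewriteContainerConfig and RewritePodConfig added above share one shape: marshal the new config, open the DB, and overwrite a single key inside an Update transaction. A standalone sketch of that transactional overwrite, written against go.etcd.io/bbolt as an assumption (this tree vendors its own bolt import) and with illustrative bucket and key names:

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("state.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	id := []byte("deadbeef")                   // container ID, illustrative
	newCfgJSON := []byte(`{"name":"renamed"}`) // pre-marshalled configuration

	// Overwrite the config key for one container inside a single write transaction,
	// mirroring the db.Update(...) body of RewriteContainerConfig above.
	err = db.Update(func(tx *bolt.Tx) error {
		ctrBkt, err := tx.CreateBucketIfNotExists([]byte("containers"))
		if err != nil {
			return err
		}
		ctrDB, err := ctrBkt.CreateBucketIfNotExists(id)
		if err != nil {
			return err
		}
		return ctrDB.Put([]byte("config"), newCfgJSON)
	})
	if err != nil {
		log.Fatal(err)
	}
}
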
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index ea150cfac..3d749849d 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -348,13 +348,6 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
}
- // Get the lock
- lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
- if err != nil {
- return errors.Wrapf(err, "error retrieving lockfile for volume %s", string(name))
- }
- volume.lock = lock
-
volume.runtime = s.runtime
volume.valid = true
diff --git a/libpod/container_commit.go b/libpod/container_commit.go
index 026611e51..5c4fd1a31 100644
--- a/libpod/container_commit.go
+++ b/libpod/container_commit.go
@@ -162,7 +162,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
importBuilder.SetWorkDir(splitChange[1])
}
}
- candidates, _, err := util.ResolveName(destImage, "", sc, c.runtime.store)
+ candidates, _, _, err := util.ResolveName(destImage, "", sc, c.runtime.store)
if err != nil {
return nil, errors.Wrapf(err, "error resolving name %q", destImage)
}
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index b5f93c8a0..e3753d825 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -652,16 +652,19 @@ func (c *Container) startDependencies(ctx context.Context) error {
return errors.Wrapf(err, "error generating dependency graph for container %s", c.ID())
}
- ctrErrors := make(map[string]error)
- // reset ctrsVisisted for next round of recursion
- ctrsVisited := make(map[string]bool)
-
// If there are no containers without dependencies, we can't start
// Error out
if len(graph.noDepNodes) == 0 {
+ // we have no dependencies that need starting, go ahead and return
+ if len(graph.nodes) == 0 {
+ return nil
+ }
return errors.Wrapf(ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID())
}
+ ctrErrors := make(map[string]error)
+ ctrsVisited := make(map[string]bool)
+
// Traverse the graph beginning at nodes with no dependencies
for _, node := range graph.noDepNodes {
startNode(ctx, node, false, ctrErrors, ctrsVisited, true)
@@ -698,10 +701,18 @@ func (c *Container) getAllDependencies(visited map[string]*Container) error {
if err != nil {
return err
}
- visited[depID] = dep
- if err := dep.getAllDependencies(visited); err != nil {
+ status, err := dep.State()
+ if err != nil {
return err
}
+ // if the dependency is already running, we can assume its dependencies are also running
+ // so no need to add them to those we need to start
+ if status != ContainerStateRunning {
+ visited[depID] = dep
+ if err := dep.getAllDependencies(visited); err != nil {
+ return err
+ }
+ }
}
}
return nil
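
The getAllDependencies change above prunes the recursion at any dependency that is already running, on the assumption that its own dependencies must then be running too. A small standalone sketch of that pruned traversal with illustrative types:

package main

import "fmt"

type ctr struct {
	id      string
	running bool
	deps    []*ctr
}

// collectStopped mirrors the updated getAllDependencies: running dependencies
// (and, by assumption, everything beneath them) are not scheduled for start.
func collectStopped(c *ctr, visited map[string]*ctr) {
	for _, dep := range c.deps {
		if _, seen := visited[dep.id]; seen {
			continue
		}
		if dep.running {
			continue // already up, so its own dependencies must be up as well
		}
		visited[dep.id] = dep
		collectStopped(dep, visited)
	}
}

func main() {
	db := &ctr{id: "db", running: true}
	cache := &ctr{id: "cache"}
	app := &ctr{id: "app", deps: []*ctr{db, cache}}

	need := map[string]*ctr{}
	collectStopped(app, need)
	fmt.Println(len(need)) // 1: only "cache" needs starting
}
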
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 86f94477e..f182b6bdf 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -26,7 +26,6 @@ import (
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/secrets"
"github.com/containers/storage/pkg/idtools"
- "github.com/mrunalp/fileutils"
"github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
@@ -677,20 +676,12 @@ func (c *Container) makeBindMounts() error {
// If it doesn't, don't copy them
resolvPath, exists := bindMounts["/etc/resolv.conf"]
if exists {
- resolvDest := filepath.Join(c.state.RunDir, "resolv.conf")
- if err := fileutils.CopyFile(resolvPath, resolvDest); err != nil {
- return errors.Wrapf(err, "error copying resolv.conf from dependency container %s of container %s", depCtr.ID(), c.ID())
- }
- c.state.BindMounts["/etc/resolv.conf"] = resolvDest
- }
+ c.state.BindMounts["/etc/resolv.conf"] = resolvPath
+ }
hostsPath, exists := bindMounts["/etc/hosts"]
if exists {
- hostsDest := filepath.Join(c.state.RunDir, "hosts")
- if err := fileutils.CopyFile(hostsPath, hostsDest); err != nil {
- return errors.Wrapf(err, "error copying hosts file from dependency container %s of container %s", depCtr.ID(), c.ID())
- }
- c.state.BindMounts["/etc/hosts"] = hostsDest
+ c.state.BindMounts["/etc/hosts"] = hostsPath
}
} else {
newResolv, err := c.generateResolvConf()
@@ -705,6 +696,14 @@ func (c *Container) makeBindMounts() error {
}
c.state.BindMounts["/etc/hosts"] = newHosts
}
+
+ if err := label.Relabel(c.state.BindMounts["/etc/hosts"], c.config.MountLabel, true); err != nil {
+ return err
+ }
+
+ if err := label.Relabel(c.state.BindMounts["/etc/resolv.conf"], c.config.MountLabel, true); err != nil {
+ return err
+ }
}
// SHM is always added when we mount the container
@@ -758,8 +757,24 @@ func (c *Container) makeBindMounts() error {
// generateResolvConf generates a containers resolv.conf
func (c *Container) generateResolvConf() (string, error) {
+ resolvConf := "/etc/resolv.conf"
+ for _, ns := range c.config.Spec.Linux.Namespaces {
+ if ns.Type == spec.NetworkNamespace {
+ if ns.Path != "" && !strings.HasPrefix(ns.Path, "/proc/") {
+ definedPath := filepath.Join("/etc/netns", filepath.Base(ns.Path), "resolv.conf")
+ _, err := os.Stat(definedPath)
+ if err == nil {
+ resolvConf = definedPath
+ } else if !os.IsNotExist(err) {
+ return "", errors.Wrapf(err, "failed to stat %s", definedPath)
+ }
+ }
+ break
+ }
+ }
+
// Determine the endpoint for resolv.conf in case it is a symlink
- resolvPath, err := filepath.EvalSymlinks("/etc/resolv.conf")
+ resolvPath, err := filepath.EvalSymlinks(resolvConf)
if err != nil {
return "", err
}
@@ -809,7 +824,7 @@ func (c *Container) generateResolvConf() (string, error) {
}
// Relabel resolv.conf for the container
- if err := label.Relabel(destPath, c.config.MountLabel, false); err != nil {
+ if err := label.Relabel(destPath, c.config.MountLabel, true); err != nil {
return "", err
}
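
generateResolvConf now prefers /etc/netns/<name>/resolv.conf when the container joins a named network namespace (for example one created with ip netns) and falls back to the host's /etc/resolv.conf otherwise, following any symlink. A standalone sketch of that path selection; the helper name is illustrative:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// resolvConfForNetNS picks the resolv.conf source the same way the updated
// generateResolvConf does: a named netns gets /etc/netns/<name>/resolv.conf
// if it exists, anything else (including /proc/... paths) uses the host file.
func resolvConfForNetNS(nsPath string) (string, error) {
	resolvConf := "/etc/resolv.conf"
	if nsPath != "" && !strings.HasPrefix(nsPath, "/proc/") {
		candidate := filepath.Join("/etc/netns", filepath.Base(nsPath), "resolv.conf")
		if _, err := os.Stat(candidate); err == nil {
			resolvConf = candidate
		} else if !os.IsNotExist(err) {
			return "", err
		}
	}
	// Resolve any symlink so the file is read from its real location.
	return filepath.EvalSymlinks(resolvConf)
}

func main() {
	p, err := resolvConfForNetNS("/run/netns/myns")
	fmt.Println(p, err)
}
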
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 028a795ea..b20419d7b 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -5,14 +5,18 @@ import (
"encoding/json"
"fmt"
"io"
+ "os"
"strings"
"syscall"
"time"
types2 "github.com/containernetworking/cni/pkg/types"
cp "github.com/containers/image/copy"
+ "github.com/containers/image/directory"
+ dockerarchive "github.com/containers/image/docker/archive"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ ociarchive "github.com/containers/image/oci/archive"
is "github.com/containers/image/storage"
"github.com/containers/image/tarball"
"github.com/containers/image/transports"
@@ -26,8 +30,9 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/reexec"
digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
- opentracing "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -1084,3 +1089,65 @@ func (i *Image) Comment(ctx context.Context, manifestType string) (string, error
}
return ociv1Img.History[0].Comment, nil
}
+
+// Save writes a container image to the filesystem
+func (i *Image) Save(ctx context.Context, source, format, output string, moreTags []string, quiet, compress bool) error {
+ var (
+ writer io.Writer
+ destRef types.ImageReference
+ manifestType string
+ err error
+ )
+
+ if quiet {
+ writer = os.Stderr
+ }
+ switch format {
+ case "oci-archive":
+ destImageName := imageNameForSaveDestination(i, source)
+ destRef, err = ociarchive.NewReference(output, destImageName) // destImageName may be ""
+ if err != nil {
+ return errors.Wrapf(err, "error getting OCI archive ImageReference for (%q, %q)", output, destImageName)
+ }
+ case "oci-dir":
+ destRef, err = directory.NewReference(output)
+ if err != nil {
+ return errors.Wrapf(err, "error getting directory ImageReference for %q", output)
+ }
+ manifestType = imgspecv1.MediaTypeImageManifest
+ case "docker-dir":
+ destRef, err = directory.NewReference(output)
+ if err != nil {
+ return errors.Wrapf(err, "error getting directory ImageReference for %q", output)
+ }
+ manifestType = manifest.DockerV2Schema2MediaType
+ case "docker-archive", "":
+ dst := output
+ destImageName := imageNameForSaveDestination(i, source)
+ if destImageName != "" {
+ dst = fmt.Sprintf("%s:%s", dst, destImageName)
+ }
+ destRef, err = dockerarchive.ParseReference(dst) // FIXME? Add dockerarchive.NewReference
+ if err != nil {
+ return errors.Wrapf(err, "error getting Docker archive ImageReference for %q", dst)
+ }
+ default:
+ return errors.Errorf("unknown format option %q", format)
+ }
+ // supports saving multiple tags to the same tar archive
+ var additionaltags []reference.NamedTagged
+ if len(moreTags) > 0 {
+ additionaltags, err = GetAdditionalTags(moreTags)
+ if err != nil {
+ return err
+ }
+ }
+ if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", writer, compress, SigningOptions{}, &DockerRegistryOptions{}, additionaltags); err != nil {
+ if err2 := os.Remove(output); err2 != nil {
+ logrus.Errorf("error deleting %q: %v", output, err)
+ }
+ return errors.Wrapf(err, "unable to save %q", source)
+ }
+
+ return nil
+}
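
Save above maps the user-facing format values onto image references and, for the directory formats, a fixed manifest MIME type. A hypothetical helper summarising just the format-to-manifest mapping, using the same vendored constants the file imports:

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// manifestTypeForFormat mirrors the switch in Save: directory formats pin an
// explicit manifest MIME type, archive formats keep the source manifest type.
func manifestTypeForFormat(format string) (string, error) {
	switch format {
	case "oci-archive", "docker-archive", "":
		return "", nil // empty string means "keep the default"
	case "oci-dir":
		return imgspecv1.MediaTypeImageManifest, nil
	case "docker-dir":
		return manifest.DockerV2Schema2MediaType, nil
	default:
		return "", fmt.Errorf("unknown format option %q", format)
	}
}

func main() {
	fmt.Println(manifestTypeForFormat("oci-dir"))
	fmt.Println(manifestTypeForFormat("docker-archive"))
}
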
diff --git a/libpod/image/search.go b/libpod/image/search.go
new file mode 100644
index 000000000..212eff00b
--- /dev/null
+++ b/libpod/image/search.go
@@ -0,0 +1,277 @@
+package image
+
+import (
+ "context"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/containers/image/docker"
+ "github.com/containers/image/types"
+ "github.com/containers/libpod/libpod/common"
+ sysreg "github.com/containers/libpod/pkg/registries"
+ "github.com/fatih/camelcase"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sync/semaphore"
+)
+
+const (
+ descriptionTruncLength = 44
+ maxQueries = 25
+ maxParallelSearches = int64(6)
+)
+
+// SearchResult is holding image-search related data.
+type SearchResult struct {
+ // Index is the image index (e.g., "docker.io" or "quay.io")
+ Index string
+ // Name is the canoncical name of the image (e.g., "docker.io/library/alpine").
+ Name string
+ // Description of the image.
+ Description string
+ // Stars is the number of stars of the image.
+ Stars int
+ // Official indicates if it's an official image.
+ Official string
+ // Automated indicates if the image was created by an automated build.
+ Automated string
+}
+
+// SearchOptions are used to control the behaviour of SearchImages.
+type SearchOptions struct {
+ // Filter allows to filter the results.
+ Filter SearchFilter
+ // Limit limits the number of queries per index (default: 25). Must be
+ // greater than 0 to overwrite the default value.
+ Limit int
+ // NoTrunc avoids the output to be truncated.
+ NoTrunc bool
+ // Authfile is the path to the authentication file.
+ Authfile string
+ // InsecureSkipTLSVerify allows to skip TLS verification.
+ InsecureSkipTLSVerify types.OptionalBool
+}
+
+// SearchFilter allows filtering the results of SearchImages.
+type SearchFilter struct {
+ // Stars describes the minimal amount of starts of an image.
+ Stars int
+ // IsAutomated decides if only images from automated builds are displayed.
+ IsAutomated types.OptionalBool
+ // IsOfficial decides if only official images are displayed.
+ IsOfficial types.OptionalBool
+}
+
+func splitCamelCase(src string) string {
+ entries := camelcase.Split(src)
+ return strings.Join(entries, " ")
+}
+
+// HeaderMap returns the headers of a SearchResult.
+func (s *SearchResult) HeaderMap() map[string]string {
+ v := reflect.Indirect(reflect.ValueOf(s))
+ values := make(map[string]string, v.NumField())
+
+ for i := 0; i < v.NumField(); i++ {
+ key := v.Type().Field(i).Name
+ value := key
+ values[key] = strings.ToUpper(splitCamelCase(value))
+ }
+ return values
+}
+
+// SearchImages searches images based on term and the specified SearchOptions
+// in all registries.
+func SearchImages(term string, options SearchOptions) ([]SearchResult, error) {
+ // Check if search term has a registry in it
+ registry, err := sysreg.GetRegistry(term)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting registry from %q", term)
+ }
+ if registry != "" {
+ term = term[len(registry)+1:]
+ }
+
+ registries, err := getRegistries(registry)
+ if err != nil {
+ return nil, err
+ }
+
+ // searchOutputData is used as a return value for searching in parallel.
+ type searchOutputData struct {
+ data []SearchResult
+ err error
+ }
+
+ // Let's follow Firefox by limiting parallel downloads to 6.
+ sem := semaphore.NewWeighted(maxParallelSearches)
+ wg := sync.WaitGroup{}
+ wg.Add(len(registries))
+ data := make([]searchOutputData, len(registries))
+
+ searchImageInRegistryHelper := func(index int, registry string) {
+ defer sem.Release(1)
+ defer wg.Done()
+ searchOutput, err := searchImageInRegistry(term, registry, options)
+ data[index] = searchOutputData{data: searchOutput, err: err}
+ }
+
+ ctx := context.Background()
+ for i := range registries {
+ sem.Acquire(ctx, 1)
+ go searchImageInRegistryHelper(i, registries[i])
+ }
+
+ wg.Wait()
+ results := []SearchResult{}
+ for _, d := range data {
+ if d.err != nil {
+ return nil, d.err
+ }
+ results = append(results, d.data...)
+ }
+ return results, nil
+}
+
+// getRegistries returns the list of registries to search, depending on an optional registry specification
+func getRegistries(registry string) ([]string, error) {
+ var registries []string
+ if registry != "" {
+ registries = append(registries, registry)
+ } else {
+ var err error
+ registries, err = sysreg.GetRegistries()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting registries to search")
+ }
+ }
+ return registries, nil
+}
+
+func searchImageInRegistry(term string, registry string, options SearchOptions) ([]SearchResult, error) {
+ // Max number of queries by default is 25
+ limit := maxQueries
+ if options.Limit > 0 {
+ limit = options.Limit
+ }
+
+ sc := common.GetSystemContext("", options.Authfile, false)
+ sc.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify
+ // FIXME: Set this more globally. Probably no reason not to have it in
+ // every types.SystemContext, and to compute the value just once in one
+ // place.
+ sc.SystemRegistriesConfPath = sysreg.SystemRegistriesConfPath()
+ results, err := docker.SearchRegistry(context.TODO(), sc, registry, term, limit)
+ if err != nil {
+ logrus.Errorf("error searching registry %q: %v", registry, err)
+ return []SearchResult{}, nil
+ }
+ index := registry
+ arr := strings.Split(registry, ".")
+ if len(arr) > 2 {
+ index = strings.Join(arr[len(arr)-2:], ".")
+ }
+
+ // limit is the number of results to output
+ // if the total number of results is less than the limit, output all
+ // if the limit has been set by the user, output those number of queries
+ limit = maxQueries
+ if len(results) < limit {
+ limit = len(results)
+ }
+ if options.Limit != 0 && options.Limit < len(results) {
+ limit = options.Limit
+ }
+
+ paramsArr := []SearchResult{}
+ for i := 0; i < limit; i++ {
+ // Check whether query matches filters
+ if !(options.Filter.matchesAutomatedFilter(results[i]) && options.Filter.matchesOfficialFilter(results[i]) && options.Filter.matchesStarFilter(results[i])) {
+ continue
+ }
+ official := ""
+ if results[i].IsOfficial {
+ official = "[OK]"
+ }
+ automated := ""
+ if results[i].IsAutomated {
+ automated = "[OK]"
+ }
+ description := strings.Replace(results[i].Description, "\n", " ", -1)
+ if len(description) > 44 && !options.NoTrunc {
+ description = description[:descriptionTruncLength] + "..."
+ }
+ name := registry + "/" + results[i].Name
+ if index == "docker.io" && !strings.Contains(results[i].Name, "/") {
+ name = index + "/library/" + results[i].Name
+ }
+ params := SearchResult{
+ Index: index,
+ Name: name,
+ Description: description,
+ Official: official,
+ Automated: automated,
+ Stars: results[i].StarCount,
+ }
+ paramsArr = append(paramsArr, params)
+ }
+ return paramsArr, nil
+}
+
+// ParseSearchFilter turns the filter into a SearchFilter that can be used for
+// searching images.
+func ParseSearchFilter(filter []string) (*SearchFilter, error) {
+ sFilter := new(SearchFilter)
+ for _, f := range filter {
+ arr := strings.Split(f, "=")
+ switch arr[0] {
+ case "stars":
+ if len(arr) < 2 {
+ return nil, errors.Errorf("invalid `stars` filter %q, should be stars=<value>", filter)
+ }
+ stars, err := strconv.Atoi(arr[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "incorrect value type for stars filter")
+ }
+ sFilter.Stars = stars
+ case "is-automated":
+ if len(arr) == 2 && arr[1] == "false" {
+ sFilter.IsAutomated = types.OptionalBoolFalse
+ } else {
+ sFilter.IsAutomated = types.OptionalBoolTrue
+ }
+ case "is-official":
+ if len(arr) == 2 && arr[1] == "false" {
+ sFilter.IsOfficial = types.OptionalBoolFalse
+ } else {
+ sFilter.IsOfficial = types.OptionalBoolTrue
+ }
+ default:
+ return nil, errors.Errorf("invalid filter type %q", f)
+ }
+ }
+ return sFilter, nil
+}
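
A short in-package sketch (it could live in search_test.go) of how a parsed filter is applied; the docker.SearchResult fields are the ones already used above, and the import path is the one vendored in this tree:

    package image

    import (
        "fmt"

        "github.com/containers/image/docker"
    )

    // ExampleParseSearchFilter is illustrative only and is not part of this patch.
    func ExampleParseSearchFilter() {
        filter, err := ParseSearchFilter([]string{"stars=3", "is-official"})
        if err != nil {
            panic(err)
        }
        res := docker.SearchResult{Name: "httpd", StarCount: 120, IsOfficial: true}
        // 120 >= 3, the result is official, and is-automated was never set,
        // so all three predicates accept the result.
        fmt.Println(filter.matchesStarFilter(res) &&
            filter.matchesOfficialFilter(res) &&
            filter.matchesAutomatedFilter(res))
        // Output: true
    }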
+
+func (f *SearchFilter) matchesStarFilter(result docker.SearchResult) bool {
+ return result.StarCount >= f.Stars
+}
+
+func (f *SearchFilter) matchesAutomatedFilter(result docker.SearchResult) bool {
+ if f.IsAutomated != types.OptionalBoolUndefined {
+ return result.IsAutomated == (f.IsAutomated == types.OptionalBoolTrue)
+ }
+ return true
+}
+
+func (f *SearchFilter) matchesOfficialFilter(result docker.SearchResult) bool {
+ if f.IsOfficial != types.OptionalBoolUndefined {
+ return result.IsOfficial == (f.IsOfficial == types.OptionalBoolTrue)
+ }
+ return true
+}
diff --git a/libpod/image/utils.go b/libpod/image/utils.go
index 3585428ad..544796a4b 100644
--- a/libpod/image/utils.go
+++ b/libpod/image/utils.go
@@ -1,6 +1,7 @@
package image
import (
+ "fmt"
"io"
"net/url"
"regexp"
@@ -148,3 +149,28 @@ func IsValidImageURI(imguri string) (bool, error) {
}
return true, nil
}
+
+// imageNameForSaveDestination returns a Docker-like reference appropriate for saving img,
+// which the user referred to as imgUserInput; or an empty string, if there is no appropriate
+// reference.
+func imageNameForSaveDestination(img *Image, imgUserInput string) string {
+ if strings.Contains(img.ID(), imgUserInput) {
+ return ""
+ }
+
+ prepend := ""
+ localRegistryPrefix := fmt.Sprintf("%s/", DefaultLocalRegistry)
+ if !strings.HasPrefix(imgUserInput, localRegistryPrefix) {
+ // we need to check if localhost was added to the image name in NewFromLocal
+ for _, name := range img.Names() {
+ // If the user is saving an image in the localhost registry, getLocalImage needs
+ // a name that matches the format localhost/<name>:<tag> or localhost/<name>:latest to correctly
+ // set up the manifest and save.
+ if strings.HasPrefix(name, localRegistryPrefix) && (strings.HasSuffix(name, imgUserInput) || strings.HasSuffix(name, fmt.Sprintf("%s:latest", imgUserInput))) {
+ prepend = localRegistryPrefix
+ break
+ }
+ }
+ }
+ return fmt.Sprintf("%s%s", prepend, imgUserInput)
+}
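
The helper above is easiest to read as a decision table. The sketch below restates that decision with plain inputs purely for illustration (it assumes DefaultLocalRegistry is "localhost", and it is not the library function itself):

    package main

    import (
        "fmt"
        "strings"
    )

    // mirrorSaveName restates imageNameForSaveDestination's logic with plain
    // string inputs, for illustration only.
    func mirrorSaveName(imageID, userInput string, names []string) string {
        if strings.Contains(imageID, userInput) {
            return "" // user referred to the image by (partial) ID; embed no name
        }
        const localPrefix = "localhost/"
        if !strings.HasPrefix(userInput, localPrefix) {
            for _, name := range names {
                if strings.HasPrefix(name, localPrefix) &&
                    (strings.HasSuffix(name, userInput) || strings.HasSuffix(name, userInput+":latest")) {
                    return localPrefix + userInput
                }
            }
        }
        return userInput
    }

    func main() {
        names := []string{"localhost/fedora:latest"}
        fmt.Println(mirrorSaveName("48ecf305d2cf", "fedora", names))       // localhost/fedora
        fmt.Println(mirrorSaveName("48ecf305d2cf", "48ecf305d2cf", names)) // "" (saved by ID)
    }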
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 314799309..ab4fc8ba7 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -378,6 +378,58 @@ func (s *InMemoryState) ContainerInUse(ctr *Container) ([]string, error) {
return arr, nil
}
+// AllContainers retrieves all containers from the state
+func (s *InMemoryState) AllContainers() ([]*Container, error) {
+ ctrs := make([]*Container, 0, len(s.containers))
+ for _, ctr := range s.containers {
+ if s.namespace == "" || ctr.config.Namespace == s.namespace {
+ ctrs = append(ctrs, ctr)
+ }
+ }
+
+ return ctrs, nil
+}
+
+// RewriteContainerConfig rewrites a container's configuration.
+// This function is DANGEROUS, even with an in-memory state.
+// Please read the full comment on it in state.go before using it.
+func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
+ if !ctr.valid {
+ return ErrCtrRemoved
+ }
+
+ // If the container does not exist, return error
+ stateCtr, ok := s.containers[ctr.ID()]
+ if !ok {
+ ctr.valid = false
+ return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
+ }
+
+ stateCtr.config = newCfg
+
+ return nil
+}
+
+// RewritePodConfig rewrites a pod's configuration.
+// This function is DANGEROUS, even with in-memory state.
+// Please read the full comment on it in state.go before using it.
+func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
+ if !pod.valid {
+ return ErrPodRemoved
+ }
+
+ // If the pod does not exist, return error
+ statePod, ok := s.pods[pod.ID()]
+ if !ok {
+ pod.valid = false
+ return errors.Wrapf(ErrNoSuchPod, "pod with ID %s not found in state", pod.ID())
+ }
+
+ statePod.config = newCfg
+
+ return nil
+}
+
// Volume retrieves a volume from its full name
func (s *InMemoryState) Volume(name string) (*Volume, error) {
if name == "" {
@@ -486,18 +538,6 @@ func (s *InMemoryState) AllVolumes() ([]*Volume, error) {
return allVols, nil
}
-// AllContainers retrieves all containers from the state
-func (s *InMemoryState) AllContainers() ([]*Container, error) {
- ctrs := make([]*Container, 0, len(s.containers))
- for _, ctr := range s.containers {
- if s.namespace == "" || ctr.config.Namespace == s.namespace {
- ctrs = append(ctrs, ctr)
- }
- }
-
- return ctrs, nil
-}
-
// Pod retrieves a pod from the state from its full ID
func (s *InMemoryState) Pod(id string) (*Pod, error) {
if id == "" {
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
index db8f20e95..7c9605917 100644
--- a/libpod/lock/in_memory_locks.go
+++ b/libpod/lock/in_memory_locks.go
@@ -89,3 +89,14 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
return m.locks[id], nil
}
+
+// FreeAllLocks frees all locks.
+// This function is DANGEROUS. Please read the full comment in locks.go before
+// trying to use it.
+func (m *InMemoryManager) FreeAllLocks() error {
+ for _, lock := range m.locks {
+ lock.allocated = false
+ }
+
+ return nil
+}
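
A hedged sketch of the new call from outside the package (the constructor name NewInMemoryManager and its numLocks argument are assumed from this package's conventions): after FreeAllLocks, allocation starts over from the first free slot, which is what the renumbering code relies on.

    package main

    import (
        "fmt"

        "github.com/containers/libpod/libpod/lock"
    )

    func main() {
        m, err := lock.NewInMemoryManager(32)
        if err != nil {
            panic(err)
        }

        first, err := m.AllocateLock()
        if err != nil {
            panic(err)
        }

        // Release every lock in one call, as a renumbering pass would.
        if err := m.FreeAllLocks(); err != nil {
            panic(err)
        }

        second, err := m.AllocateLock()
        if err != nil {
            panic(err)
        }
        // With this implementation the first free slot is handed out again.
        fmt.Println(first.ID() == second.ID()) // true
    }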
diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go
index 1f94171fe..d6841646b 100644
--- a/libpod/lock/lock.go
+++ b/libpod/lock/lock.go
@@ -24,6 +24,20 @@ type Manager interface {
 // The underlying lock MUST be the same as any other lock with the
// same UUID.
RetrieveLock(id uint32) (Locker, error)
+ // PLEASE READ FULL DESCRIPTION BEFORE USING.
+ // FreeAllLocks frees all allocated locks, in preparation for lock
+ // reallocation.
+ // As this deallocates all presently-held locks, this can be very
+ // dangerous - if there are other processes running that might be
+ // attempting to allocate new locks and free existing locks, we may
+ // encounter races leading to an inconsistent state.
+ // (This is in addition to the fact that FreeAllLocks instantly makes
+ // the state inconsistent simply by using it, and requires a full
+ // lock renumbering to restore consistency!).
+ // In short, this should only be used as part of unit tests, or lock
+ // renumbering, where reasonable guarantees about other processes can be
+ // made.
+ FreeAllLocks() error
}
// Locker is similar to sync.Locker, but provides a method for freeing the lock
diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c
index 4af58d857..d11fce71a 100644
--- a/libpod/lock/shm/shm_lock.c
+++ b/libpod/lock/shm/shm_lock.c
@@ -203,6 +203,8 @@ shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code) {
// terminating NULL byte.
// Returns a valid pointer on success or NULL on error.
// If an error occurs, negative ERRNO values will be written to error_code.
+// ERANGE is returned for a mismatch between num_locks and the number of locks
+// available in the SHM lock struct.
shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
int shm_fd;
shm_struct_t *shm;
@@ -255,11 +257,11 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
// Need to check the SHM to see if it's actually our locks
if (shm->magic != MAGIC) {
- *error_code = -1 * errno;
+ *error_code = -1 * EBADF;
goto CLEANUP;
}
if (shm->num_locks != (num_bitmaps * BITMAP_SIZE)) {
- *error_code = -1 * errno;
+ *error_code = -1 * ERANGE;
goto CLEANUP;
}
@@ -407,6 +409,36 @@ int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
return 0;
}
+// Deallocate all semaphores unconditionally.
+// Returns 0 on success, or a negative errno value on failure.
+int32_t deallocate_all_semaphores(shm_struct_t *shm) {
+ int ret_code;
+ uint i;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Lock the mutex controlling access to our shared memory
+ ret_code = take_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ // Iterate through all bitmaps and reset to unused
+ for (i = 0; i < shm->num_bitmaps; i++) {
+ shm->locks[i].bitmap = 0;
+ }
+
+ // Unlock the allocation control mutex
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ return 0;
+}
+
// Lock a given semaphore
// Does not check if the semaphore is allocated - this ensures that, even for
// removed containers, we can still successfully lock to check status (and
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index 87d28e5c1..e70ea8743 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -155,6 +155,22 @@ func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
return nil
}
+// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
+// other containers and pods.
+func (locks *SHMLocks) DeallocateAllSemaphores() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ retCode := C.deallocate_all_semaphores(locks.lockStruct)
+ if retCode < 0 {
+ // Negative errno return from C
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
// LockSemaphore locks the given semaphore.
// If the semaphore is already locked, LockSemaphore will block until the lock
// can be acquired.
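
A sketch of the Go-level call, replacing the old loop over DeallocateSemaphore; the CreateSHMLock constructor name and the requirement that numLocks be a multiple of the 32-bit bitmap are assumptions about this package, and the code needs Linux plus cgo:

    package main

    import (
        "fmt"

        "github.com/containers/libpod/libpod/lock/shm"
    )

    func main() {
        // 128 locks: a multiple of the bitmap size used by the C layer.
        locks, err := shm.CreateSHMLock("/libpod_example_lock", 128)
        if err != nil {
            panic(err)
        }
        defer locks.Close()

        sem, err := locks.AllocateSemaphore()
        if err != nil {
            panic(err)
        }
        fmt.Println("allocated semaphore", sem)

        // One call frees every semaphore, allocated or not, instead of
        // looping and ignoring ENOENT for unallocated slots.
        if err := locks.DeallocateAllSemaphores(); err != nil {
            panic(err)
        }
    }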
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
index 8e7e23fb7..58e4297e2 100644
--- a/libpod/lock/shm/shm_lock.h
+++ b/libpod/lock/shm/shm_lock.h
@@ -40,6 +40,7 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
int32_t close_lock_shm(shm_struct_t *shm);
int64_t allocate_semaphore(shm_struct_t *shm);
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
+int32_t deallocate_all_semaphores(shm_struct_t *shm);
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);
diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go
index 594eb5d8e..830035881 100644
--- a/libpod/lock/shm/shm_lock_test.go
+++ b/libpod/lock/shm/shm_lock_test.go
@@ -4,7 +4,6 @@ import (
"fmt"
"os"
"runtime"
- "syscall"
"testing"
"time"
@@ -53,12 +52,8 @@ func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
}
defer func() {
// Deallocate all locks
- // Ignore ENOENT (lock is not allocated)
- var i uint32
- for i = 0; i < numLocks; i++ {
- if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT {
- t.Fatalf("Error deallocating semaphore %d: %v", i, err)
- }
+ if err := locks.DeallocateAllSemaphores(); err != nil {
+ t.Fatalf("Error deallocating semaphores: %v", err)
}
if err := locks.Close(); err != nil {
@@ -212,6 +207,25 @@ func TestAllocateDeallocateCycle(t *testing.T) {
})
}
+// Test that DeallocateAllSemaphores deallocates all semaphores
+func TestDeallocateAllSemaphoresDeallocatesAll(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // Allocate a lock
+ locks1, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ // Free all locks
+ err = locks.DeallocateAllSemaphores()
+ assert.NoError(t, err)
+
+ // Allocate another lock
+ locks2, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ assert.Equal(t, locks1, locks2)
+ })
+}
+
// Test that locks actually lock
func TestLockSemaphoreActuallyLocks(t *testing.T) {
runLockTest(t, func(t *testing.T, locks *SHMLocks) {
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 94dfd7dd7..8678958ee 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -71,6 +71,13 @@ func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
return lock, nil
}
+// FreeAllLocks frees all locks in the manager.
+// This function is DANGEROUS. Please read the full comment in locks.go before
+// trying to use it.
+func (m *SHMLockManager) FreeAllLocks() error {
+ return m.locks.DeallocateAllSemaphores()
+}
+
// SHMLock is an individual shared memory lock.
type SHMLock struct {
lockID uint32
diff --git a/libpod/lock/shm_lock_manager_unsupported.go b/libpod/lock/shm_lock_manager_unsupported.go
index cbdb2f7bc..1d6e3fcbd 100644
--- a/libpod/lock/shm_lock_manager_unsupported.go
+++ b/libpod/lock/shm_lock_manager_unsupported.go
@@ -27,3 +27,8 @@ func (m *SHMLockManager) AllocateLock() (Locker, error) {
func (m *SHMLockManager) RetrieveLock(id string) (Locker, error) {
return nil, fmt.Errorf("not supported")
}
+
+// FreeAllLocks is not supported on this platform
+func (m *SHMLockManager) FreeAllLocks() error {
+ return fmt.Errorf("not supported")
+}
diff --git a/libpod/options.go b/libpod/options.go
index 06737776b..e22c81f91 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -149,6 +149,7 @@ func WithOCIRuntime(runtime string) RuntimeOption {
}
rt.config.OCIRuntime = runtime
+ rt.config.RuntimePath = nil
return nil
}
@@ -394,6 +395,22 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption {
}
}
+// WithRenumber instructs libpod to perform a lock renumbering while
+// initializing. This will handle migrations from early versions of libpod with
+// file locks to newer versions with SHM locking, as well as changes in the
+// number of configured locks.
+func WithRenumber() RuntimeOption {
+ return func(rt *Runtime) error {
+ if rt.valid {
+ return ErrRuntimeFinalized
+ }
+
+ rt.doRenumber = true
+
+ return nil
+ }
+}
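
A sketch of how a caller, for instance a hypothetical `podman system renumber` code path, might request renumbering; NewRuntime and Shutdown are existing libpod entry points, everything else here is illustrative:

    package main

    import (
        "github.com/containers/libpod/libpod"
    )

    func renumber() error {
        // All other libpod processes must be stopped first; see the warnings
        // on FreeAllLocks in libpod/lock.
        runtime, err := libpod.NewRuntime(libpod.WithRenumber())
        if err != nil {
            return err
        }
        // Renumbering runs during init; afterwards the runtime is simply shut
        // down again.
        return runtime.Shutdown(false)
    }

    func main() {
        if err := renumber(); err != nil {
            panic(err)
        }
    }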
+
// Container Creation Options
// WithShmDir sets the directory that should be mounted on /dev/shm.
@@ -887,10 +904,10 @@ func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netmo
}
ctr.config.PostConfigureNetNS = postConfigureNetNS
- ctr.config.CreateNetNS = true
+ ctr.config.NetMode = namespaces.NetworkMode(netmode)
+ ctr.config.CreateNetNS = !ctr.config.NetMode.IsUserDefined()
ctr.config.PortMappings = portMappings
ctr.config.Networks = networks
- ctr.config.NetMode = namespaces.NetworkMode(netmode)
return nil
}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 4f5d1e292..94dbf37dd 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -7,6 +7,7 @@ import (
"os/exec"
"path/filepath"
"sync"
+ "syscall"
"github.com/BurntSushi/toml"
is "github.com/containers/image/storage"
@@ -79,7 +80,8 @@ type RuntimeOption func(*Runtime) error
// Runtime is the core libpod runtime
type Runtime struct {
- config *RuntimeConfig
+ config *RuntimeConfig
+
state State
store storage.Store
storageService *storageService
@@ -88,12 +90,23 @@ type Runtime struct {
netPlugin ocicni.CNIPlugin
ociRuntimePath OCIRuntimePath
conmonPath string
- valid bool
- lock sync.RWMutex
imageRuntime *image.Runtime
firewallBackend firewall.FirewallBackend
lockManager lock.Manager
configuredFrom *runtimeConfiguredFrom
+
+ // doRenumber indicates that the runtime should perform a lock renumber
+ // during initialization.
+ // Once the runtime has been initialized and returned, this variable is
+ // unused.
+ doRenumber bool
+
+ // valid indicates whether the runtime is ready to use.
+ // valid is set to true when a runtime is returned from GetRuntime(),
+ // and remains true until the runtime is shut down (rendering its
+ // storage unusable). When valid is false, the runtime cannot be used.
+ valid bool
+ lock sync.RWMutex
}
// OCIRuntimePath contains information about an OCI runtime.
@@ -753,6 +766,7 @@ func makeRuntime(runtime *Runtime) (err error) {
aliveLock.Unlock()
}
}()
+
_, err = os.Stat(runtimeAliveFile)
if err != nil {
// If the file doesn't exist, we need to refresh the state
@@ -778,12 +792,35 @@ func makeRuntime(runtime *Runtime) (err error) {
if err != nil {
return err
}
+ } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
+ logrus.Debugf("Number of locks does not match - removing old locks")
+
+ // ERANGE indicates a lock numbering mismatch.
+ // Since we're renumbering, this is not fatal.
+ // Remove the earlier set of locks and recreate.
+ if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
+ return errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
+ }
+
+ manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ return err
+ }
} else {
return err
}
}
runtime.lockManager = manager
+ // If we're renumbering locks, do it now.
+ // Lock IDs in the state are rewritten here, before the rest of runtime
+ // init continues.
+ if runtime.doRenumber {
+ if err := runtime.renumberLocks(); err != nil {
+ return err
+ }
+ }
+
// If we need to refresh the state, do it now - things are guaranteed to
// be set up by now.
if doRefresh {
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 1055da75b..2ec8d0795 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -421,7 +421,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
for _, v := range volumes {
if volume, err := runtime.state.Volume(v); err == nil {
- if err := runtime.removeVolume(ctx, volume, false, true); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
+ if err := runtime.removeVolume(ctx, volume, false); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
logrus.Errorf("cleanup volume (%s): %v", v, err)
}
}
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index 1e9689362..451c2ebe7 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -3,7 +3,6 @@ package libpod
import (
"context"
"fmt"
- "github.com/opencontainers/image-spec/specs-go/v1"
"io"
"io/ioutil"
"net/http"
@@ -15,6 +14,11 @@ import (
"github.com/containers/libpod/pkg/util"
"github.com/containers/storage"
"github.com/pkg/errors"
+
+ "github.com/containers/image/directory"
+ dockerarchive "github.com/containers/image/docker/archive"
+ ociarchive "github.com/containers/image/oci/archive"
+ "github.com/opencontainers/image-spec/specs-go/v1"
)
// Runtime API
@@ -211,3 +215,41 @@ func downloadFromURL(source string) (string, error) {
return outFile.Name(), nil
}
+
+// LoadImage loads a container image into local storage
+func (r *Runtime) LoadImage(ctx context.Context, name, inputFile string, writer io.Writer, signaturePolicy string) (string, error) {
+ var newImages []*image.Image
+ src, err := dockerarchive.ParseReference(inputFile) // FIXME? We should add dockerarchive.NewReference()
+ if err == nil {
+ newImages, err = r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ }
+ if err != nil {
+ // generate full src name with specified image:tag
+ src, err := ociarchive.NewReference(inputFile, name) // imageName may be ""
+ if err == nil {
+ newImages, err = r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ }
+ if err != nil {
+ src, err := directory.NewReference(inputFile)
+ if err == nil {
+ newImages, err = r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ }
+ if err != nil {
+ return "", errors.Wrapf(err, "error pulling %q", name)
+ }
+ }
+ }
+ return getImageNames(newImages), nil
+}
+
+func getImageNames(images []*image.Image) string {
+ var names string
+ for i := range images {
+ if i == 0 {
+ names = images[i].InputName
+ } else {
+ names += ", " + images[i].InputName
+ }
+ }
+ return names
+}
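
A hedged usage sketch of the new LoadImage entry point (the tarball path and the empty name and signature-policy arguments are illustrative); it tries docker-archive, then oci-archive, then a directory layout, in that order:

    package main

    import (
        "context"
        "fmt"
        "os"

        "github.com/containers/libpod/libpod"
    )

    // loadTarball assumes runtime is an already-initialized *libpod.Runtime.
    func loadTarball(runtime *libpod.Runtime) error {
        names, err := runtime.LoadImage(context.TODO(), "", "/tmp/fedora.tar", os.Stdout, "")
        if err != nil {
            return err
        }
        fmt.Println("Loaded image(s):", names)
        return nil
    }

    func main() {
        runtime, err := libpod.NewRuntime()
        if err != nil {
            panic(err)
        }
        defer runtime.Shutdown(false)
        if err := loadTarball(runtime); err != nil {
            panic(err)
        }
    }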
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
new file mode 100644
index 000000000..125cf0825
--- /dev/null
+++ b/libpod/runtime_renumber.go
@@ -0,0 +1,57 @@
+package libpod
+
+import (
+ "github.com/pkg/errors"
+)
+
+// renumberLocks reassigns lock numbers for all containers and pods in the
+// state.
+// TODO: It would be desirable to make it impossible to call this until all
+// other libpod sessions are dead.
+// Possibly use a read-write file lock, with all non-renumbering podman processes
+// holding it as readers, and renumber attempting to take a write lock?
+// The alternative is some sort of session tracking, and I don't know how
+// reliable that can be.
+func (r *Runtime) renumberLocks() error {
+ // Start off by deallocating all locks
+ if err := r.lockManager.FreeAllLocks(); err != nil {
+ return err
+ }
+
+ allCtrs, err := r.state.AllContainers()
+ if err != nil {
+ return err
+ }
+ for _, ctr := range allCtrs {
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return errors.Wrapf(err, "error allocating lock for container %s", ctr.ID())
+ }
+
+ ctr.config.LockID = lock.ID()
+
+ // Write the new lock ID
+ if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
+ return err
+ }
+ }
+ allPods, err := r.state.AllPods()
+ if err != nil {
+ return err
+ }
+ for _, pod := range allPods {
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return errors.Wrapf(err, "error allocating lock for pod %s", pod.ID())
+ }
+
+ pod.config.LockID = lock.ID()
+
+ // Write the new lock ID
+ if err := r.state.RewritePodConfig(pod, pod.config); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index beae50ac9..11f37ad4b 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -19,7 +19,7 @@ type VolumeCreateOption func(*Volume) error
type VolumeFilter func(*Volume) bool
 // RemoveVolume removes a volume
-func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error {
r.lock.Lock()
defer r.lock.Unlock()
@@ -35,10 +35,7 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool
}
}
- v.lock.Lock()
- defer v.lock.Unlock()
-
- return r.removeVolume(ctx, v, force, prune)
+ return r.removeVolume(ctx, v, force)
}
 // RemoveVolumes removes the named volumes (or all volumes), optionally forcing removal
@@ -64,7 +61,7 @@ func (r *Runtime) RemoveVolumes(ctx context.Context, volumes []string, all, forc
}
for _, vol := range vols {
- if err := r.RemoveVolume(ctx, vol, force, false); err != nil {
+ if err := r.RemoveVolume(ctx, vol, force); err != nil {
return deletedVols, err
}
logrus.Debugf("removed volume %s", vol.Name())
@@ -168,8 +165,8 @@ func (r *Runtime) PruneVolumes(ctx context.Context) ([]string, []error) {
}
for _, vol := range vols {
- if err := r.RemoveVolume(ctx, vol, false, true); err != nil {
- if err != ErrVolumeBeingUsed {
+ if err := r.RemoveVolume(ctx, vol, false); err != nil {
+ if errors.Cause(err) != ErrVolumeBeingUsed && errors.Cause(err) != ErrVolumeRemoved {
pruneErrors = append(pruneErrors, err)
}
continue
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 0727cfedf..838c0167a 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -67,13 +67,6 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
volume.config.MountPoint = fullVolPath
- lock, err := r.lockManager.AllocateLock()
- if err != nil {
- return nil, errors.Wrapf(err, "error allocating lock for new volume")
- }
- volume.lock = lock
- volume.config.LockID = volume.lock.ID()
-
volume.valid = true
// Add the volume to state
@@ -85,9 +78,12 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
 // removeVolume removes the specified volume from state, and tears down its mountpoint and storage
-func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
if !v.valid {
- return ErrNoSuchVolume
+ if ok, _ := r.state.HasVolume(v.Name()); !ok {
+ return nil
+ }
+ return ErrVolumeRemoved
}
deps, err := r.state.VolumeInUse(v)
@@ -95,9 +91,6 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
return err
}
if len(deps) != 0 {
- if prune {
- return ErrVolumeBeingUsed
- }
depsStr := strings.Join(deps, ", ")
if !force {
return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
@@ -112,18 +105,20 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
}
}
- // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
- if err := v.teardownStorage(); err != nil {
- return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
- }
+ // Set volume as invalid so it can no longer be used
+ v.valid = false
// Remove the volume from the state
if err := r.state.RemoveVolume(v); err != nil {
return errors.Wrapf(err, "error removing volume %s", v.Name())
}
- // Set volume as invalid so it can no longer be used
- v.valid = false
+ // Delete the volume's mountpoint path, i.e. remove its directory under /var/lib/containers/storage/volumes
+ if err := v.teardownStorage(); err != nil {
+ return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+ }
+
+ logrus.Debugf("Removed volume %s", v.Name())
return nil
}
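
With the prune parameter gone, external callers now pass only force; prune-specific handling of in-use volumes lives in PruneVolumes. A hedged sketch of the new call from code that already holds an initialized *libpod.Runtime (GetVolume is assumed to be the public lookup here; substitute whatever accessor the caller already uses, and "mydata" is an example name):

    // removeExampleVolume is illustrative only.
    func removeExampleVolume(ctx context.Context, runtime *libpod.Runtime) error {
        vol, err := runtime.GetVolume("mydata")
        if err != nil {
            return err
        }
        // force=false: removal fails with ErrVolumeBeingUsed while a container
        // still uses the volume.
        return runtime.RemoveVolume(ctx, vol, false)
    }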
diff --git a/libpod/state.go b/libpod/state.go
index 88d89f673..98282fc83 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -97,6 +97,30 @@ type State interface {
// returned.
AllContainers() ([]*Container, error)
+ // PLEASE READ FULL DESCRIPTION BEFORE USING.
+ // Rewrite a container's configuration.
+ // This function breaks libpod's normal prohibition on a read-only
+ // configuration, and as such should be used EXTREMELY SPARINGLY and
+ // only in very specific circumstances.
+ // Specifically, it is ONLY safe to use this function to make changes
+ // that result in a functionally identical configuration (migrating to
+ // newer, but identical, configuration fields), or during libpod init
+ // WHILE HOLDING THE ALIVE LOCK (to prevent other libpod instances from
+ // being initialized).
+ // Most things in config can be changed by this, but container ID and
+ // name ABSOLUTELY CANNOT BE ALTERED. If you do so, there is a high
+ // potential for database corruption.
+ // There are a lot of capital letters and conditions here, but the short
+ // answer is this: use this only very sparingly, and only if you really
+ // know what you're doing.
+ RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error
+ // PLEASE READ THE ABOVE DESCRIPTION BEFORE USING.
+ // This function is identical to RewriteContainerConfig, save for the
+ // fact that it is used with pods instead.
+ // It is subject to the same conditions as RewriteContainerConfig.
+ // Please do not use this unless you know what you're doing.
+ RewritePodConfig(pod *Pod, newCfg *PodConfig) error
+
// Accepts full ID of pod.
// If the pod given is not in the set namespace, an error will be
// returned.
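
Beyond renumbering, the only sanctioned use is the "functionally identical" migration case the comment describes. A sketch of what that looks like from inside package libpod, during init and while the alive lock is held (OldStaticDir is a hypothetical legacy field, not part of this patch):

    // migrateStaticDir is illustrative only; it would live in package libpod.
    func (r *Runtime) migrateStaticDir() error {
        ctrs, err := r.state.AllContainers()
        if err != nil {
            return err
        }
        for _, ctr := range ctrs {
            if ctr.config.OldStaticDir == "" || ctr.config.StaticDir != "" {
                continue
            }
            // Same value, newer field: the configuration stays functionally
            // identical, which is the only change this API permits.
            ctr.config.StaticDir = ctr.config.OldStaticDir
            if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
                return err
            }
        }
        return nil
    }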
diff --git a/libpod/state_test.go b/libpod/state_test.go
index 4bd00ab55..be68a2d69 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -1298,6 +1298,78 @@ func TestCannotUseBadIDAsGenericDependency(t *testing.T) {
})
}
+func TestRewriteContainerConfigDoesNotExist(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ err := state.RewriteContainerConfig(&Container{}, &ContainerConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewriteContainerConfigNotInState(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+ err = state.RewriteContainerConfig(testCtr, &ContainerConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewriteContainerConfigRewritesConfig(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ err = state.AddContainer(testCtr)
+ assert.NoError(t, err)
+
+ testCtr.config.LogPath = "/another/path/"
+
+ err = state.RewriteContainerConfig(testCtr, testCtr.config)
+ assert.NoError(t, err)
+
+ testCtrFromState, err := state.Container(testCtr.ID())
+ assert.NoError(t, err)
+
+ testContainersEqual(t, testCtrFromState, testCtr, true)
+ })
+}
+
+func TestRewritePodConfigDoesNotExist(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ err := state.RewritePodConfig(&Pod{}, &PodConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewritePodConfigNotInState(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
+ assert.NoError(t, err)
+ err = state.RewritePodConfig(testPod, &PodConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewritePodConfigRewritesConfig(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
+ assert.NoError(t, err)
+
+ err = state.AddPod(testPod)
+ assert.NoError(t, err)
+
+ testPod.config.CgroupParent = "/another_cgroup_parent"
+
+ err = state.RewritePodConfig(testPod, testPod.config)
+ assert.NoError(t, err)
+
+ testPodFromState, err := state.Pod(testPod.ID())
+ assert.NoError(t, err)
+
+ testPodsEqual(t, testPodFromState, testPod, true)
+ })
+}
+
func TestGetPodDoesNotExist(t *testing.T) {
runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.Pod("doesnotexist")
diff --git a/libpod/volume.go b/libpod/volume.go
index 026a3bf49..74878b6a4 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -1,7 +1,5 @@
package libpod
-import "github.com/containers/libpod/libpod/lock"
-
// Volume is the type used to create named volumes
// TODO: all volumes should be created using this and the Volume API
type Volume struct {
@@ -9,7 +7,6 @@ type Volume struct {
valid bool
runtime *Runtime
- lock lock.Locker
}
// VolumeConfig holds the volume's config information
@@ -17,8 +14,6 @@ type Volume struct {
type VolumeConfig struct {
// Name of the volume
Name string `json:"name"`
- // ID of this volume's lock
- LockID uint32 `json:"lockID"`
Labels map[string]string `json:"labels"`
MountPoint string `json:"mountPoint"`
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 0de8a2350..35f0ca19d 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -18,8 +18,5 @@ func newVolume(runtime *Runtime) (*Volume, error) {
// teardownStorage deletes the volume from volumePath
func (v *Volume) teardownStorage() error {
- if !v.valid {
- return ErrNoSuchVolume
- }
return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name()))
}