Diffstat (limited to 'libpod')
-rw-r--r--    libpod/boltdb_state.go    1
-rw-r--r--    libpod/boltdb_state_internal.go    15
-rw-r--r--    libpod/container.go    6
-rw-r--r--    libpod/container_api.go    26
-rw-r--r--    libpod/container_exec.go    10
-rw-r--r--    libpod/container_inspect.go    9
-rw-r--r--    libpod/container_internal.go    94
-rw-r--r--    libpod/container_internal_linux.go    213
-rw-r--r--    libpod/container_log.go    2
-rw-r--r--    libpod/define/container_inspect.go    9
-rw-r--r--    libpod/define/containerstate.go    5
-rw-r--r--    libpod/define/errors.go    17
-rw-r--r--    libpod/define/volume_inspect.go    51
-rw-r--r--    libpod/driver/driver.go    37
-rw-r--r--    libpod/events/journal_linux.go    7
-rw-r--r--    libpod/image/image.go    21
-rw-r--r--    libpod/image/prune.go    25
-rw-r--r--    libpod/image/pull.go    7
-rw-r--r--    libpod/image/utils.go    3
-rw-r--r--    libpod/in_memory_state.go    20
-rw-r--r--    libpod/network/netconflist.go    2
-rw-r--r--    libpod/networking_linux.go    2
-rw-r--r--    libpod/oci.go    4
-rw-r--r--    libpod/oci_attach_linux.go    29
-rw-r--r--    libpod/oci_attach_linux_cgo.go    11
-rw-r--r--    libpod/oci_attach_linux_nocgo.go    7
-rw-r--r--    libpod/oci_conmon_exec_linux.go    12
-rw-r--r--    libpod/oci_conmon_linux.go    77
-rw-r--r--    libpod/oci_util.go    12
-rw-r--r--    libpod/options.go    15
-rw-r--r--    libpod/plugin/volume_api.go    55
-rw-r--r--    libpod/rootless_cni_linux.go    2
-rw-r--r--    libpod/runtime.go    16
-rw-r--r--    libpod/runtime_ctr.go    176
-rw-r--r--    libpod/runtime_img.go    7
-rw-r--r--    libpod/runtime_pod_infra_linux.go    28
-rw-r--r--    libpod/runtime_volume.go    25
-rw-r--r--    libpod/runtime_volume_linux.go    130
-rw-r--r--    libpod/volume.go    71
-rw-r--r--    libpod/volume_inspect.go    87
-rw-r--r--    libpod/volume_internal.go    13
-rw-r--r--    libpod/volume_internal_linux.go    46
42 files changed, 1085 insertions, 320 deletions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index dcb2ff751..b2ee63b08 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -304,6 +304,7 @@ func (s *BoltState) Refresh() error {
// Reset mount count to 0
oldState.MountCount = 0
+ oldState.MountPoint = ""
newState, err := json.Marshal(oldState)
if err != nil {
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index c06fedd3e..6014fbef3 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -497,6 +497,21 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
}
}
+ // Retrieve volume driver
+ if volume.UsesVolumeDriver() {
+ plugin, err := s.runtime.getVolumePlugin(volume.config.Driver)
+ if err != nil {
+ // We want to fail gracefully here, to ensure that we
+ // can still remove volumes even if their plugin is
+ // missing. Otherwise, we end up with volumes that
+ // cannot even be retrieved from the database and will
+ // cause things like `volume ls` to fail.
+ logrus.Errorf("Volume %s uses volume plugin %s, but it cannot be accessed - some functionality may not be available: %v", volume.Name(), volume.config.Driver, err)
+ } else {
+ volume.plugin = plugin
+ }
+ }
+
// Get the lock
lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
if err != nil {
diff --git a/libpod/container.go b/libpod/container.go
index 96a21736c..58bf95470 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -1012,6 +1012,9 @@ func (c *Container) IDMappings() (storage.IDMappingOptions, error) {
// RootUID returns the root user mapping from container
func (c *Container) RootUID() int {
+ if len(c.config.IDMappings.UIDMap) == 1 && c.config.IDMappings.UIDMap[0].Size == 1 {
+ return c.config.IDMappings.UIDMap[0].HostID
+ }
for _, uidmap := range c.config.IDMappings.UIDMap {
if uidmap.ContainerID == 0 {
return uidmap.HostID
@@ -1022,6 +1025,9 @@ func (c *Container) RootUID() int {
// RootGID returns the root user mapping from container
func (c *Container) RootGID() int {
+ if len(c.config.IDMappings.GIDMap) == 1 && c.config.IDMappings.GIDMap[0].Size == 1 {
+ return c.config.IDMappings.GIDMap[0].HostID
+ }
for _, gidmap := range c.config.IDMappings.GIDMap {
if gidmap.ContainerID == 0 {
return gidmap.HostID
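
The RootUID/RootGID change above short-circuits when there is exactly one ID mapping of size 1: in that case the whole container collapses onto a single host ID, so that HostID is the effective root even when ContainerID is not 0. A minimal, hedged sketch of the lookup (IDMap here is a simplified, hypothetical stand-in for the runtime-spec LinuxIDMapping type):

package main

import "fmt"

// IDMap is a simplified stand-in for a single UID/GID mapping entry.
type IDMap struct {
	ContainerID int
	HostID      int
	Size        int
}

// rootID resolves the host ID that container ID 0 maps to. With a single
// one-entry mapping of size 1, every in-container ID collapses onto one
// host ID, so that HostID is returned even if ContainerID != 0.
func rootID(mappings []IDMap) int {
	if len(mappings) == 1 && mappings[0].Size == 1 {
		return mappings[0].HostID
	}
	for _, m := range mappings {
		if m.ContainerID == 0 {
			return m.HostID
		}
	}
	return 0
}

func main() {
	// e.g. a --uidmap 1000:31337:1 style single-entry mapping
	fmt.Println(rootID([]IDMap{{ContainerID: 1000, HostID: 31337, Size: 1}})) // 31337
}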
diff --git a/libpod/container_api.go b/libpod/container_api.go
index c3e1a23d2..0d62a2dd7 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -210,7 +210,13 @@ func (c *Container) Kill(signal uint) error {
}
// TODO: Is killing a paused container OK?
- if c.state.State != define.ContainerStateRunning {
+ switch c.state.State {
+ case define.ContainerStateRunning, define.ContainerStateStopping:
+ // Note that killing containers in "stopping" state is okay.
+ // In that state, Podman is waiting for the runtime to
+ // stop the container and if that is taking too long, a user
+ // may have decided to kill the container after all.
+ default:
return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
}
@@ -539,7 +545,7 @@ func (c *Container) Cleanup(ctx context.Context) error {
}
// Check if state is good
- if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) {
+ if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateStopping, define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
}
@@ -703,6 +709,16 @@ type ContainerCheckpointOptions struct {
// important to be able to restore a container multiple
// times with '--import --name'.
IgnoreStaticMAC bool
+ // IgnoreVolumes tells the API to not export or import
+ // the content of volumes associated with the container
+ IgnoreVolumes bool
+ // PreCheckPoint pre-checkpoints the container and leaves it running
+ PreCheckPoint bool
+ // WithPrevious dumps the container using the pre-checkpoint images
+ WithPrevious bool
+ // ImportPrevious tells the API to restore container with two
+ // images. One is TargetFile, the other is ImportPrevious.
+ ImportPrevious string
}
// Checkpoint checkpoints a container
@@ -715,6 +731,12 @@ func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointO
}
}
+ if options.WithPrevious {
+ if err := c.canWithPrevious(); err != nil {
+ return err
+ }
+ }
+
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index fce26acb0..5aee847e1 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -8,7 +8,6 @@ import (
"strconv"
"time"
- "github.com/containers/common/pkg/capabilities"
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/events"
"github.com/containers/storage/pkg/stringid"
@@ -973,20 +972,12 @@ func (c *Container) removeAllExecSessions() error {
// Make an ExecOptions struct to start the OCI runtime and prepare its exec
// bundle.
func prepareForExec(c *Container, session *ExecSession) (*ExecOptions, error) {
- // TODO: check logic here - should we set Privileged if the container is
- // privileged?
- var capList []string
- if session.Config.Privileged || c.config.Privileged {
- capList = capabilities.AllCapabilities()
- }
-
if err := c.createExecBundle(session.ID()); err != nil {
return nil, err
}
opts := new(ExecOptions)
opts.Cmd = session.Config.Command
- opts.CapAdd = capList
opts.Env = session.Config.Environment
opts.Terminal = session.Config.Terminal
opts.Cwd = session.Config.WorkDir
@@ -995,6 +986,7 @@ func prepareForExec(c *Container, session *ExecSession) (*ExecOptions, error) {
opts.DetachKeys = session.Config.DetachKeys
opts.ExitCommand = session.Config.ExitCommand
opts.ExitCommandDelay = session.Config.ExitCommandDelay
+ opts.Privileged = session.Config.Privileged
return opts, nil
}
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 51474471b..ac7eae56b 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -49,7 +49,7 @@ func (c *Container) Inspect(size bool) (*define.InspectContainerData, error) {
return c.inspectLocked(size)
}
-func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) (*define.InspectContainerData, error) {
+func (c *Container) getContainerInspectData(size bool, driverData *define.DriverData) (*define.InspectContainerData, error) {
config := c.config
runtimeInfo := c.state
ctrSpec, err := c.specFromState()
@@ -212,7 +212,12 @@ func (c *Container) getInspectMounts(namedVolumes []*ContainerNamedVolume, image
return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID())
}
mountStruct.Driver = volFromDB.Driver()
- mountStruct.Source = volFromDB.MountPoint()
+
+ mountPoint, err := volFromDB.MountPoint()
+ if err != nil {
+ return nil, err
+ }
+ mountStruct.Source = mountPoint
parseMountOptionsForInspect(volume.Options, &mountStruct)
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 540230c26..b9ea50783 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "bufio"
"bytes"
"context"
"fmt"
@@ -134,6 +135,11 @@ func (c *Container) CheckpointPath() string {
return filepath.Join(c.bundlePath(), "checkpoint")
}
+// PreCheckpointPath returns the path to the directory containing the pre-checkpoint-images
+func (c *Container) PreCheckPointPath() string {
+ return filepath.Join(c.bundlePath(), "pre-checkpoint")
+}
+
// AttachSocketPath retrieves the path of the container's attach socket
func (c *Container) AttachSocketPath() (string, error) {
return c.ociRuntime.AttachSocketPath(c)
@@ -758,7 +764,7 @@ func (c *Container) isStopped() (bool, error) {
return true, err
}
- return !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused), nil
+ return !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopping), nil
}
// save container state to the database
@@ -1284,10 +1290,49 @@ func (c *Container) stop(timeout uint) error {
return err
}
+ // Set the container state to "stopping" and unlock the container
+ // before handing it over to conmon to unblock other commands. #8501
+ // demonstrates nicely that a high stop timeout will block even simple
+ // commands such as `podman ps` from progressing if the container lock
+ // is held when busy-waiting for the container to be stopped.
+ c.state.State = define.ContainerStateStopping
+ if err := c.save(); err != nil {
+ return errors.Wrapf(err, "error saving container %s state before stopping", c.ID())
+ }
+ if !c.batched {
+ c.lock.Unlock()
+ }
+
if err := c.ociRuntime.StopContainer(c, timeout, all); err != nil {
return err
}
+ if !c.batched {
+ c.lock.Lock()
+ if err := c.syncContainer(); err != nil {
+ switch errors.Cause(err) {
+ // If the container has already been removed (e.g., via
+ // the cleanup process), there's nothing left to do.
+ case define.ErrNoSuchCtr, define.ErrCtrRemoved:
+ return nil
+ default:
+ return err
+ }
+ }
+ }
+
+ // Since we're now subject to a race condition with other processes that
+ // may have altered the state (and other data), let's check if the
+ // state has changed. If so, we should return immediately and log a
+ // warning.
+ if c.state.State != define.ContainerStateStopping {
+ logrus.Warnf(
+ "Container %q state changed from %q to %q while waiting for it to be stopped: discontinuing stop procedure as another process interfered",
+ c.ID(), define.ContainerStateStopping, c.state.State,
+ )
+ return nil
+ }
+
c.newContainerEvent(events.Stop)
c.state.PID = 0
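
The comments in the hunk above describe the new stop flow: mark the container "stopping", drop the lock while the runtime waits out the stop timeout, then re-lock, sync, and back off if another process changed the state in the meantime. A minimal sketch of that locking pattern, using hypothetical simplified types (containerSketch, runtimeStop, sync are not real libpod APIs):

package main

import (
	"fmt"
	"sync"
	"time"
)

type containerSketch struct {
	lock  sync.Mutex
	state string
}

func (c *containerSketch) save() error { return nil } // stand-in for a DB write
func (c *containerSketch) sync() error { return nil } // stand-in for re-reading state

// runtimeStop stands in for ociRuntime.StopContainer: it may block for the
// full stop timeout while the runtime delivers SIGTERM and then SIGKILL.
func (c *containerSketch) runtimeStop(timeout uint) error {
	time.Sleep(time.Duration(timeout) * time.Millisecond)
	return nil
}

func (c *containerSketch) stop(timeout uint) error {
	c.lock.Lock()
	c.state = "stopping"
	if err := c.save(); err != nil {
		c.lock.Unlock()
		return err
	}
	c.lock.Unlock() // other commands (e.g. `podman ps`) can proceed meanwhile

	err := c.runtimeStop(timeout)

	c.lock.Lock()
	defer c.lock.Unlock()
	if syncErr := c.sync(); syncErr != nil {
		return syncErr // real code treats "no such container" as success here
	}
	if err != nil {
		return err
	}
	if c.state != "stopping" {
		// another process changed the state while we were unlocked; back off
		return nil
	}
	c.state = "stopped"
	return c.save()
}

func main() {
	c := &containerSketch{state: "running"}
	fmt.Println(c.stop(10), c.state) // <nil> stopped
}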
@@ -1535,8 +1580,18 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
return nil, err
}
+ // HACK HACK HACK - copy up into a volume driver is 100% broken
+ // right now.
+ if vol.UsesVolumeDriver() {
+ logrus.Infof("Not copying up into volume %s as it uses a volume driver", vol.Name())
+ return vol, nil
+ }
+
// If the volume is not empty, we should not copy up.
- volMount := vol.MountPoint()
+ volMount, err := vol.MountPoint()
+ if err != nil {
+ return nil, err
+ }
contents, err := ioutil.ReadDir(volMount)
if err != nil {
return nil, errors.Wrapf(err, "error listing contents of volume %s mountpoint when copying up from container %s", vol.Name(), c.ID())
@@ -1574,7 +1629,11 @@ func (c *Container) chownVolume(volumeName string) error {
return err
}
- if vol.state.NeedsChown {
+ // TODO: For now, I've disabled chowning volumes owned by non-Podman
+ // drivers. This may be safe, but it's really going to be a case-by-case
+ // thing, I think - safest to leave disabled now and reenable later if
+ // there is a demand.
+ if vol.state.NeedsChown && !vol.UsesVolumeDriver() {
vol.state.NeedsChown = false
uid := int(c.config.Spec.Process.User.UID)
@@ -1601,7 +1660,10 @@ func (c *Container) chownVolume(volumeName string) error {
return err
}
- mountPoint := vol.MountPoint()
+ mountPoint, err := vol.MountPoint()
+ if err != nil {
+ return err
+ }
if err := os.Lchown(mountPoint, uid, gid); err != nil {
return err
@@ -1860,16 +1922,26 @@ func (c *Container) writeStringToStaticDir(filename, contents string) (string, e
return destFileName, nil
}
-// appendStringToRundir appends the provided string to the runtimedir file
-func (c *Container) appendStringToRundir(destFile, output string) (string, error) {
+// appendStringToRunDir appends the provided string to the runtimedir file
+func (c *Container) appendStringToRunDir(destFile, output string) (string, error) {
destFileName := filepath.Join(c.state.RunDir, destFile)
- f, err := os.OpenFile(destFileName, os.O_APPEND|os.O_WRONLY, 0600)
+ f, err := os.OpenFile(destFileName, os.O_APPEND|os.O_RDWR, 0600)
if err != nil {
return "", err
}
defer f.Close()
+ compareStr := strings.TrimRight(output, "\n")
+ scanner := bufio.NewScanner(f)
+ scanner.Split(bufio.ScanLines)
+
+ for scanner.Scan() {
+ if strings.Compare(scanner.Text(), compareStr) == 0 {
+ return filepath.Join(c.state.RunDir, destFile), nil
+ }
+ }
+
if _, err := f.WriteString(output); err != nil {
return "", errors.Wrapf(err, "unable to write %s", destFileName)
}
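
The appendStringToRunDir change above opens the file read-write and scans existing lines before appending, so the same entry is never written twice. A standalone sketch of that dedup-before-append behaviour (appendUnique is a hypothetical helper, not libpod API):

package main

import (
	"bufio"
	"os"
	"strings"
)

// appendUnique appends line to the file at path only if an identical line
// is not already present.
func appendUnique(path, line string) error {
	f, err := os.OpenFile(path, os.O_APPEND|os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return err
	}
	defer f.Close()

	want := strings.TrimRight(line, "\n")
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		if scanner.Text() == want {
			return nil // already present, nothing to append
		}
	}
	if err := scanner.Err(); err != nil {
		return err
	}
	_, err = f.WriteString(line)
	return err
}

func main() {
	_ = appendUnique("/tmp/hosts-example", "10.88.0.2 ctr1\n")
}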
@@ -2023,6 +2095,12 @@ func (c *Container) checkReadyForRemoval() error {
return nil
}
+// canWithPrevious returns the stat error for the pre-checkpoint dir
+func (c *Container) canWithPrevious() error {
+ _, err := os.Stat(c.PreCheckPointPath())
+ return err
+}
+
// writeJSONFile marshalls and writes the given data to a JSON file
// in the bundle path
func (c *Container) writeJSONFile(v interface{}, file string) error {
@@ -2094,7 +2172,7 @@ func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume
// Check for an exit file, and handle one if present
func (c *Container) checkExitFile() error {
// If the container's not running, nothing to do.
- if !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
+ if !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopping) {
return nil
}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 05b149e03..0553cc59c 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -40,7 +40,6 @@ import (
"github.com/containers/storage/pkg/idtools"
securejoin "github.com/cyphar/filepath-securejoin"
runcuser "github.com/opencontainers/runc/libcontainer/user"
- "github.com/opencontainers/runtime-spec/specs-go"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -284,7 +283,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
return nil, err
}
- g := generate.NewFromSpec(c.config.Spec)
+ g := generate.Generator{Config: c.config.Spec}
// If network namespace was requested, add it now
if c.config.CreateNetNS {
@@ -342,7 +341,10 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if err != nil {
return nil, errors.Wrapf(err, "error retrieving volume %s to add to container %s", namedVol.Name, c.ID())
}
- mountPoint := volume.MountPoint()
+ mountPoint, err := volume.MountPoint()
+ if err != nil {
+ return nil, err
+ }
volMount := spec.Mount{
Type: "bind",
Source: mountPoint,
@@ -400,7 +402,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
return nil, errors.Wrapf(err, "failed to create TempDir in the %s directory", c.config.StaticDir)
}
- var overlayMount specs.Mount
+ var overlayMount spec.Mount
if volume.ReadWrite {
overlayMount, err = overlay.Mount(contentDir, mountPoint, volume.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions())
} else {
@@ -529,14 +531,45 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
}
}
+ availableUIDs, availableGIDs, err := rootless.GetAvailableIDMaps()
+ if err != nil {
+ return nil, err
+ }
+ g.Config.Linux.UIDMappings = rootless.MaybeSplitMappings(g.Config.Linux.UIDMappings, availableUIDs)
+ g.Config.Linux.GIDMappings = rootless.MaybeSplitMappings(g.Config.Linux.GIDMappings, availableGIDs)
+
+ // Hostname handling:
+ // If we have a UTS namespace, set Hostname in the OCI spec.
+ // Set the HOSTNAME environment variable unless explicitly overridden by
+ // the user (already present in OCI spec). If we don't have a UTS ns,
+ // set it to the host's hostname instead.
+ hostname := c.Hostname()
+ foundUTS := false
+
for _, i := range c.config.Spec.Linux.Namespaces {
if i.Type == spec.UTSNamespace && i.Path == "" {
- hostname := c.Hostname()
+ foundUTS = true
g.SetHostname(hostname)
- g.AddProcessEnv("HOSTNAME", hostname)
break
}
}
+ if !foundUTS {
+ tmpHostname, err := os.Hostname()
+ if err != nil {
+ return nil, err
+ }
+ hostname = tmpHostname
+ }
+ needEnv := true
+ for _, checkEnv := range g.Config.Process.Env {
+ if strings.SplitN(checkEnv, "=", 2)[0] == "HOSTNAME" {
+ needEnv = false
+ break
+ }
+ }
+ if needEnv {
+ g.AddProcessEnv("HOSTNAME", hostname)
+ }
if c.config.UTSNsCtr != "" {
if err := c.addNamespaceContainer(&g, UTSNS, c.config.UTSNsCtr, spec.UTSNamespace); err != nil {
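
The hostname handling above now sets HOSTNAME only when the user has not already provided it in the process environment, and falls back to the host's hostname when no private UTS namespace exists. A small sketch of the env check (ensureHostnameEnv is a hypothetical helper):

package main

import (
	"fmt"
	"strings"
)

// ensureHostnameEnv injects HOSTNAME unless the user already set it.
func ensureHostnameEnv(env []string, hostname string) []string {
	for _, kv := range env {
		if strings.SplitN(kv, "=", 2)[0] == "HOSTNAME" {
			return env // explicit user value wins
		}
	}
	return append(env, "HOSTNAME="+hostname)
}

func main() {
	fmt.Println(ensureHostnameEnv([]string{"PATH=/usr/bin"}, "ctr1"))
	fmt.Println(ensureHostnameEnv([]string{"HOSTNAME=custom"}, "ctr1"))
}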
@@ -767,11 +800,11 @@ func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr
return nil
}
-func (c *Container) exportCheckpoint(dest string, ignoreRootfs bool) error {
- if (len(c.config.NamedVolumes) > 0) || (len(c.Dependencies()) > 0) {
- return errors.Errorf("Cannot export checkpoints of containers with named volumes or dependencies")
+func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
+ if len(c.Dependencies()) > 0 {
+ return errors.Errorf("Cannot export checkpoints of containers with dependencies")
}
- logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), dest)
+ logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), options.TargetFile)
includeFiles := []string{
"checkpoint",
@@ -781,10 +814,13 @@ func (c *Container) exportCheckpoint(dest string, ignoreRootfs bool) error {
"spec.dump",
"network.status"}
+ if options.PreCheckPoint {
+ includeFiles[0] = "pre-checkpoint"
+ }
// Get root file-system changes included in the checkpoint archive
rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar")
deleteFilesList := filepath.Join(c.bundlePath(), "deleted.files")
- if !ignoreRootfs {
+ if !options.IgnoreRootfs {
// To correctly track deleted files, let's go through the output of 'podman diff'
tarFiles, err := c.runtime.GetDiff("", c.ID())
if err != nil {
@@ -847,6 +883,55 @@ func (c *Container) exportCheckpoint(dest string, ignoreRootfs bool) error {
}
}
+ // Folder containing archived volumes that will be included in the export
+ expVolDir := filepath.Join(c.bundlePath(), "volumes")
+
+ // Create an archive for each volume associated with the container
+ if !options.IgnoreVolumes {
+ if err := os.MkdirAll(expVolDir, 0700); err != nil {
+ return errors.Wrapf(err, "error creating volumes export directory %q", expVolDir)
+ }
+
+ for _, v := range c.config.NamedVolumes {
+ volumeTarFilePath := filepath.Join("volumes", v.Name+".tar")
+ volumeTarFileFullPath := filepath.Join(c.bundlePath(), volumeTarFilePath)
+
+ volumeTarFile, err := os.Create(volumeTarFileFullPath)
+ if err != nil {
+ return errors.Wrapf(err, "error creating %q", volumeTarFileFullPath)
+ }
+
+ volume, err := c.runtime.GetVolume(v.Name)
+ if err != nil {
+ return err
+ }
+
+ mp, err := volume.MountPoint()
+ if err != nil {
+ return err
+ }
+ if mp == "" {
+ return errors.Wrapf(define.ErrInternal, "volume %s is not mounted, cannot export", volume.Name())
+ }
+
+ input, err := archive.TarWithOptions(mp, &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ IncludeSourceDir: true,
+ })
+ if err != nil {
+ return errors.Wrapf(err, "error reading volume directory %q", v.Dest)
+ }
+
+ _, err = io.Copy(volumeTarFile, input)
+ if err != nil {
+ return err
+ }
+ volumeTarFile.Close()
+
+ includeFiles = append(includeFiles, volumeTarFilePath)
+ }
+ }
+
input, err := archive.TarWithOptions(c.bundlePath(), &archive.TarOptions{
Compression: archive.Gzip,
IncludeSourceDir: true,
@@ -857,13 +942,13 @@ func (c *Container) exportCheckpoint(dest string, ignoreRootfs bool) error {
return errors.Wrapf(err, "error reading checkpoint directory %q", c.ID())
}
- outFile, err := os.Create(dest)
+ outFile, err := os.Create(options.TargetFile)
if err != nil {
- return errors.Wrapf(err, "error creating checkpoint export file %q", dest)
+ return errors.Wrapf(err, "error creating checkpoint export file %q", options.TargetFile)
}
defer outFile.Close()
- if err := os.Chmod(dest, 0600); err != nil {
+ if err := os.Chmod(options.TargetFile, 0600); err != nil {
return err
}
@@ -875,15 +960,19 @@ func (c *Container) exportCheckpoint(dest string, ignoreRootfs bool) error {
os.Remove(rootfsDiffPath)
os.Remove(deleteFilesList)
+ if !options.IgnoreVolumes {
+ os.RemoveAll(expVolDir)
+ }
+
return nil
}
func (c *Container) checkpointRestoreSupported() error {
if !criu.CheckForCriu() {
- return errors.Errorf("Checkpoint/Restore requires at least CRIU %d", criu.MinCriuVersion)
+ return errors.Errorf("checkpoint/restore requires at least CRIU %d", criu.MinCriuVersion)
}
if !c.ociRuntime.SupportsCheckpoint() {
- return errors.Errorf("Configured runtime does not support checkpoint/restore")
+ return errors.Errorf("configured runtime does not support checkpoint/restore")
}
return nil
}
@@ -915,7 +1004,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
if c.AutoRemove() && options.TargetFile == "" {
- return errors.Errorf("Cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
+ return errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
}
if err := c.checkpointRestoreLabelLog("dump.log"); err != nil {
@@ -939,15 +1028,24 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
defer c.newContainerEvent(events.Checkpoint)
+ // There is a bug in CRIU: https://github.com/checkpoint-restore/criu/issues/116
+ // We have to change the symbolic link from an absolute path to a relative path
+ if options.WithPrevious {
+ os.Remove(path.Join(c.CheckpointPath(), "parent"))
+ if err := os.Symlink("../pre-checkpoint", path.Join(c.CheckpointPath(), "parent")); err != nil {
+ return err
+ }
+ }
+
if options.TargetFile != "" {
- if err = c.exportCheckpoint(options.TargetFile, options.IgnoreRootfs); err != nil {
+ if err = c.exportCheckpoint(options); err != nil {
return err
}
}
logrus.Debugf("Checkpointed container %s", c.ID())
- if !options.KeepRunning {
+ if !options.KeepRunning && !options.PreCheckPoint {
c.state.State = define.ContainerStateStopped
// Cleanup Storage and Network
@@ -956,7 +1054,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
}
- if !options.Keep {
+ if !options.Keep && !options.PreCheckPoint {
cleanup := []string{
"dump.log",
"stats-dump",
@@ -992,18 +1090,33 @@ func (c *Container) importCheckpoint(input string) error {
}
err = archive.Untar(archiveFile, c.bundlePath(), options)
if err != nil {
- return errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input)
+ return errors.Wrapf(err, "unpacking of checkpoint archive %s failed", input)
}
// Make sure the newly created config.json exists on disk
g := generate.Generator{Config: c.config.Spec}
if err = c.saveSpec(g.Config); err != nil {
- return errors.Wrap(err, "Saving imported container specification for restore failed")
+ return errors.Wrap(err, "saving imported container specification for restore failed")
}
return nil
}
+func (c *Container) importPreCheckpoint(input string) error {
+ archiveFile, err := os.Open(input)
+ if err != nil {
+ return errors.Wrap(err, "failed to open pre-checkpoint archive for import")
+ }
+
+ defer archiveFile.Close()
+
+ err = archive.Untar(archiveFile, c.bundlePath(), nil)
+ if err != nil {
+ return errors.Wrapf(err, "Unpacking of pre-checkpoint archive %s failed", input)
+ }
+ return nil
+}
+
func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) (retErr error) {
if err := c.checkpointRestoreSupported(); err != nil {
return err
@@ -1013,6 +1126,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
}
+ if options.ImportPrevious != "" {
+ if err := c.importPreCheckpoint(options.ImportPrevious); err != nil {
+ return err
+ }
+ }
+
if options.TargetFile != "" {
if err := c.importCheckpoint(options.TargetFile); err != nil {
return err
@@ -1022,7 +1141,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Let's try to stat() CRIU's inventory file. If it does not exist, it makes
// no sense to try a restore. This is a minimal check if a checkpoint exist.
if _, err := os.Stat(filepath.Join(c.CheckpointPath(), "inventory.img")); os.IsNotExist(err) {
- return errors.Wrapf(err, "A complete checkpoint for this container cannot be found, cannot restore")
+ return errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore")
}
if err := c.checkpointRestoreLabelLog("restore.log"); err != nil {
@@ -1170,6 +1289,36 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return err
}
+ // When restoring from an imported archive, allow restoring the content of volumes.
+ // Volumes are created in setupContainer()
+ if options.TargetFile != "" && !options.IgnoreVolumes {
+ for _, v := range c.config.NamedVolumes {
+ volumeFilePath := filepath.Join(c.bundlePath(), "volumes", v.Name+".tar")
+
+ volumeFile, err := os.Open(volumeFilePath)
+ if err != nil {
+ return errors.Wrapf(err, "failed to open volume file %s", volumeFilePath)
+ }
+ defer volumeFile.Close()
+
+ volume, err := c.runtime.GetVolume(v.Name)
+ if err != nil {
+ return errors.Wrapf(err, "failed to retrieve volume %s", v.Name)
+ }
+
+ mountPoint, err := volume.MountPoint()
+ if err != nil {
+ return err
+ }
+ if mountPoint == "" {
+ return errors.Wrapf(err, "unable to import volume %s as it is not mounted", volume.Name())
+ }
+ if err := archive.UntarUncompressed(volumeFile, mountPoint, nil); err != nil {
+ return errors.Wrapf(err, "Failed to extract volume %s to %s", volumeFilePath, mountPoint)
+ }
+ }
+ }
+
// Before actually restarting the container, apply the root file-system changes
if !options.IgnoreRootfs {
rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar")
@@ -1222,6 +1371,10 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err != nil {
logrus.Debugf("Non-fatal: removal of checkpoint directory (%s) failed: %v", c.CheckpointPath(), err)
}
+ err = os.RemoveAll(c.PreCheckPointPath())
+ if err != nil {
+ logrus.Debugf("Non-fatal: removal of pre-checkpoint directory (%s) failed: %v", c.PreCheckPointPath(), err)
+ }
cleanup := [...]string{"restore.log", "dump.log", "stats-dump", "stats-restore", "network.status", "rootfs-diff.tar", "deleted.files"}
for _, del := range cleanup {
file := filepath.Join(c.bundlePath(), del)
@@ -1382,18 +1535,14 @@ func (c *Container) makeBindMounts() error {
}
if newPasswd != "" {
// Make /etc/passwd
- if _, ok := c.state.BindMounts["/etc/passwd"]; ok {
- // If it already exists, delete so we can recreate
- delete(c.state.BindMounts, "/etc/passwd")
- }
+ // If it already exists, delete so we can recreate
+ delete(c.state.BindMounts, "/etc/passwd")
c.state.BindMounts["/etc/passwd"] = newPasswd
}
if newGroup != "" {
// Make /etc/group
- if _, ok := c.state.BindMounts["/etc/group"]; ok {
- // If it already exists, delete so we can recreate
- delete(c.state.BindMounts, "/etc/group")
- }
+ // If it already exists, delete so we can recreate
+ delete(c.state.BindMounts, "/etc/group")
c.state.BindMounts["/etc/group"] = newGroup
}
@@ -1610,7 +1759,7 @@ func (c *Container) generateHosts(path string) (string, error) {
// FIXME. Path should be used by this function,but I am not sure what is correct; remove //lint
// once this is fixed
func (c *Container) appendHosts(path string, netCtr *Container) (string, error) { //nolint
- return c.appendStringToRundir("hosts", netCtr.getHosts())
+ return c.appendStringToRunDir("hosts", netCtr.getHosts())
}
// getHosts finds the pertinent information for a container's host file in its config and state
diff --git a/libpod/container_log.go b/libpod/container_log.go
index e58503bd3..f16e08353 100644
--- a/libpod/container_log.go
+++ b/libpod/container_log.go
@@ -82,7 +82,7 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
if nll.Partial() {
partial += nll.Msg
continue
- } else if !nll.Partial() && len(partial) > 1 {
+ } else if !nll.Partial() && len(partial) > 0 {
nll.Msg = partial + nll.Msg
partial = ""
}
diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go
index c61f7c159..9a93e2ffd 100644
--- a/libpod/define/container_inspect.go
+++ b/libpod/define/container_inspect.go
@@ -4,7 +4,6 @@ import (
"time"
"github.com/containers/image/v5/manifest"
- "github.com/containers/podman/v2/libpod/driver"
)
// InspectContainerConfig holds further data about how a container was initially
@@ -635,7 +634,7 @@ type InspectContainerData struct {
EffectiveCaps []string `json:"EffectiveCaps"`
BoundingCaps []string `json:"BoundingCaps"`
ExecIDs []string `json:"ExecIDs"`
- GraphDriver *driver.Data `json:"GraphDriver"`
+ GraphDriver *DriverData `json:"GraphDriver"`
SizeRw *int64 `json:"SizeRw,omitempty"`
SizeRootFs int64 `json:"SizeRootFs,omitempty"`
Mounts []InspectMount `json:"Mounts"`
@@ -700,3 +699,9 @@ type InspectExecProcess struct {
// User is the user the exec session was started as.
User string `json:"user"`
}
+
+// DriverData handles the data for a storage driver
+type DriverData struct {
+ Name string `json:"Name"`
+ Data map[string]string `json:"Data"`
+}
diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go
index 825e77387..5d2bc9099 100644
--- a/libpod/define/containerstate.go
+++ b/libpod/define/containerstate.go
@@ -28,6 +28,9 @@ const (
// ContainerStateRemoving indicates the container is in the process of
// being removed.
ContainerStateRemoving ContainerStatus = iota
+ // ContainerStateStopping indicates the container is in the process of
+ // being stopped.
+ ContainerStateStopping ContainerStatus = iota
)
// ContainerStatus returns a string representation for users
@@ -50,6 +53,8 @@ func (t ContainerStatus) String() string {
return "exited"
case ContainerStateRemoving:
return "removing"
+ case ContainerStateStopping:
+ return "stopping"
}
return "bad state"
}
diff --git a/libpod/define/errors.go b/libpod/define/errors.go
index b96d36429..d37bc397e 100644
--- a/libpod/define/errors.go
+++ b/libpod/define/errors.go
@@ -2,6 +2,7 @@ package define
import (
"errors"
+ "fmt"
)
var (
@@ -34,6 +35,10 @@ var (
// aliases.
ErrNoAliases = errors.New("no aliases for container")
+ // ErrMissingPlugin indicates that the requested operation requires a
+ // plugin that is not present on the system or in the configuration.
+ ErrMissingPlugin = errors.New("required plugin missing")
+
// ErrCtrExists indicates a container with the same name or ID already
// exists
ErrCtrExists = errors.New("container already exists")
@@ -181,4 +186,16 @@ var (
// ErrNoNetwork indicates that a container has no net namespace, like network=none
ErrNoNetwork = errors.New("container has no network namespace")
+
+ // ErrSetSecurityAttribute indicates that a request to set a container's security attribute
+ // was not possible.
+ ErrSetSecurityAttribute = fmt.Errorf("%w: unable to assign security attribute", ErrOCIRuntime)
+
+ // ErrGetSecurityAttribute indicates that a request to get a container's security attribute
+ // was not possible.
+ ErrGetSecurityAttribute = fmt.Errorf("%w: unable to get security attribute", ErrOCIRuntime)
+
+ // ErrSecurityAttribute indicates an error occurred while processing
+ // security attributes for the container
+ ErrSecurityAttribute = fmt.Errorf("%w: unable to process security attribute", ErrOCIRuntime)
)
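
The new security-attribute errors wrap ErrOCIRuntime with %w so that callers can match either the specific sentinel or the broader OCI runtime error with errors.Is. A self-contained illustration (local stand-ins are used here so the snippet runs on its own):

package main

import (
	"errors"
	"fmt"
)

var (
	errOCIRuntime           = errors.New("OCI runtime error") // stand-in for define.ErrOCIRuntime
	errSetSecurityAttribute = fmt.Errorf("%w: unable to assign security attribute", errOCIRuntime)
)

func main() {
	err := fmt.Errorf("container abc: %w", errSetSecurityAttribute)
	fmt.Println(errors.Is(err, errSetSecurityAttribute)) // true
	fmt.Println(errors.Is(err, errOCIRuntime))           // true, via the %w chain
}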
diff --git a/libpod/define/volume_inspect.go b/libpod/define/volume_inspect.go
new file mode 100644
index 000000000..20602ea16
--- /dev/null
+++ b/libpod/define/volume_inspect.go
@@ -0,0 +1,51 @@
+package define
+
+import (
+ "time"
+)
+
+// InspectVolumeData is the output of Inspect() on a volume. It is matched to
+// the format of 'docker volume inspect'.
+type InspectVolumeData struct {
+ // Name is the name of the volume.
+ Name string `json:"Name"`
+ // Driver is the driver used to create the volume.
+ // If set to "local" or "", the Local driver (Podman built-in code) is
+ // used to service the volume; otherwise, a volume plugin with the given
+ // name is used to mount and manage the volume.
+ Driver string `json:"Driver"`
+ // Mountpoint is the path on the host where the volume is mounted.
+ Mountpoint string `json:"Mountpoint"`
+ // CreatedAt is the date and time the volume was created at. This is not
+ // stored for older Libpod volumes; if it is not present, it will be omitted.
+ CreatedAt time.Time `json:"CreatedAt,omitempty"`
+ // Status is used to return information on the volume's current state,
+ // if the volume was created using a volume plugin (uses a Driver that
+ // is not the local driver).
+ // Status is provided to us by an external program, so no guarantees are
+ // made about its format or contents. Further, it is an optional field,
+ // so it may not be set even in cases where a volume plugin is in use.
+ Status map[string]interface{} `json:"Status,omitempty"`
+ // Labels includes the volume's configured labels, key:value pairs that
+ // can be passed during volume creation to provide information for third
+ // party tools.
+ Labels map[string]string `json:"Labels"`
+ // Scope is unused and provided solely for Docker compatibility. It is
+ // unconditionally set to "local".
+ Scope string `json:"Scope"`
+ // Options is a set of options that were used when creating the volume.
+ // For the Local driver, these are mount options that will be used to
+ // determine how a local filesystem is mounted; they are handled as
+ // parameters to Mount in a manner described in the volume create
+ // manpage.
+ // For non-local drivers, these are passed as-is to the volume plugin.
+ Options map[string]string `json:"Options"`
+ // UID is the UID that the volume was created with.
+ UID int `json:"UID,omitempty"`
+ // GID is the GID that the volume was created with.
+ GID int `json:"GID,omitempty"`
+ // Anonymous indicates that the volume was created as an anonymous
+ // volume for a specific container, and will be removed when any
+ // container using it is removed.
+ Anonymous bool `json:"Anonymous,omitempty"`
+}
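
A hedged usage sketch for the new type: filling define.InspectVolumeData and printing the docker-compatible JSON that volume inspect emits. The values below are made up, and the example assumes the podman v2 module is on the build path:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/podman/v2/libpod/define"
)

func main() {
	data := define.InspectVolumeData{
		Name:       "myvol",
		Driver:     "local",
		Mountpoint: "/var/lib/containers/storage/volumes/myvol/_data",
		Labels:     map[string]string{"app": "demo"},
		Scope:      "local",
		Options:    map[string]string{},
	}
	out, _ := json.MarshalIndent(data, "", "  ")
	fmt.Println(string(out))
}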
diff --git a/libpod/driver/driver.go b/libpod/driver/driver.go
index 85eda5a21..de71c1f6e 100644
--- a/libpod/driver/driver.go
+++ b/libpod/driver/driver.go
@@ -1,40 +1,17 @@
package driver
import (
- cstorage "github.com/containers/storage"
+ "github.com/containers/podman/v2/libpod/define"
+ "github.com/containers/storage"
)
-// Data handles the data for a storage driver
-type Data struct {
- Name string `json:"Name"`
- Data map[string]string `json:"Data"`
-}
-
-// GetDriverName returns the name of the driver for the given store
-func GetDriverName(store cstorage.Store) (string, error) {
- driver, err := store.GraphDriver()
- if err != nil {
- return "", err
- }
- return driver.String(), nil
-}
-
-// GetDriverMetadata returns the metadata regarding the driver for the layer in the given store
-func GetDriverMetadata(store cstorage.Store, layerID string) (map[string]string, error) {
+// GetDriverData returns information on a given store's running graph driver.
+func GetDriverData(store storage.Store, layerID string) (*define.DriverData, error) {
driver, err := store.GraphDriver()
if err != nil {
return nil, err
}
- return driver.Metadata(layerID)
-}
-
-// GetDriverData returns the Data struct with information of the driver used by the store
-func GetDriverData(store cstorage.Store, layerID string) (*Data, error) {
- name, err := GetDriverName(store)
- if err != nil {
- return nil, err
- }
- metaData, err := GetDriverMetadata(store, layerID)
+ metaData, err := driver.Metadata(layerID)
if err != nil {
return nil, err
}
@@ -42,8 +19,8 @@ func GetDriverData(store cstorage.Store, layerID string) (*Data, error) {
delete(metaData, "MergedDir")
}
- return &Data{
- Name: name,
+ return &define.DriverData{
+ Name: driver.String(),
Data: metaData,
}, nil
}
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 71c638017..8b7e448b1 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -84,7 +84,11 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
if err != nil {
return err
}
-
+ defer func() {
+ if err := j.Close(); err != nil {
+ logrus.Errorf("Unable to close journal :%v", err)
+ }
+ }()
// match only podman journal entries
podmanJournal := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"}
if err := j.AddMatch(podmanJournal.String()); err != nil {
@@ -112,7 +116,6 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
if err != nil {
return errors.Wrap(err, "failed to get journal cursor")
}
-
for {
select {
case <-ctx.Done():
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 5c3f3b9e4..d732aecfe 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -17,7 +17,6 @@ import (
"github.com/containers/common/pkg/retry"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/directory"
- "github.com/containers/image/v5/docker/archive"
dockerarchive "github.com/containers/image/v5/docker/archive"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
@@ -30,6 +29,7 @@ import (
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
+ "github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/driver"
"github.com/containers/podman/v2/libpod/events"
"github.com/containers/podman/v2/pkg/inspect"
@@ -37,7 +37,6 @@ import (
"github.com/containers/podman/v2/pkg/util"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
@@ -185,7 +184,7 @@ func (ir *Runtime) SaveImages(ctx context.Context, namesOrIDs []string, format s
sys := GetSystemContext("", "", false)
- archWriter, err := archive.NewWriter(sys, outputFile)
+ archWriter, err := dockerarchive.NewWriter(sys, outputFile)
if err != nil {
return err
}
@@ -291,7 +290,7 @@ func (ir *Runtime) LoadAllImagesFromDockerArchive(ctx context.Context, fileName
}
sc := GetSystemContext(signaturePolicyPath, "", false)
- reader, err := archive.NewReader(sc, fileName)
+ reader, err := dockerarchive.NewReader(sc, fileName)
if err != nil {
return nil, err
}
@@ -497,7 +496,7 @@ func (ir *Runtime) getLocalImage(inputName string) (string, *storage.Image, erro
return inputName, repoImage, nil
}
- return "", nil, errors.Wrapf(ErrNoSuchImage, err.Error())
+ return "", nil, err
}
// ID returns the image ID as a string
@@ -972,7 +971,7 @@ func (i *Image) toImageRef(ctx context.Context) (types.Image, error) {
}
// DriverData gets the driver data from the store on a layer
-func (i *Image) DriverData() (*driver.Data, error) {
+func (i *Image) DriverData() (*define.DriverData, error) {
return driver.GetDriverData(i.imageruntime.store, i.TopLayer())
}
@@ -1148,7 +1147,7 @@ func (i *Image) GetLabel(ctx context.Context, label string) (string, error) {
}
for k, v := range labels {
- if strings.ToLower(k) == strings.ToLower(label) {
+ if strings.EqualFold(k, label) {
return v, nil
}
}
@@ -1326,7 +1325,7 @@ func (ir *Runtime) Import(ctx context.Context, path, reference string, writer io
annotations := make(map[string]string)
- // config imgspecv1.Image
+ // config ociv1.Image
err = updater.ConfigUpdate(imageConfig, annotations)
if err != nil {
return nil, errors.Wrapf(err, "error updating image config")
@@ -1435,7 +1434,7 @@ func (i *Image) IsParent(ctx context.Context) (bool, error) {
// historiesMatch returns the number of entries in the histories which have the
// same contents
-func historiesMatch(a, b []imgspecv1.History) int {
+func historiesMatch(a, b []ociv1.History) int {
i := 0
for i < len(a) && i < len(b) {
if a[i].Created != nil && b[i].Created == nil {
@@ -1468,7 +1467,7 @@ func historiesMatch(a, b []imgspecv1.History) int {
// areParentAndChild checks diff ID and history in the two images and return
// true if the second should be considered to be directly based on the first
-func areParentAndChild(parent, child *imgspecv1.Image) bool {
+func areParentAndChild(parent, child *ociv1.Image) bool {
// the child and candidate parent should share all of the
// candidate parent's diff IDs, which together would have
// controlled which layers were used
@@ -1621,7 +1620,7 @@ func (i *Image) Save(ctx context.Context, source, format, output string, moreTag
if err != nil {
return errors.Wrapf(err, "error getting the OCI directory ImageReference for (%q, %q)", output, destImageName)
}
- manifestType = imgspecv1.MediaTypeImageManifest
+ manifestType = ociv1.MediaTypeImageManifest
case "docker-dir":
destRef, err = directory.NewReference(output)
if err != nil {
diff --git a/libpod/image/prune.go b/libpod/image/prune.go
index b38265a7e..587c99333 100644
--- a/libpod/image/prune.go
+++ b/libpod/image/prune.go
@@ -6,6 +6,7 @@ import (
"time"
"github.com/containers/podman/v2/libpod/events"
+ "github.com/containers/podman/v2/pkg/domain/entities/reports"
"github.com/containers/podman/v2/pkg/timetype"
"github.com/containers/storage"
"github.com/pkg/errors"
@@ -28,7 +29,7 @@ func generatePruneFilterFuncs(filter, filterValue string) (ImageFilter, error) {
return false
}
for labelKey, labelValue := range labels {
- if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
+ if labelKey == filterKey && (filterValue == "" || labelValue == filterValue) {
return true
}
}
@@ -110,7 +111,8 @@ func (ir *Runtime) GetPruneImages(ctx context.Context, all bool, filterFuncs []I
// PruneImages prunes dangling and optionally all unused images from the local
// image store
-func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) {
+func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) ([]*reports.PruneReport, error) {
+ preports := make([]*reports.PruneReport, 0)
filterFuncs := make([]ImageFilter, 0, len(filter))
for _, f := range filter {
filterSplit := strings.SplitN(f, "=", 2)
@@ -125,7 +127,6 @@ func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) (
filterFuncs = append(filterFuncs, generatedFunc)
}
- pruned := []string{}
prev := 0
for {
toPrune, err := ir.GetPruneImages(ctx, all, filterFuncs)
@@ -143,6 +144,13 @@ func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) (
if err != nil {
return nil, err
}
+ nameOrID := img.ID()
+ s, err := img.Size(ctx)
+ if err != nil {
+ logrus.Warnf("Failed to collect image size for: %s, %s", nameOrID, err)
+ }
+ imgSize := uint64(0)
+ if s != nil {
+ imgSize = uint64(*s)
+ }
if err := img.Remove(ctx, false); err != nil {
if errors.Cause(err) == storage.ErrImageUsedByContainer {
logrus.Warnf("Failed to prune image %s as it is in use: %v.\nA container associated with containers/storage (e.g., Buildah, CRI-O, etc.) maybe associated with this image.\nUsing the rmi command with the --force option will remove the container and image, but may cause failures for other dependent systems.", img.ID(), err)
@@ -151,13 +159,18 @@ func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) (
return nil, errors.Wrap(err, "failed to prune image")
}
defer img.newImageEvent(events.Prune)
- nameOrID := img.ID()
+
if len(repotags) > 0 {
nameOrID = repotags[0]
}
- pruned = append(pruned, nameOrID)
+
+ preports = append(preports, &reports.PruneReport{
+ Id: nameOrID,
+ Err: nil,
+ Size: uint64(imgSize),
+ })
}
}
- return pruned, nil
+ return preports, nil
}
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index c37929927..996b5995a 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -11,7 +11,6 @@ import (
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/docker"
- "github.com/containers/image/v5/docker/archive"
dockerarchive "github.com/containers/image/v5/docker/archive"
ociarchive "github.com/containers/image/v5/oci/archive"
oci "github.com/containers/image/v5/oci/layout"
@@ -130,7 +129,7 @@ func (ir *Runtime) getSinglePullRefPairGoal(srcRef types.ImageReference, destNam
// getPullRefPairsFromDockerArchiveReference returns a slice of pullRefPairs
// for the specified docker reference and the corresponding archive.Reader.
-func (ir *Runtime) getPullRefPairsFromDockerArchiveReference(ctx context.Context, reader *archive.Reader, ref types.ImageReference, sc *types.SystemContext) ([]pullRefPair, error) {
+func (ir *Runtime) getPullRefPairsFromDockerArchiveReference(ctx context.Context, reader *dockerarchive.Reader, ref types.ImageReference, sc *types.SystemContext) ([]pullRefPair, error) {
destNames, err := reader.ManifestTagsForReference(ref)
if err != nil {
return nil, err
@@ -178,7 +177,7 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.
// supports pulling from docker-archive, oci, and registries
switch srcRef.Transport().Name() {
case DockerArchive:
- reader, readerRef, err := archive.NewReaderForReference(sc, srcRef)
+ reader, readerRef, err := dockerarchive.NewReaderForReference(sc, srcRef)
if err != nil {
return nil, err
}
@@ -432,7 +431,7 @@ func checkRemoteImageForLabel(ctx context.Context, label string, imageInfo pullR
}
// Labels are case insensitive; so we iterate instead of simple lookup
for k := range remoteInspect.Labels {
- if strings.ToLower(label) == strings.ToLower(k) {
+ if strings.EqualFold(label, k) {
return nil
}
}
diff --git a/libpod/image/utils.go b/libpod/image/utils.go
index 727c73a71..5e7fed5c6 100644
--- a/libpod/image/utils.go
+++ b/libpod/image/utils.go
@@ -45,7 +45,8 @@ func findImageInRepotags(search imageParts, images []*Image) (*storage.Image, er
}
}
if len(candidates) == 0 {
- return nil, errors.Errorf("unable to find a name and tag match for %s in repotags", searchName)
+
+ return nil, errors.Wrapf(define.ErrNoSuchImage, "unable to find a name and tag match for %s in repotags", searchName)
}
// If more then one candidate and the candidates all have same name
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 6c0cde531..9285589b1 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -437,12 +437,8 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error {
}
// Remove our network aliases
- if _, ok := s.ctrNetworkAliases[ctr.ID()]; ok {
- delete(s.ctrNetworkAliases, ctr.ID())
- }
- if _, ok := s.ctrNetworks[ctr.ID()]; ok {
- delete(s.ctrNetworks, ctr.ID())
- }
+ delete(s.ctrNetworkAliases, ctr.ID())
+ delete(s.ctrNetworks, ctr.ID())
return nil
}
@@ -680,9 +676,7 @@ func (s *InMemoryState) NetworkDisconnect(ctr *Container, network string) error
ctrAliases = make(map[string][]string)
s.ctrNetworkAliases[ctr.ID()] = ctrAliases
}
- if _, ok := ctrAliases[network]; ok {
- delete(ctrAliases, network)
- }
+ delete(ctrAliases, network)
return nil
}
@@ -1523,12 +1517,8 @@ func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
}
// Remove our network aliases
- if _, ok := s.ctrNetworkAliases[ctr.ID()]; ok {
- delete(s.ctrNetworkAliases, ctr.ID())
- }
- if _, ok := s.ctrNetworks[ctr.ID()]; ok {
- delete(s.ctrNetworks, ctr.ID())
- }
+ delete(s.ctrNetworkAliases, ctr.ID())
+ delete(s.ctrNetworks, ctr.ID())
return nil
}
diff --git a/libpod/network/netconflist.go b/libpod/network/netconflist.go
index bf7d03501..165a9067b 100644
--- a/libpod/network/netconflist.go
+++ b/libpod/network/netconflist.go
@@ -216,7 +216,7 @@ func IfPassesFilter(netconf *libcni.NetworkConfigList, filters map[string][]stri
filterValue = ""
}
for labelKey, labelValue := range labels {
- if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
+ if labelKey == filterKey && (filterValue == "" || labelValue == filterValue) {
result = true
continue outer
}
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index be6867399..addf1814c 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -685,7 +685,7 @@ func (r *Runtime) setupNetNS(ctr *Container) error {
return errors.Wrapf(err, "failed to generate random netns name")
}
- nsPath := fmt.Sprintf("/var/run/netns/cni-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
+ nsPath := fmt.Sprintf("/run/netns/cni-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
if err := os.MkdirAll(filepath.Dir(nsPath), 0711); err != nil {
return err
diff --git a/libpod/oci.go b/libpod/oci.go
index 157c42c38..6948e6425 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -151,8 +151,6 @@ type OCIRuntime interface {
type ExecOptions struct {
// Cmd is the command to execute.
Cmd []string
- // CapAdd is a set of capabilities to add to the executed command.
- CapAdd []string
// Env is a set of environment variables to add to the container.
Env map[string]string
// Terminal is whether to create a new TTY for the exec session.
@@ -181,6 +179,8 @@ type ExecOptions struct {
// ExitCommandDelay is a delay (in seconds) between the exec session
// exiting, and the exit command being invoked.
ExitCommandDelay uint
+ // Privileged indicates the exec'd process will be launched in privileged mode
+ Privileged bool
}
// HTTPAttachStreams informs the HTTPAttach endpoint which of the container's
diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go
index fbc95510e..4556eba94 100644
--- a/libpod/oci_attach_linux.go
+++ b/libpod/oci_attach_linux.go
@@ -28,6 +28,15 @@ const (
AttachPipeStderr = 3
)
+func openUnixSocket(path string) (*net.UnixConn, error) {
+ fd, err := unix.Open(path, unix.O_PATH, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer unix.Close(fd)
+ return net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: fmt.Sprintf("/proc/self/fd/%d", fd), Net: "unixpacket"})
+}
+
// Attach to the given container
// Does not check if state is appropriate
// started is only required if startContainer is true
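
The new openUnixSocket above (and the removal of unixPathLength and its cgo/nocgo variants further down) works around the AF_UNIX path limit: sun_path holds at most 107 bytes on Linux, and rootless attach-socket paths can exceed that. Opening the path with O_PATH and dialing through /proc/self/fd/<n> gives a short, fixed-length alias instead of truncating the path. A sketch assuming Linux and golang.org/x/sys/unix:

package main

import (
	"fmt"
	"net"

	"golang.org/x/sys/unix"
)

func dialLongUnixPath(path string) (*net.UnixConn, error) {
	fd, err := unix.Open(path, unix.O_PATH, 0)
	if err != nil {
		return nil, err
	}
	defer unix.Close(fd)
	// /proc/self/fd/<n> is always short, no matter how long `path` is.
	return net.DialUnix("unixpacket", nil,
		&net.UnixAddr{Name: fmt.Sprintf("/proc/self/fd/%d", fd), Net: "unixpacket"})
}

func main() {
	if _, err := dialLongUnixPath("/tmp/attach-sock"); err != nil {
		fmt.Println(err) // no listener in this demo; the dial mechanism is the point
	}
}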
@@ -52,11 +61,10 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-
if err != nil {
return err
}
- socketPath := buildSocketPath(attachSock)
- conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
+ conn, err := openUnixSocket(attachSock)
if err != nil {
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
+ return errors.Wrapf(err, "failed to connect to container's attach socket: %v", attachSock)
}
defer func() {
if err := conn.Close(); err != nil {
@@ -124,7 +132,6 @@ func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, se
if err != nil {
return err
}
- socketPath := buildSocketPath(sockPath)
// 2: read from attachFd that the parent process has set up the console socket
if _, err := readConmonPipeData(attachFd, ""); err != nil {
@@ -132,9 +139,9 @@ func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, se
}
// 2: then attach
- conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
+ conn, err := openUnixSocket(sockPath)
if err != nil {
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
+ return errors.Wrapf(err, "failed to connect to container's attach socket: %v", sockPath)
}
defer func() {
if err := conn.Close(); err != nil {
@@ -182,16 +189,6 @@ func registerResizeFunc(resize <-chan remotecommand.TerminalSize, bundlePath str
})
}
-func buildSocketPath(socketPath string) string {
- maxUnixLength := unixPathLength()
- if maxUnixLength < len(socketPath) {
- socketPath = socketPath[0:maxUnixLength]
- }
-
- logrus.Debug("connecting to socket ", socketPath)
- return socketPath
-}
-
func setupStdioChannels(streams *define.AttachStreams, conn *net.UnixConn, detachKeys []byte) (chan error, chan error) {
receiveStdoutError := make(chan error)
go func() {
diff --git a/libpod/oci_attach_linux_cgo.go b/libpod/oci_attach_linux_cgo.go
deleted file mode 100644
index d81243360..000000000
--- a/libpod/oci_attach_linux_cgo.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//+build linux,cgo
-
-package libpod
-
-//#include <sys/un.h>
-// extern int unix_path_length(){struct sockaddr_un addr; return sizeof(addr.sun_path) - 1;}
-import "C"
-
-func unixPathLength() int {
- return int(C.unix_path_length())
-}
diff --git a/libpod/oci_attach_linux_nocgo.go b/libpod/oci_attach_linux_nocgo.go
deleted file mode 100644
index a514a555d..000000000
--- a/libpod/oci_attach_linux_nocgo.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//+build linux,!cgo
-
-package libpod
-
-func unixPathLength() int {
- return 107
-}
diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go
index f8e7020f7..dc5dd03df 100644
--- a/libpod/oci_conmon_exec_linux.go
+++ b/libpod/oci_conmon_exec_linux.go
@@ -2,7 +2,6 @@ package libpod
import (
"fmt"
- "net"
"net/http"
"os"
"os/exec"
@@ -387,7 +386,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
finalEnv = append(finalEnv, fmt.Sprintf("%s=%s", k, v))
}
- processFile, err := prepareProcessExec(c, options.Cmd, finalEnv, options.Terminal, options.Cwd, options.User, sessionID)
+ processFile, err := prepareProcessExec(c, options, finalEnv, sessionID)
if err != nil {
return nil, nil, err
}
@@ -398,10 +397,6 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", options.PreserveFDs))...)
}
- for _, capability := range options.CapAdd {
- args = append(args, formatRuntimeOpts("--cap", capability)...)
- }
-
if options.Terminal {
args = append(args, "-t")
}
@@ -516,7 +511,6 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
if err != nil {
return err
}
- socketPath := buildSocketPath(sockPath)
// 2: read from attachFd that the parent process has set up the console socket
if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil {
@@ -524,9 +518,9 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
}
// 2: then attach
- conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
+ conn, err := openUnixSocket(sockPath)
if err != nil {
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
+ return errors.Wrapf(err, "failed to connect to container's attach socket: %v", sockPath)
}
defer func() {
if err := conn.Close(); err != nil {
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index c99086b33..23bfb29d7 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -22,6 +22,7 @@ import (
"text/template"
"time"
+ "github.com/containers/common/pkg/capabilities"
"github.com/containers/common/pkg/config"
conmonConfig "github.com/containers/conmon/runner/config"
"github.com/containers/podman/v2/libpod/define"
@@ -193,6 +194,11 @@ func hasCurrentUserMapped(ctr *Container) bool {
// CreateContainer creates a container.
func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error {
+ // always make the run dir accessible to the current user so that the PID files can be read without
+ // being in the rootless user namespace.
+ if err := makeAccessible(ctr.state.RunDir, 0, 0); err != nil {
+ return err
+ }
if !hasCurrentUserMapped(ctr) {
for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.Engine.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.Engine.VolumePath} {
if err := makeAccessible(i, ctr.RootUID(), ctr.RootGID()); err != nil {
@@ -523,13 +529,12 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
if err != nil {
return err
}
- socketPath := buildSocketPath(attachSock)
var conn *net.UnixConn
if streamAttach {
- newConn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
+ newConn, err := openUnixSocket(attachSock)
if err != nil {
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
+ return errors.Wrapf(err, "failed to connect to container's attach socket: %v", attachSock)
}
conn = newConn
defer func() {
@@ -538,7 +543,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
}
}()
- logrus.Debugf("Successfully connected to container %s attach socket %s", ctr.ID(), socketPath)
+ logrus.Debugf("Successfully connected to container %s attach socket %s", ctr.ID(), attachSock)
}
detachString := ctr.runtime.config.Engine.DetachKeys
@@ -763,10 +768,14 @@ func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options Container
}
// imagePath is used by CRIU to store the actual checkpoint files
imagePath := ctr.CheckpointPath()
+ if options.PreCheckPoint {
+ imagePath = ctr.PreCheckPointPath()
+ }
// workPath will be used to store dump.log and stats-dump
workPath := ctr.bundlePath()
logrus.Debugf("Writing checkpoint to %s", imagePath)
logrus.Debugf("Writing checkpoint logs to %s", workPath)
+ logrus.Debugf("Pre-dump the container %t", options.PreCheckPoint)
args := []string{}
args = append(args, r.runtimeFlags...)
args = append(args, "checkpoint")
@@ -780,6 +789,15 @@ func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options Container
if options.TCPEstablished {
args = append(args, "--tcp-established")
}
+ if !options.PreCheckPoint && options.KeepRunning {
+ args = append(args, "--leave-running")
+ }
+ if options.PreCheckPoint {
+ args = append(args, "--pre-dump")
+ }
+ if !options.PreCheckPoint && options.WithPrevious {
+ args = append(args, "--parent-path", ctr.PreCheckPointPath())
+ }
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return err
@@ -788,6 +806,7 @@ func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options Container
return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
}
args = append(args, ctr.ID())
+ logrus.Debugf("the args to checkpoint: %s %s", r.path, strings.Join(args, " "))
return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, nil, r.path, args...)
}
@@ -1185,26 +1204,30 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
// prepareProcessExec returns the path of the process.json used in runc exec -p
// caller is responsible to close the returned *os.File if needed.
-func prepareProcessExec(c *Container, cmd, env []string, tty bool, cwd, user, sessionID string) (*os.File, error) {
+func prepareProcessExec(c *Container, options *ExecOptions, env []string, sessionID string) (*os.File, error) {
f, err := ioutil.TempFile(c.execBundlePath(sessionID), "exec-process-")
if err != nil {
return nil, err
}
- pspec := c.config.Spec.Process
+ pspec := new(spec.Process)
+ if err := JSONDeepCopy(c.config.Spec.Process, pspec); err != nil {
+ return nil, err
+ }
pspec.SelinuxLabel = c.config.ProcessLabel
- pspec.Args = cmd
+ pspec.Args = options.Cmd
+
// We need to default this to false else it will inherit terminal as true
// from the container.
pspec.Terminal = false
- if tty {
+ if options.Terminal {
pspec.Terminal = true
}
if len(env) > 0 {
pspec.Env = append(pspec.Env, env...)
}
- if cwd != "" {
- pspec.Cwd = cwd
+ if options.Cwd != "" {
+ pspec.Cwd = options.Cwd
}
@@ -1212,6 +1235,7 @@ func prepareProcessExec(c *Container, cmd, env []string, tty bool, cwd, user, se
var sgids []uint32
// if the user is empty, we should inherit the user that the container is currently running with
+ user := options.User
if user == "" {
user = c.config.User
addGroups = c.config.Groups
@@ -1247,6 +1271,31 @@ func prepareProcessExec(c *Container, cmd, env []string, tty bool, cwd, user, se
pspec.User = processUser
}
+ ctrSpec, err := c.specFromState()
+ if err != nil {
+ return nil, err
+ }
+
+ allCaps := capabilities.AllCapabilities()
+ if options.Privileged {
+ pspec.Capabilities.Bounding = allCaps
+ } else {
+ pspec.Capabilities.Bounding = ctrSpec.Process.Capabilities.Bounding
+ }
+ if execUser.Uid == 0 {
+ pspec.Capabilities.Effective = pspec.Capabilities.Bounding
+ pspec.Capabilities.Inheritable = pspec.Capabilities.Bounding
+ pspec.Capabilities.Permitted = pspec.Capabilities.Bounding
+ pspec.Capabilities.Ambient = pspec.Capabilities.Bounding
+ } else {
+ if user == c.config.User {
+ pspec.Capabilities.Effective = ctrSpec.Process.Capabilities.Effective
+ pspec.Capabilities.Inheritable = ctrSpec.Process.Capabilities.Effective
+ pspec.Capabilities.Permitted = ctrSpec.Process.Capabilities.Effective
+ pspec.Capabilities.Ambient = ctrSpec.Process.Capabilities.Effective
+ }
+ }
+
hasHomeSet := false
for _, s := range pspec.Env {
if strings.HasPrefix(s, "HOME=") {
@@ -1272,7 +1321,12 @@ func prepareProcessExec(c *Container, cmd, env []string, tty bool, cwd, user, se
// configureConmonEnv gets the environment values to add to conmon's exec struct
// TODO this may want to be less hardcoded/more configurable in the future
func (r *ConmonOCIRuntime) configureConmonEnv(ctr *Container, runtimeDir string) ([]string, []*os.File) {
- env := make([]string, 0, 6)
+ var env []string
+ for _, e := range os.Environ() {
+ if strings.HasPrefix(e, "LC_") {
+ env = append(env, e)
+ }
+ }
env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
env = append(env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED")))
env = append(env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID")))
@@ -1333,6 +1387,7 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p
logDriverArg = define.NoLogging
case define.JSONLogging:
fallthrough
+ //lint:ignore ST1015 the default case has to be here
default: //nolint-stylecheck
// No case here should happen except JSONLogging, but keep this here in case the options are extended
logrus.Errorf("%s logging specified but not supported. Choosing k8s-file logging instead", ctr.LogDriver())
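The capability handling added to prepareProcessExec chooses the exec session's capability sets in three cases: a privileged exec gets the full capability list, a root exec mirrors its bounding set into the remaining sets, and an unprivileged exec running as the container's configured user inherits the container's effective set. A self-contained sketch of that selection logic, using simplified stand-in types rather than the actual libpod structures:

package main

import "fmt"

// capSets mirrors the five capability lists in an OCI process spec.
type capSets struct {
    Bounding, Effective, Inheritable, Permitted, Ambient []string
}

// chooseExecCaps sketches the selection above: privileged execs get every
// known capability in the bounding set, root mirrors the bounding set into
// the other sets, a non-root exec running as the container's own user
// inherits the container's effective set, and any other user gets nothing
// extra.
func chooseExecCaps(privileged bool, uid int, execUser, ctrUser string, allCaps []string, ctr capSets) capSets {
    out := capSets{}
    if privileged {
        out.Bounding = allCaps
    } else {
        out.Bounding = ctr.Bounding
    }
    if uid == 0 {
        out.Effective = out.Bounding
        out.Inheritable = out.Bounding
        out.Permitted = out.Bounding
        out.Ambient = out.Bounding
    } else if execUser == ctrUser {
        out.Effective = ctr.Effective
        out.Inheritable = ctr.Effective
        out.Permitted = ctr.Effective
        out.Ambient = ctr.Effective
    }
    return out
}

func main() {
    ctr := capSets{
        Bounding:  []string{"CAP_NET_BIND_SERVICE", "CAP_CHOWN"},
        Effective: []string{"CAP_NET_BIND_SERVICE"},
    }
    fmt.Printf("%+v\n", chooseExecCaps(false, 1000, "web", "web", nil, ctr))
}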
diff --git a/libpod/oci_util.go b/libpod/oci_util.go
index 2ba85c4b3..d40cf13bd 100644
--- a/libpod/oci_util.go
+++ b/libpod/oci_util.go
@@ -126,5 +126,17 @@ func getOCIRuntimeError(runtimeMsg string) error {
}
return errors.Wrapf(define.ErrOCIRuntimeNotFound, "%s", strings.Trim(errStr, "\n"))
}
+ if match := regexp.MustCompile("`/proc/[a-z0-9-].+/attr.*`").FindString(runtimeMsg); match != "" {
+ errStr := match
+ if includeFullOutput {
+ errStr = runtimeMsg
+ }
+ if strings.HasSuffix(match, "/exec`") {
+ return errors.Wrapf(define.ErrSetSecurityAttribute, "%s", strings.Trim(errStr, "\n"))
+ } else if strings.HasSuffix(match, "/current`") {
+ return errors.Wrapf(define.ErrGetSecurityAttribute, "%s", strings.Trim(errStr, "\n"))
+ }
+ return errors.Wrapf(define.ErrSecurityAttribute, "%s", strings.Trim(errStr, "\n"))
+ }
return errors.Wrapf(define.ErrOCIRuntime, "%s", strings.Trim(runtimeMsg, "\n"))
}
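The new branch in getOCIRuntimeError singles out runtime failures that mention a /proc/.../attr path (security attribute reads and writes) and maps them to more specific errors. A standalone demonstration of how the pattern and suffix checks behave; the message below is illustrative, not a captured runtime error:

package main

import (
    "fmt"
    "regexp"
    "strings"
)

func main() {
    // Same pattern as in the hunk above.
    attrRE := regexp.MustCompile("`/proc/[a-z0-9-].+/attr.*`")
    msg := "open `/proc/thread-self/attr/exec`: permission denied"
    match := attrRE.FindString(msg)
    switch {
    case match == "":
        fmt.Println("generic OCI runtime error")
    case strings.HasSuffix(match, "/exec`"):
        fmt.Println("failed to set a security attribute:", match)
    case strings.HasSuffix(match, "/current`"):
        fmt.Println("failed to get a security attribute:", match)
    default:
        fmt.Println("security attribute error:", match)
    }
}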
diff --git a/libpod/options.go b/libpod/options.go
index 8100eee62..c7bac7e1f 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -306,7 +306,7 @@ func WithDefaultMountsFile(mountsFile string) RuntimeOption {
// WithTmpDir sets the directory that temporary runtime files which are not
// expected to survive across reboots will be stored.
-// This should be located on a tmpfs mount (/tmp or /var/run for example).
+// This should be located on a tmpfs mount (/tmp or /run for example).
func WithTmpDir(dir string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
@@ -910,7 +910,7 @@ func WithUserNSFrom(nsCtr *Container) CtrCreateOption {
ctr.config.UserNsCtr = nsCtr.ID()
ctr.config.IDMappings = nsCtr.config.IDMappings
- g := generate.NewFromSpec(ctr.config.Spec)
+ g := generate.Generator{Config: ctr.config.Spec}
g.ClearLinuxUIDMappings()
for _, uidmap := range nsCtr.config.IDMappings.UIDMap {
@@ -1549,17 +1549,6 @@ func WithVolumeDriver(driver string) VolumeCreateOption {
return define.ErrVolumeFinalized
}
- // Uncomment when volume plugins are ready for use.
- // if driver != define.VolumeDriverLocal {
- // if _, err := plugin.GetVolumePlugin(driver); err != nil {
- // return err
- // }
- // }
-
- if driver != define.VolumeDriverLocal {
- return define.ErrNotImplemented
- }
-
volume.config.Driver = driver
return nil
}
diff --git a/libpod/plugin/volume_api.go b/libpod/plugin/volume_api.go
index 2500a4f36..c5dec651c 100644
--- a/libpod/plugin/volume_api.go
+++ b/libpod/plugin/volume_api.go
@@ -2,8 +2,9 @@ package plugin
import (
"bytes"
- "fmt"
+ "context"
"io/ioutil"
+ "net"
"net/http"
"os"
"path/filepath"
@@ -43,7 +44,6 @@ var (
const (
defaultTimeout = 5 * time.Second
- defaultPath = "/run/docker/plugins"
volumePluginType = "VolumeDriver"
)
@@ -64,6 +64,8 @@ type VolumePlugin struct {
Name string
// SocketPath is the unix socket at which the plugin is accessed.
SocketPath string
+ // Client is the HTTP client we use to connect to the plugin.
+ Client *http.Client
}
// This is the response from the activate endpoint of the API.
@@ -76,7 +78,7 @@ type activateResponse struct {
func validatePlugin(newPlugin *VolumePlugin) error {
// It's a socket. Is it a plugin?
// Hit the Activate endpoint to find out if it is, and if so what kind
- req, err := http.NewRequest("POST", activatePath, nil)
+ req, err := http.NewRequest("POST", "http://plugin"+activatePath, nil)
if err != nil {
return errors.Wrapf(err, "error making request to volume plugin %s activation endpoint", newPlugin.Name)
}
@@ -84,9 +86,7 @@ func validatePlugin(newPlugin *VolumePlugin) error {
req.Header.Set("Host", newPlugin.getURI())
req.Header.Set("Content-Type", sdk.DefaultContentTypeV1_1)
- client := new(http.Client)
- client.Timeout = defaultTimeout
- resp, err := client.Do(req)
+ resp, err := newPlugin.Client.Do(req)
if err != nil {
return errors.Wrapf(err, "error sending request to plugin %s activation endpoint", newPlugin.Name)
}
@@ -121,22 +121,28 @@ func validatePlugin(newPlugin *VolumePlugin) error {
return errors.Wrapf(ErrNotVolumePlugin, "plugin %s does not implement volume plugin, instead provides %s", newPlugin.Name, strings.Join(respStruct.Implements, ", "))
}
+ if plugins == nil {
+ plugins = make(map[string]*VolumePlugin)
+ }
+
plugins[newPlugin.Name] = newPlugin
return nil
}
-// GetVolumePlugin gets a single volume plugin by path.
-// TODO: We should not be auto-completing based on a default path; we should
-// require volumes to have been pre-specified in containers.conf (will need a
-// function to pre-populate the plugins list, and we should probably do a lazy
-// initialization there to not slow things down too much).
-func GetVolumePlugin(name string) (*VolumePlugin, error) {
+// GetVolumePlugin gets a single volume plugin, with the given name, at the
+// given path.
+func GetVolumePlugin(name string, path string) (*VolumePlugin, error) {
pluginsLock.Lock()
defer pluginsLock.Unlock()
plugin, exists := plugins[name]
if exists {
+ // This shouldn't be possible, but just in case...
+ if plugin.SocketPath != filepath.Clean(path) {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "requested path %q for volume plugin %s does not match pre-existing path for plugin, %q", path, name, plugin.SocketPath)
+ }
+
return plugin, nil
}
@@ -144,7 +150,20 @@ func GetVolumePlugin(name string) (*VolumePlugin, error) {
newPlugin := new(VolumePlugin)
newPlugin.Name = name
- newPlugin.SocketPath = filepath.Join(defaultPath, fmt.Sprintf("%s.sock", name))
+ newPlugin.SocketPath = filepath.Clean(path)
+
+ // Need an HTTP client to force a Unix connection.
+ // And since we can reuse it, might as well cache it.
+ client := new(http.Client)
+ client.Timeout = defaultTimeout
+ // This bit borrowed from pkg/bindings/connection.go
+ client.Transport = &http.Transport{
+ DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+ return (&net.Dialer{}).DialContext(ctx, "unix", newPlugin.SocketPath)
+ },
+ DisableCompression: true,
+ }
+ newPlugin.Client = client
stat, err := os.Stat(newPlugin.SocketPath)
if err != nil {
@@ -183,6 +202,7 @@ func (p *VolumePlugin) verifyReachable() error {
}
// Send a request to the volume plugin for handling.
+// Callers *MUST* close the response when they are done.
func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint string) (*http.Response, error) {
var (
reqJSON []byte
@@ -196,7 +216,7 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint st
}
}
- req, err := http.NewRequest("POST", endpoint, bytes.NewReader(reqJSON))
+ req, err := http.NewRequest("POST", "http://plugin"+endpoint, bytes.NewReader(reqJSON))
if err != nil {
return nil, errors.Wrapf(err, "error making request to volume plugin %s endpoint %s", p.Name, endpoint)
}
@@ -204,13 +224,12 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint st
req.Header.Set("Host", p.getURI())
req.Header.Set("Content-Type", sdk.DefaultContentTypeV1_1)
- client := new(http.Client)
- client.Timeout = defaultTimeout
- resp, err := client.Do(req)
+ resp, err := p.Client.Do(req)
if err != nil {
return nil, errors.Wrapf(err, "error sending request to volume plugin %s endpoint %s", p.Name, endpoint)
}
- defer resp.Body.Close()
+ // We are *deliberately not closing* the response here. It is the
+ // responsibility of the caller to do so after reading the response.
return resp, nil
}
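The plugin handle now caches an http.Client whose transport always dials the plugin's unix socket, which is why requests can use the placeholder "http://plugin" host. A minimal standalone version of the same pattern; the socket path is hypothetical and /Plugin.Activate is the conventional Docker plugin activation endpoint:

package main

import (
    "context"
    "fmt"
    "net"
    "net/http"
    "time"
)

// newUnixClient builds an http.Client whose transport ignores the host in
// the request URL and always dials the given unix socket.
func newUnixClient(socketPath string) *http.Client {
    return &http.Client{
        Timeout: 5 * time.Second,
        Transport: &http.Transport{
            DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                return (&net.Dialer{}).DialContext(ctx, "unix", socketPath)
            },
            DisableCompression: true,
        },
    }
}

func main() {
    client := newUnixClient("/run/docker/plugins/example.sock") // hypothetical socket
    resp, err := client.Post("http://plugin/Plugin.Activate", "application/json", nil)
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}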
diff --git a/libpod/rootless_cni_linux.go b/libpod/rootless_cni_linux.go
index ce8a87759..9a980750f 100644
--- a/libpod/rootless_cni_linux.go
+++ b/libpod/rootless_cni_linux.go
@@ -110,6 +110,8 @@ func DeallocRootlessCNI(ctx context.Context, c *Container) error {
logrus.Warn(err)
}
logrus.Debugf("rootless CNI: removing infra container %q", infra.ID())
+ infra.lock.Lock()
+ defer infra.lock.Unlock()
if err := c.runtime.removeContainer(ctx, infra, true, false, true); err != nil {
return err
}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 1004e4fa7..34c737a67 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/podman/v2/libpod/events"
"github.com/containers/podman/v2/libpod/image"
"github.com/containers/podman/v2/libpod/lock"
+ "github.com/containers/podman/v2/libpod/plugin"
"github.com/containers/podman/v2/libpod/shutdown"
"github.com/containers/podman/v2/pkg/cgroups"
"github.com/containers/podman/v2/pkg/registries"
@@ -888,3 +889,18 @@ func (r *Runtime) reloadStorageConf() error {
logrus.Infof("applied new storage configuration: %v", r.storageConfig)
return nil
}
+
+// getVolumePlugin gets a specific volume plugin given its name.
+func (r *Runtime) getVolumePlugin(name string) (*plugin.VolumePlugin, error) {
+ // There is no plugin for local.
+ if name == define.VolumeDriverLocal || name == "" {
+ return nil, nil
+ }
+
+ pluginPath, ok := r.config.Engine.VolumePlugins[name]
+ if !ok {
+ return nil, errors.Wrapf(define.ErrMissingPlugin, "no volume plugin with name %s available", name)
+ }
+
+ return plugin.GetVolumePlugin(name, pluginPath)
+}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 14b537ca2..d2bcd8db3 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -14,6 +14,7 @@ import (
"github.com/containers/podman/v2/libpod/events"
"github.com/containers/podman/v2/libpod/shutdown"
"github.com/containers/podman/v2/pkg/cgroups"
+ "github.com/containers/podman/v2/pkg/domain/entities/reports"
"github.com/containers/podman/v2/pkg/rootless"
"github.com/containers/storage"
"github.com/containers/storage/pkg/stringid"
@@ -71,6 +72,140 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
return r.setupContainer(ctx, ctr)
}
+// RenameContainer renames the given container.
+// The given container object will be rendered unusable, and a new, renamed
+// Container will be returned.
+func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName string) (*Container, error) {
+ ctr.lock.Lock()
+ defer ctr.lock.Unlock()
+
+ if err := ctr.syncContainer(); err != nil {
+ return nil, err
+ }
+
+ if newName == "" || !define.NameRegex.MatchString(newName) {
+ return nil, define.RegexError
+ }
+
+ // Check if the name is available.
+ // This is *100% NOT ATOMIC*, so any failures in-flight will do
+ // *VERY BAD THINGS* to the state. So we have to try to catch all we
+ // can before starting.
+ if _, err := r.state.LookupContainerID(newName); err == nil {
+ return nil, errors.Wrapf(define.ErrCtrExists, "name %s is already in use by another container", newName)
+ }
+ if _, err := r.state.LookupPod(newName); err == nil {
+ return nil, errors.Wrapf(define.ErrPodExists, "name %s is already in use by another pod", newName)
+ }
+
+ // TODO: Investigate if it is possible to remove this limitation.
+ depCtrs, err := r.state.ContainerInUse(ctr)
+ if err != nil {
+ return nil, err
+ }
+ if len(depCtrs) > 0 {
+ return nil, errors.Wrapf(define.ErrCtrExists, "cannot rename container %s as it is in use by other containers: %v", ctr.ID(), strings.Join(depCtrs, ","))
+ }
+
+ // We need to pull an updated config, in case another rename fired and
+ // the config was re-written.
+ newConf, err := r.state.GetContainerConfig(ctr.ID())
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving container %s configuration from DB to remove", ctr.ID())
+ }
+ ctr.config = newConf
+
+ // TODO: This is going to fail if we have active exec sessions, too.
+ // Investigate fixing that at a later date.
+
+ var pod *Pod
+ if ctr.config.Pod != "" {
+ tmpPod, err := r.state.Pod(ctr.config.Pod)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving container %s pod", ctr.ID())
+ }
+ pod = tmpPod
+ // Lock pod to ensure it's not removed while we're working
+ pod.lock.Lock()
+ defer pod.lock.Unlock()
+ }
+
+ // Lock all volumes to ensure they are not removed while we're working
+ volsLocked := make(map[string]bool)
+ for _, namedVol := range ctr.config.NamedVolumes {
+ if volsLocked[namedVol.Name] {
+ continue
+ }
+ vol, err := r.state.Volume(namedVol.Name)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving volume used by container %s", ctr.ID())
+ }
+
+ volsLocked[vol.Name()] = true
+ vol.lock.Lock()
+ defer vol.lock.Unlock()
+ }
+
+ logrus.Infof("Going to rename container %s from %q to %q", ctr.ID(), ctr.Name(), newName)
+
+ // Step 1: remove the old container.
+ if pod != nil {
+ if err := r.state.RemoveContainerFromPod(pod, ctr); err != nil {
+ return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
+ }
+ } else {
+ if err := r.state.RemoveContainer(ctr); err != nil {
+ return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
+ }
+ }
+
+ // Step 2: Make a new container based on the old one.
+ // TODO: Should we deep-copy the container config and state, to be safe?
+ newCtr := new(Container)
+ newCtr.config = ctr.config
+ newCtr.state = ctr.state
+ newCtr.lock = ctr.lock
+ newCtr.ociRuntime = ctr.ociRuntime
+ newCtr.runtime = r
+ newCtr.rootlessSlirpSyncR = ctr.rootlessSlirpSyncR
+ newCtr.rootlessSlirpSyncW = ctr.rootlessSlirpSyncW
+ newCtr.rootlessPortSyncR = ctr.rootlessPortSyncR
+ newCtr.rootlessPortSyncW = ctr.rootlessPortSyncW
+
+ newCtr.valid = true
+ newCtr.config.Name = newName
+
+ // Step 3: Add that new container to the DB
+ if pod != nil {
+ if err := r.state.AddContainerToPod(pod, newCtr); err != nil {
+ return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
+ }
+ } else {
+ if err := r.state.AddContainer(newCtr); err != nil {
+ return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
+ }
+ }
+
+ // Step 4: Save the new container, to force the state to be written to
+ // the DB. This may not be necessary, depending on DB implementation,
+ // but let's do it to be safe.
+ if err := newCtr.save(); err != nil {
+ return nil, err
+ }
+
+ // Step 5: rename the container in c/storage.
+ // This can fail if the name is already in use by a non-Podman
+ // container. This puts us in a bad spot - we've already renamed the
+ // container in Podman. We can swap the order, but then we have the
+ // opposite problem. Atomicity is a real problem here, with no easy
+ // solution.
+ if err := r.store.SetNames(newCtr.ID(), []string{newCtr.Name()}); err != nil {
+ return nil, err
+ }
+
+ return newCtr, nil
+}
+
func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
if rSpec == nil {
return nil, errors.Wrapf(define.ErrInvalidArg, "must provide a valid runtime spec to create container")
@@ -392,7 +527,7 @@ func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool,
// removePod is used only when removing pods. It instructs Podman to ignore
// infra container protections, and *not* remove from the database (as pod
// remove will handle that).
-func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, removeVolume bool, removePod bool) error {
+func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, removeVolume, removePod bool) error {
span, _ := opentracing.StartSpanFromContext(ctx, "removeContainer")
span.SetTag("type", "runtime")
defer span.Finish()
@@ -405,6 +540,18 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
+ // We need to refresh container config from the DB, to ensure that any
+ // changes (e.g. a rename) are picked up before we start removing.
+ // Since HasContainer above succeeded, we can safely assume the
+ // container exists.
+ // This is *very iffy* but it should be OK because the container won't
+ // exist once we're done.
+ newConf, err := r.state.GetContainerConfig(c.ID())
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving container %s configuration from DB to remove", c.ID())
+ }
+ c.config = newConf
+
logrus.Debugf("Removing container %s", c.ID())
// We need to lock the pod before we lock the container.
@@ -412,7 +559,6 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
// Don't need to do this in pod removal case - we're evicting the entire
// pod.
var pod *Pod
- var err error
runtime := c.runtime
if c.config.Pod != "" && !removePod {
pod, err = r.state.Pod(c.config.Pod)
@@ -884,9 +1030,8 @@ func (r *Runtime) GetExecSessionContainer(id string) (*Container, error) {
// PruneContainers removes stopped and exited containers from localstorage. A set of optional filters
// can be provided to be more granular.
-func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) (map[string]int64, map[string]error, error) {
- pruneErrors := make(map[string]error)
- prunedContainers := make(map[string]int64)
+func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) ([]*reports.PruneReport, error) {
+ preports := make([]*reports.PruneReport, 0)
// We add getting the exited and stopped containers via a filter
containerStateFilter := func(c *Container) bool {
if c.PodID() != "" {
@@ -906,23 +1051,28 @@ func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) (map[string]int
filterFuncs = append(filterFuncs, containerStateFilter)
delContainers, err := r.GetContainers(filterFuncs...)
if err != nil {
- return nil, nil, err
+ return nil, err
}
for _, c := range delContainers {
- ctr := c
- size, err := ctr.RWSize()
+ report := new(reports.PruneReport)
+ report.Id = c.ID()
+ report.Err = nil
+ report.Size = 0
+ size, err := c.RWSize()
if err != nil {
- pruneErrors[ctr.ID()] = err
+ report.Err = err
+ preports = append(preports, report)
continue
}
- err = r.RemoveContainer(context.Background(), ctr, false, false)
+ err = r.RemoveContainer(context.Background(), c, false, false)
if err != nil {
- pruneErrors[ctr.ID()] = err
+ report.Err = err
} else {
- prunedContainers[ctr.ID()] = size
+ report.Size = (uint64)(size)
}
+ preports = append(preports, report)
}
- return prunedContainers, pruneErrors, nil
+ return preports, nil
}
// MountStorageContainer mounts the storage container's root filesystem
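PruneContainers now returns one flat slice of PruneReport values instead of the old pair of maps, leaving aggregation to the caller. A short sketch of consuming the new return value; it assumes the podman/v2 module is importable, and the Id, Err and Size field names are taken from the hunk above:

package main

import (
    "fmt"

    "github.com/containers/podman/v2/pkg/domain/entities/reports"
)

// summarizePrune tallies reclaimed space and collects per-container errors
// from the PruneReport slice returned by PruneContainers.
func summarizePrune(rs []*reports.PruneReport) (uint64, map[string]error) {
    var total uint64
    errs := make(map[string]error)
    for _, r := range rs {
        if r.Err != nil {
            errs[r.Id] = r.Err
            continue
        }
        total += r.Size
    }
    return total, errs
}

func main() {
    fmt.Println(summarizePrune(nil))
}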
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index 965333f77..2c5442bd2 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -279,6 +279,7 @@ func (r *Runtime) LoadImage(ctx context.Context, inputFile string, writer io.Wri
if newImages, err := r.LoadAllImageFromArchive(ctx, writer, inputFile, signaturePolicy); err == nil {
return newImages, nil
}
+
return r.LoadImageFromSingleImageArchive(ctx, writer, inputFile, signaturePolicy)
}
@@ -293,7 +294,7 @@ func (r *Runtime) LoadAllImageFromArchive(ctx context.Context, writer io.Writer,
// LoadImageFromSingleImageArchive load image from the archive of single image that inputFile points to.
func (r *Runtime) LoadImageFromSingleImageArchive(ctx context.Context, writer io.Writer, inputFile, signaturePolicy string) (string, error) {
- var err error
+ var saveErr error
for _, referenceFn := range []func() (types.ImageReference, error){
func() (types.ImageReference, error) {
return dockerarchive.ParseReference(inputFile)
@@ -312,10 +313,12 @@ func (r *Runtime) LoadImageFromSingleImageArchive(ctx context.Context, writer io
if err == nil && src != nil {
if newImages, err := r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer); err == nil {
return getImageNames(newImages), nil
+ } else {
+ saveErr = err
}
}
}
- return "", errors.Wrapf(err, "error pulling image")
+ return "", errors.Wrapf(saveErr, "error pulling image")
}
func getImageNames(images []*image.Image) string {
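The saveErr change keeps the failure from the last attempted archive parser instead of letting the loop's own err shadow it, so the final "error pulling image" wrap reports something meaningful. A tiny standalone illustration of the same try-each-alternative pattern:

package main

import (
    "errors"
    "fmt"
)

// tryAll runs each loader until one succeeds, remembering the last failure
// in a dedicated variable so an inner short declaration cannot shadow it.
func tryAll(loaders []func() (string, error)) (string, error) {
    var saveErr error
    for _, load := range loaders {
        name, err := load()
        if err == nil {
            return name, nil
        }
        saveErr = err
    }
    return "", fmt.Errorf("all loaders failed: %w", saveErr)
}

func main() {
    _, err := tryAll([]func() (string, error){
        func() (string, error) { return "", errors.New("not a docker archive") },
        func() (string, error) { return "", errors.New("not an oci archive") },
    })
    fmt.Println(err)
}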
diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go
index 3e4185db1..dd957527d 100644
--- a/libpod/runtime_pod_infra_linux.go
+++ b/libpod/runtime_pod_infra_linux.go
@@ -159,6 +159,34 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, rawIm
g.AddMount(devPts)
}
+ // Add default sysctls from containers.conf
+ defaultSysctls, err := util.ValidateSysctls(r.config.Sysctls())
+ if err != nil {
+ return nil, err
+ }
+ for sysctlKey, sysctlVal := range defaultSysctls {
+ // Ignore mqueue sysctls if not sharing IPC
+ if !p.config.UsePodIPC && strings.HasPrefix(sysctlKey, "fs.mqueue.") {
+ logrus.Infof("Sysctl %s=%s ignored in containers.conf, since IPC Namespace for pod is unused", sysctlKey, sysctlVal)
+
+ continue
+ }
+
+ // Ignore net sysctls if host network or not sharing network
+ if (p.config.InfraContainer.HostNetwork || !p.config.UsePodNet) && strings.HasPrefix(sysctlKey, "net.") {
+ logrus.Infof("Sysctl %s=%s ignored in containers.conf, since Network Namespace for pod is unused", sysctlKey, sysctlVal)
+ continue
+ }
+
+ // Ignore uts sysctls if not sharing UTS
+ if !p.config.UsePodUTS && (strings.HasPrefix(sysctlKey, "kernel.domainname") || strings.HasPrefix(sysctlKey, "kernel.hostname")) {
+ logrus.Infof("Sysctl %s=%s ignored in containers.conf, since UTS Namespace for pod is unused", sysctlKey, sysctlVal)
+ continue
+ }
+
+ g.AddLinuxSysctl(sysctlKey, sysctlVal)
+ }
+
containerName := p.ID()[:IDTruncLength] + "-infra"
options = append(options, r.WithPod(p))
options = append(options, WithRootFSFromImage(imgID, imgName, rawImageName))
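Default sysctls from containers.conf are applied to the infra container only when the namespace they touch is actually shared by the pod: fs.mqueue.* keys need the IPC namespace, net.* keys need the pod network namespace, and the kernel.domainname/kernel.hostname keys need UTS. A small sketch of that classification in isolation:

package main

import (
    "fmt"
    "strings"
)

// sysctlNamespace reports which pod-shared namespace a sysctl key depends
// on, mirroring the prefix checks in the hunk above.
func sysctlNamespace(key string) string {
    switch {
    case strings.HasPrefix(key, "fs.mqueue."):
        return "ipc"
    case strings.HasPrefix(key, "net."):
        return "net"
    case strings.HasPrefix(key, "kernel.domainname"), strings.HasPrefix(key, "kernel.hostname"):
        return "uts"
    default:
        return ""
    }
}

func main() {
    for _, k := range []string{"net.ipv4.ping_group_range", "fs.mqueue.msg_max", "kernel.domainname", "vm.swappiness"} {
        fmt.Println(k, "->", sysctlNamespace(k))
    }
}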
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index 10c32a119..9d985f905 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -5,6 +5,7 @@ import (
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/events"
+ "github.com/containers/podman/v2/pkg/domain/entities/reports"
"github.com/pkg/errors"
)
@@ -133,22 +134,32 @@ func (r *Runtime) GetAllVolumes() ([]*Volume, error) {
}
// PruneVolumes removes unused volumes from the system
-func (r *Runtime) PruneVolumes(ctx context.Context, filterFuncs []VolumeFilter) (map[string]error, error) {
- reports := make(map[string]error)
+func (r *Runtime) PruneVolumes(ctx context.Context, filterFuncs []VolumeFilter) ([]*reports.PruneReport, error) {
+ preports := make([]*reports.PruneReport, 0)
vols, err := r.Volumes(filterFuncs...)
if err != nil {
return nil, err
}
for _, vol := range vols {
+ report := new(reports.PruneReport)
+ volSize, err := vol.Size()
+ if err != nil {
+ volSize = 0
+ }
+ report.Size = volSize
+ report.Id = vol.Name()
if err := r.RemoveVolume(ctx, vol, false); err != nil {
if errors.Cause(err) != define.ErrVolumeBeingUsed && errors.Cause(err) != define.ErrVolumeRemoved {
- reports[vol.Name()] = err
+ report.Err = err
+ } else {
+ // The volume is in use or was already removed; skip reporting it
+ continue
}
- continue
+ } else {
+ vol.newVolumeEvent(events.Prune)
}
- vol.newVolumeEvent(events.Prune)
- reports[vol.Name()] = nil
+ preports = append(preports, report)
}
- return reports, nil
+ return preports, nil
}
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 9bf0fd108..4a29f01aa 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -11,7 +11,9 @@ import (
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/events"
+ volplugin "github.com/containers/podman/v2/libpod/plugin"
"github.com/containers/storage/pkg/stringid"
+ pluginapi "github.com/docker/go-plugins-helpers/volume"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -53,6 +55,14 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
return nil, errors.Wrapf(define.ErrVolumeExists, "volume with name %s already exists", volume.config.Name)
}
+ // Plugin can be nil if driver is local, but that's OK - superfluous
+ // assignment doesn't hurt much.
+ plugin, err := r.getVolumePlugin(volume.config.Driver)
+ if err != nil {
+ return nil, errors.Wrapf(err, "volume %s uses volume plugin %s but it could not be retrieved", volume.config.Name, volume.config.Driver)
+ }
+ volume.plugin = plugin
+
if volume.config.Driver == define.VolumeDriverLocal {
logrus.Debugf("Validating options for local driver")
// Validate options
@@ -66,25 +76,38 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
}
- // Create the mountpoint of this volume
- volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name)
- if err := os.MkdirAll(volPathRoot, 0700); err != nil {
- return nil, errors.Wrapf(err, "error creating volume directory %q", volPathRoot)
- }
- if err := os.Chown(volPathRoot, volume.config.UID, volume.config.GID); err != nil {
- return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID)
- }
- fullVolPath := filepath.Join(volPathRoot, "_data")
- if err := os.MkdirAll(fullVolPath, 0755); err != nil {
- return nil, errors.Wrapf(err, "error creating volume directory %q", fullVolPath)
- }
- if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
- return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID)
- }
- if err := LabelVolumePath(fullVolPath); err != nil {
- return nil, err
+ // Now we get conditional: we either need to make the volume in the
+ // volume plugin, or on disk if not using a plugin.
+ if volume.plugin != nil {
+ // We can't chown, relabel, or otherwise modify the path the volume
+ // is using, because it's not managed by us.
+ // TODO: reevaluate this once we actually have volume plugins in
+ // use in production - it may be safe, but I can't tell without
+ // knowing what the actual plugin does...
+ if err := makeVolumeInPluginIfNotExist(volume.config.Name, volume.config.Options, volume.plugin); err != nil {
+ return nil, err
+ }
+ } else {
+ // Create the mountpoint of this volume
+ volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name)
+ if err := os.MkdirAll(volPathRoot, 0700); err != nil {
+ return nil, errors.Wrapf(err, "error creating volume directory %q", volPathRoot)
+ }
+ if err := os.Chown(volPathRoot, volume.config.UID, volume.config.GID); err != nil {
+ return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID)
+ }
+ fullVolPath := filepath.Join(volPathRoot, "_data")
+ if err := os.MkdirAll(fullVolPath, 0755); err != nil {
+ return nil, errors.Wrapf(err, "error creating volume directory %q", fullVolPath)
+ }
+ if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
+ return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID)
+ }
+ if err := LabelVolumePath(fullVolPath); err != nil {
+ return nil, err
+ }
+ volume.config.MountPoint = fullVolPath
}
- volume.config.MountPoint = fullVolPath
lock, err := r.lockManager.AllocateLock()
if err != nil {
@@ -111,6 +134,39 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
return volume, nil
}
+// makeVolumeInPluginIfNotExist makes a volume in the given volume plugin if it
+// does not already exist.
+func makeVolumeInPluginIfNotExist(name string, options map[string]string, plugin *volplugin.VolumePlugin) error {
+ // Ping the volume plugin to see if it exists first.
+ // If it does, use the existing volume in the plugin.
+ // Options may not match exactly, but not much we can do about
+ // that. Not complaining avoids a lot of the sync issues we see
+ // with c/storage and libpod DB.
+ needsCreate := true
+ getReq := new(pluginapi.GetRequest)
+ getReq.Name = name
+ if resp, err := plugin.GetVolume(getReq); err == nil {
+ // TODO: What do we do if we get a 200 response, but the
+ // Volume is nil? The docs on the Plugin API are very
+ // nonspecific, so I don't know if this is valid or
+ // not...
+ if resp != nil {
+ needsCreate = false
+ logrus.Infof("Volume %q already exists in plugin %q, using existing volume", name, plugin.Name)
+ }
+ }
+ if needsCreate {
+ createReq := new(pluginapi.CreateRequest)
+ createReq.Name = name
+ createReq.Options = options
+ if err := plugin.CreateVolume(createReq); err != nil {
+ return errors.Wrapf(err, "error creating volume %q in plugin %s", name, plugin.Name)
+ }
+ }
+
+ return nil
+}
+
// removeVolume removes the specified volume from state as well tears down its mountpoint and storage
func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
if !v.valid {
@@ -185,9 +241,43 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
var removalErr error
+ // If we use a volume plugin, we need to remove from the plugin.
+ if v.UsesVolumeDriver() {
+ canRemove := true
+
+ // Do we have a volume driver?
+ if v.plugin == nil {
+ canRemove = false
+ removalErr = errors.Wrapf(define.ErrMissingPlugin, "cannot remove volume %s from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
+ } else {
+ // Ping the plugin first to verify the volume still
+ // exists.
+ // We're trying to be very tolerant of missing volumes
+ // in the backend, to avoid the problems we see with
+ // sync between c/storage and the Libpod DB.
+ getReq := new(pluginapi.GetRequest)
+ getReq.Name = v.Name()
+ if _, err := v.plugin.GetVolume(getReq); err != nil {
+ canRemove = false
+ removalErr = errors.Wrapf(err, "volume %s could not be retrieved from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
+ }
+ }
+ if canRemove {
+ req := new(pluginapi.RemoveRequest)
+ req.Name = v.Name()
+ if err := v.plugin.RemoveVolume(req); err != nil {
+ removalErr = errors.Wrapf(err, "volume %s could not be removed from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
+ }
+ }
+ }
+
// Free the volume's lock
if err := v.lock.Free(); err != nil {
- removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name())
+ if removalErr == nil {
+ removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name())
+ } else {
+ logrus.Errorf("Error freeing lock for volume %q: %v", v.Name(), err)
+ }
}
// Delete the mountpoint path of the volume, that is delete the volume
@@ -196,7 +286,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
if removalErr == nil {
removalErr = errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
} else {
- logrus.Errorf("error cleaning up volume storage for volume %q: %v", v.Name(), err)
+ logrus.Errorf("Error cleaning up volume storage for volume %q: %v", v.Name(), err)
}
}
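makeVolumeInPluginIfNotExist uses a Get-then-Create handshake so a volume that already exists in the plugin is adopted rather than failing creation. A caller-side sketch of the same handshake; it assumes the podman/v2 and go-plugins-helpers modules are importable, the GetVolume/CreateVolume signatures are as used in the hunk above, and the plugin name and socket path are hypothetical:

package main

import (
    "fmt"

    volplugin "github.com/containers/podman/v2/libpod/plugin"
    pluginapi "github.com/docker/go-plugins-helpers/volume"
)

// ensureVolume adopts an existing plugin volume if Get succeeds, otherwise
// asks the plugin to create it, mirroring the tolerant handshake above.
func ensureVolume(p *volplugin.VolumePlugin, name string, opts map[string]string) error {
    if resp, err := p.GetVolume(&pluginapi.GetRequest{Name: name}); err == nil && resp != nil {
        fmt.Printf("volume %q already exists in plugin %q\n", name, p.Name)
        return nil
    }
    return p.CreateVolume(&pluginapi.CreateRequest{Name: name, Options: opts})
}

func main() {
    // Hypothetical plugin handle; the socket path would normally come from
    // the runtime's volume plugin configuration.
    p, err := volplugin.GetVolumePlugin("example", "/run/docker/plugins/example.sock")
    if err != nil {
        fmt.Println("plugin not available:", err)
        return
    }
    fmt.Println(ensureVolume(p, "myvol", map[string]string{"o": "size=10M"}))
}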
diff --git a/libpod/volume.go b/libpod/volume.go
index 0535bf4db..4c137cb8e 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -1,10 +1,13 @@
package libpod
import (
+ "os"
+ "path/filepath"
"time"
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/lock"
+ "github.com/containers/podman/v2/libpod/plugin"
)
// Volume is a libpod named volume.
@@ -16,6 +19,7 @@ type Volume struct {
state *VolumeState
valid bool
+ plugin *plugin.VolumePlugin
runtime *Runtime
lock lock.Locker
}
@@ -29,7 +33,7 @@ type VolumeConfig struct {
// Labels for the volume.
Labels map[string]string `json:"labels"`
// The volume driver. Empty string or local does not activate a volume
- // driver, all other volumes will.
+ // driver, all other values will.
Driver string `json:"volumeDriver"`
// The location the volume is mounted at.
MountPoint string `json:"mountPoint"`
@@ -51,6 +55,10 @@ type VolumeConfig struct {
// Volumes are not guaranteed to have a state. Only volumes using the Local
// driver that have mount options set will create a state.
type VolumeState struct {
+ // Mountpoint is the location where the volume was mounted.
+ // This is only used for volumes using a volume plugin, which will mount
+ // at non-standard locations.
+ MountPoint string `json:"mountPoint,omitempty"`
// MountCount is the number of times this volume has been requested to
// be mounted.
// It is incremented on mount() and decremented on unmount().
@@ -79,6 +87,18 @@ func (v *Volume) Name() string {
return v.config.Name
}
+// Size returns the size on disk of the volume.
+func (v *Volume) Size() (uint64, error) {
+ var size uint64
+ err := filepath.Walk(v.config.MountPoint, func(path string, info os.FileInfo, err error) error {
+ if err == nil && !info.IsDir() {
+ size += (uint64)(info.Size())
+ }
+ return err
+ })
+ return size, err
+}
+
// Driver retrieves the volume's driver.
func (v *Volume) Driver() string {
return v.config.Driver
@@ -101,8 +121,20 @@ func (v *Volume) Labels() map[string]string {
}
// MountPoint returns the volume's mountpoint on the host
-func (v *Volume) MountPoint() string {
- return v.config.MountPoint
+func (v *Volume) MountPoint() (string, error) {
+ // For the sake of performance, avoid locking unless we have to.
+ if v.UsesVolumeDriver() {
+ v.lock.Lock()
+ defer v.lock.Unlock()
+
+ if err := v.update(); err != nil {
+ return "", err
+ }
+
+ return v.state.MountPoint, nil
+ }
+
+ return v.config.MountPoint, nil
}
// Options return the volume's options
@@ -125,14 +157,19 @@ func (v *Volume) UID() (int, error) {
v.lock.Lock()
defer v.lock.Unlock()
- if !v.valid {
- return -1, define.ErrVolumeRemoved
+ if err := v.update(); err != nil {
+ return -1, err
}
+ return v.uid(), nil
+}
+
+// Internal, unlocked accessor for UID.
+func (v *Volume) uid() int {
if v.state.UIDChowned > 0 {
- return v.state.UIDChowned, nil
+ return v.state.UIDChowned
}
- return v.config.UID, nil
+ return v.config.UID
}
// GID returns the GID the volume will be created as.
@@ -140,14 +177,19 @@ func (v *Volume) GID() (int, error) {
v.lock.Lock()
defer v.lock.Unlock()
- if !v.valid {
- return -1, define.ErrVolumeRemoved
+ if err := v.update(); err != nil {
+ return -1, err
}
+ return v.gid(), nil
+}
+
+// Internal, unlocked accessor for GID.
+func (v *Volume) gid() int {
if v.state.GIDChowned > 0 {
- return v.state.GIDChowned, nil
+ return v.state.GIDChowned
}
- return v.config.GID, nil
+ return v.config.GID
}
// CreatedTime returns the time the volume was created at. It was not tracked
@@ -184,3 +226,10 @@ func (v *Volume) IsDangling() (bool, error) {
}
return len(ctrs) == 0, nil
}
+
+// UsesVolumeDriver determines whether the volume uses a volume driver. Volume
+// drivers are pluggable backends for volumes that will manage the storage and
+// mounting.
+func (v *Volume) UsesVolumeDriver() bool {
+ return !(v.config.Driver == define.VolumeDriverLocal || v.config.Driver == "")
+}
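MountPoint on a plugin-backed volume now has to read state, since the plugin decides where the volume is mounted, which is why the accessor gained an error return; Size is a plain recursive walk of the local mount point. A standalone, stdlib-only sketch of the same walk-based sizing over an arbitrary directory (note that, as in the hunk above, an error returned from the callback stops the walk):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// dirSize sums the sizes of non-directory entries under root, the same
// pattern the new Volume.Size uses on a volume's mount point.
func dirSize(root string) (uint64, error) {
    var size uint64
    err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
        if err == nil && !info.IsDir() {
            size += uint64(info.Size())
        }
        return err
    })
    return size, err
}

func main() {
    n, err := dirSize(".")
    fmt.Println(n, err)
}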
diff --git a/libpod/volume_inspect.go b/libpod/volume_inspect.go
index c8b20b8f1..2448d1bb5 100644
--- a/libpod/volume_inspect.go
+++ b/libpod/volume_inspect.go
@@ -1,60 +1,52 @@
package libpod
import (
- "time"
-
"github.com/containers/podman/v2/libpod/define"
+ pluginapi "github.com/docker/go-plugins-helpers/volume"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
-// InspectVolumeData is the output of Inspect() on a volume. It is matched to
-// the format of 'docker volume inspect'.
-type InspectVolumeData struct {
- // Name is the name of the volume.
- Name string `json:"Name"`
- // Driver is the driver used to create the volume.
- // This will be properly implemented in a future version.
- Driver string `json:"Driver"`
- // Mountpoint is the path on the host where the volume is mounted.
- Mountpoint string `json:"Mountpoint"`
- // CreatedAt is the date and time the volume was created at. This is not
- // stored for older Libpod volumes; if so, it will be omitted.
- CreatedAt time.Time `json:"CreatedAt,omitempty"`
- // Status is presently unused and provided only for Docker compatibility.
- // In the future it will be used to return information on the volume's
- // current state.
- Status map[string]string `json:"Status,omitempty"`
- // Labels includes the volume's configured labels, key:value pairs that
- // can be passed during volume creation to provide information for third
- // party tools.
- Labels map[string]string `json:"Labels"`
- // Scope is unused and provided solely for Docker compatibility. It is
- // unconditionally set to "local".
- Scope string `json:"Scope"`
- // Options is a set of options that were used when creating the volume.
- // It is presently not used.
- Options map[string]string `json:"Options"`
- // UID is the UID that the volume was created with.
- UID int `json:"UID,omitempty"`
- // GID is the GID that the volume was created with.
- GID int `json:"GID,omitempty"`
- // Anonymous indicates that the volume was created as an anonymous
- // volume for a specific container, and will be be removed when any
- // container using it is removed.
- Anonymous bool `json:"Anonymous,omitempty"`
-}
-
// Inspect provides detailed information about the configuration of the given
// volume.
-func (v *Volume) Inspect() (*InspectVolumeData, error) {
+func (v *Volume) Inspect() (*define.InspectVolumeData, error) {
if !v.valid {
return nil, define.ErrVolumeRemoved
}
- data := new(InspectVolumeData)
+ v.lock.Lock()
+ defer v.lock.Unlock()
+
+ if err := v.update(); err != nil {
+ return nil, err
+ }
+
+ data := new(define.InspectVolumeData)
+
+ data.Mountpoint = v.config.MountPoint
+ if v.UsesVolumeDriver() {
+ logrus.Debugf("Querying volume plugin %s for status", v.config.Driver)
+ data.Mountpoint = v.state.MountPoint
+
+ if v.plugin == nil {
+ return nil, errors.Wrapf(define.ErrMissingPlugin, "volume %s uses volume plugin %s but it is not available, cannot inspect", v.Name(), v.config.Driver)
+ }
+
+ // Retrieve status for the volume.
+ // Need to query the volume driver.
+ req := new(pluginapi.GetRequest)
+ req.Name = v.Name()
+ resp, err := v.plugin.GetVolume(req)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving volume %s information from plugin %s", v.Name(), v.Driver())
+ }
+ if resp != nil {
+ data.Status = resp.Status
+ }
+ }
data.Name = v.config.Name
data.Driver = v.config.Driver
- data.Mountpoint = v.config.MountPoint
data.CreatedAt = v.config.CreatedTime
data.Labels = make(map[string]string)
for k, v := range v.config.Labels {
@@ -65,15 +57,8 @@ func (v *Volume) Inspect() (*InspectVolumeData, error) {
for k, v := range v.config.Options {
data.Options[k] = v
}
- var err error
- data.UID, err = v.UID()
- if err != nil {
- return nil, err
- }
- data.GID, err = v.GID()
- if err != nil {
- return nil, err
- }
+ data.UID = v.uid()
+ data.GID = v.gid()
data.Anonymous = v.config.IsAnon
return data, nil
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 95cb752e0..88d940370 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -22,13 +22,24 @@ func newVolume(runtime *Runtime) *Volume {
// teardownStorage deletes the volume from volumePath
func (v *Volume) teardownStorage() error {
+ if v.UsesVolumeDriver() {
+ return nil
+ }
+
+ // TODO: Should this be converted to use v.config.MountPoint?
return os.RemoveAll(filepath.Join(v.runtime.config.Engine.VolumePath, v.Name()))
}
// Volumes with options set, or a filesystem type, or a device to mount need to
// be mounted and unmounted.
func (v *Volume) needsMount() bool {
- return len(v.config.Options) > 0 && v.config.Driver == define.VolumeDriverLocal
+ // Non-local driver always needs mount
+ if v.UsesVolumeDriver() {
+ return true
+ }
+
+ // Local driver with options needs mount
+ return len(v.config.Options) > 0
}
// update() updates the volume state from the DB.
diff --git a/libpod/volume_internal_linux.go b/libpod/volume_internal_linux.go
index bbf47f124..e184505e7 100644
--- a/libpod/volume_internal_linux.go
+++ b/libpod/volume_internal_linux.go
@@ -8,11 +8,17 @@ import (
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/pkg/rootless"
+ pluginapi "github.com/docker/go-plugins-helpers/volume"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
+// This is a pseudo-container ID to use when requesting a mount or unmount from
+// the volume plugins.
+// This is the sha256 of the string "placeholder\n".
+const pseudoCtrID = "2f73349cfc4630255319c6c8dfc1b46a8996ace9d14d8e07563b165915918ec2"
+
// mount mounts the volume if necessary.
// A mount is necessary if a volume has any options set.
// If a mount is necessary, v.state.MountCount will be incremented.
@@ -20,7 +26,7 @@ import (
// host. Otherwise, we assume it is already mounted.
// Must be done while the volume is locked.
// Is a no-op on volumes that do not require a mount (as defined by
-// volumeNeedsMount())
+// volumeNeedsMount()).
func (v *Volume) mount() error {
if !v.needsMount() {
return nil
@@ -44,6 +50,28 @@ func (v *Volume) mount() error {
return v.save()
}
+ // Volume plugins implement their own mount counter, based on the ID of
+ // the mounting container. But we already have one, and honestly I trust
+ // ours more. So hardcode container ID to something reasonable, and use
+ // the same one for everything.
+ if v.UsesVolumeDriver() {
+ if v.plugin == nil {
+ return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name())
+ }
+
+ req := new(pluginapi.MountRequest)
+ req.Name = v.Name()
+ req.ID = pseudoCtrID
+ mountPoint, err := v.plugin.MountVolume(req)
+ if err != nil {
+ return err
+ }
+
+ v.state.MountCount += 1
+ v.state.MountPoint = mountPoint
+ return v.save()
+ }
+
volDevice := v.config.Options["device"]
volType := v.config.Options["type"]
volOptions := v.config.Options["o"]
@@ -132,6 +160,22 @@ func (v *Volume) unmount(force bool) error {
logrus.Debugf("Volume %s mount count now at %d", v.Name(), v.state.MountCount)
if v.state.MountCount == 0 {
+ if v.UsesVolumeDriver() {
+ if v.plugin == nil {
+ return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name())
+ }
+
+ req := new(pluginapi.UnmountRequest)
+ req.Name = v.Name()
+ req.ID = pseudoCtrID
+ if err := v.plugin.UnmountVolume(req); err != nil {
+ return err
+ }
+
+ v.state.MountPoint = ""
+ return v.save()
+ }
+
// Unmount the volume
if err := unix.Unmount(v.config.MountPoint, unix.MNT_DETACH); err != nil {
if err == unix.EINVAL {