Diffstat (limited to 'libpod')
-rw-r--r--  libpod/adapter/pods.go                         38
-rw-r--r--  libpod/adapter/pods_remote.go                  44
-rw-r--r--  libpod/adapter/runtime.go                      23
-rw-r--r--  libpod/adapter/runtime_remote.go              132
-rw-r--r--  libpod/adapter/shortcuts/shortcuts.go          27
-rw-r--r--  libpod/boltdb_state.go                         92
-rw-r--r--  libpod/boltdb_state_internal.go                 7
-rw-r--r--  libpod/image/image.go                          69
-rw-r--r--  libpod/image/utils.go                          26
-rw-r--r--  libpod/in_memory_state.go                      64
-rw-r--r--  libpod/lock/in_memory_locks.go                 11
-rw-r--r--  libpod/lock/lock.go                            14
-rw-r--r--  libpod/lock/shm/shm_lock.c                     36
-rw-r--r--  libpod/lock/shm/shm_lock.go                    16
-rw-r--r--  libpod/lock/shm/shm_lock.h                      1
-rw-r--r--  libpod/lock/shm/shm_lock_test.go               28
-rw-r--r--  libpod/lock/shm_lock_manager_linux.go           7
-rw-r--r--  libpod/lock/shm_lock_manager_unsupported.go     5
-rw-r--r--  libpod/options.go                              16
-rw-r--r--  libpod/runtime.go                              43
-rw-r--r--  libpod/runtime_ctr.go                           2
-rw-r--r--  libpod/runtime_img.go                          44
-rw-r--r--  libpod/runtime_renumber.go                     57
-rw-r--r--  libpod/runtime_volume.go                       13
-rw-r--r--  libpod/runtime_volume_linux.go                 31
-rw-r--r--  libpod/state.go                                24
-rw-r--r--  libpod/state_test.go                           72
-rw-r--r--  libpod/volume.go                                5
-rw-r--r--  libpod/volume_internal.go                       3
29 files changed, 859 insertions, 91 deletions
diff --git a/libpod/adapter/pods.go b/libpod/adapter/pods.go
new file mode 100644
index 000000000..59642c42e
--- /dev/null
+++ b/libpod/adapter/pods.go
@@ -0,0 +1,38 @@
+// +build !remoteclient
+
+package adapter
+
+import (
+ "context"
+ "github.com/containers/libpod/libpod/adapter/shortcuts"
+
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/libpod"
+)
+
+// Pod wraps a libpod Pod for the local (non-remote) adapter
+type Pod struct {
+ *libpod.Pod
+}
+
+// RemovePods removes the pods selected by the CLI context, returning the IDs of removed pods and any errors
+func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValues) ([]string, []error) {
+ var (
+ errs []error
+ podids []string
+ )
+ pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
+ if err != nil {
+ errs = append(errs, err)
+ return nil, errs
+ }
+
+ for _, p := range pods {
+ if err := r.RemovePod(ctx, p, cli.Force, cli.Force); err != nil {
+ errs = append(errs, err)
+ } else {
+ podids = append(podids, p.ID())
+ }
+ }
+ return podids, errs
+}
diff --git a/libpod/adapter/pods_remote.go b/libpod/adapter/pods_remote.go
new file mode 100644
index 000000000..3fb147f48
--- /dev/null
+++ b/libpod/adapter/pods_remote.go
@@ -0,0 +1,44 @@
+// +build remoteclient
+
+package adapter
+
+import (
+ "context"
+
+ "github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/cmd/podman/varlink"
+ "github.com/containers/libpod/libpod"
+)
+
+// Pod wraps the remote client's view of a pod
+type Pod struct {
+ remotepod
+}
+
+type remotepod struct {
+ config *libpod.PodConfig
+ state *libpod.PodInspectState
+ Runtime *LocalRuntime
+}
+
+func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValues) ([]string, []error) {
+ var (
+ rmErrs []error
+ rmPods []string
+ )
+ podIDs, err := iopodman.GetPodsByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs)
+ if err != nil {
+ rmErrs = append(rmErrs, err)
+ return nil, rmErrs
+ }
+
+ for _, p := range podIDs {
+ reply, err := iopodman.RemovePod().Call(r.Conn, p, cli.Force)
+ if err != nil {
+ rmErrs = append(rmErrs, err)
+ } else {
+ rmPods = append(rmPods, reply)
+ }
+ }
+ return rmPods, rmErrs
+}
diff --git a/libpod/adapter/runtime.go b/libpod/adapter/runtime.go
index 02ef9af07..4f5b98dbb 100644
--- a/libpod/adapter/runtime.go
+++ b/libpod/adapter/runtime.go
@@ -310,3 +310,26 @@ func (r *LocalRuntime) Build(ctx context.Context, c *cliconfig.BuildValues, opti
func (r *LocalRuntime) PruneVolumes(ctx context.Context) ([]string, []error) {
return r.Runtime.PruneVolumes(ctx)
}
+
+// SaveImage is a wrapper function for saving an image to the local filesystem
+func (r *LocalRuntime) SaveImage(ctx context.Context, c *cliconfig.SaveValues) error {
+ source := c.InputArgs[0]
+ additionalTags := c.InputArgs[1:]
+
+ newImage, err := r.Runtime.ImageRuntime().NewFromLocal(source)
+ if err != nil {
+ return err
+ }
+ return newImage.Save(ctx, source, c.Format, c.Output, additionalTags, c.Quiet, c.Compress)
+}
+
+// LoadImage is a wrapper function for libpod LoadImage
+func (r *LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfig.LoadValues) (string, error) {
+ var (
+ writer io.Writer
+ )
+ if !cli.Quiet {
+ writer = os.Stderr
+ }
+ return r.Runtime.LoadImage(ctx, name, cli.Input, writer, cli.SignaturePolicy)
+}
diff --git a/libpod/adapter/runtime_remote.go b/libpod/adapter/runtime_remote.go
index b1d4d4d25..ca2fad852 100644
--- a/libpod/adapter/runtime_remote.go
+++ b/libpod/adapter/runtime_remote.go
@@ -20,6 +20,7 @@ import (
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/utils"
"github.com/containers/storage/pkg/archive"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -385,8 +386,11 @@ func (r *LocalRuntime) Export(name string, path string) error {
if err != nil {
return err
}
+ return r.GetFileFromRemoteHost(tempPath, path, true)
+}
- outputFile, err := os.Create(path)
+func (r *LocalRuntime) GetFileFromRemoteHost(remoteFilePath, outputPath string, delete bool) error {
+ outputFile, err := os.Create(outputPath)
if err != nil {
return err
}
@@ -395,7 +399,7 @@ func (r *LocalRuntime) Export(name string, path string) error {
writer := bufio.NewWriter(outputFile)
defer writer.Flush()
- reply, err := iopodman.ReceiveFile().Send(r.Conn, varlink.Upgrade, tempPath, true)
+ reply, err := iopodman.ReceiveFile().Send(r.Conn, varlink.Upgrade, remoteFilePath, delete)
if err != nil {
return err
}
@@ -409,7 +413,6 @@ func (r *LocalRuntime) Export(name string, path string) error {
if _, err := io.CopyN(writer, reader, length); err != nil {
return errors.Wrap(err, "file transfer failed")
}
-
return nil
}
@@ -467,34 +470,23 @@ func (r *LocalRuntime) Build(ctx context.Context, c *cliconfig.BuildValues, opti
Squash: options.Squash,
}
// tar the file
- logrus.Debugf("creating tarball of context dir %s", options.ContextDirectory)
- input, err := archive.Tar(options.ContextDirectory, archive.Uncompressed)
- if err != nil {
- return errors.Wrapf(err, "unable to create tarball of context dir %s", options.ContextDirectory)
- }
-
- // Write the tarball to the fs
- // TODO we might considering sending this without writing to the fs for the sake of performance
- // under given conditions like memory availability.
outputFile, err := ioutil.TempFile("", "varlink_tar_send")
if err != nil {
return err
}
defer outputFile.Close()
- logrus.Debugf("writing context dir tarball to %s", outputFile.Name())
+ defer os.Remove(outputFile.Name())
- _, err = io.Copy(outputFile, input)
- if err != nil {
+ // Create the tarball of the context dir to a tempfile
+ if err := utils.TarToFilesystem(options.ContextDirectory, outputFile); err != nil {
return err
}
-
- logrus.Debugf("completed writing context dir tarball %s", outputFile.Name())
// Send the context dir tarball over varlink.
tempFile, err := r.SendFileOverVarlink(outputFile.Name())
if err != nil {
return err
}
- buildinfo.ContextDir = strings.Replace(tempFile, ":", "", -1)
+ buildinfo.ContextDir = tempFile
reply, err := iopodman.BuildImage().Send(r.Conn, varlink.More, buildinfo)
if err != nil {
@@ -557,7 +549,7 @@ func (r *LocalRuntime) SendFileOverVarlink(source string) (string, error) {
}
- return tempFile, nil
+ return strings.Replace(tempFile, ":", "", -1), nil
}
// GetAllVolumes retrieves all the volumes
@@ -702,3 +694,105 @@ func (r *LocalRuntime) PruneVolumes(ctx context.Context) ([]string, []error) {
}
return prunedNames, errs
}
+
+// SaveImage is a wrapper function for saving an image to the local filesystem
+func (r *LocalRuntime) SaveImage(ctx context.Context, c *cliconfig.SaveValues) error {
+ source := c.InputArgs[0]
+ additionalTags := c.InputArgs[1:]
+
+ options := iopodman.ImageSaveOptions{
+ Name: source,
+ Format: c.Format,
+ Output: c.Output,
+ MoreTags: additionalTags,
+ Quiet: c.Quiet,
+ Compress: c.Compress,
+ }
+ reply, err := iopodman.ImageSave().Send(r.Conn, varlink.More, options)
+ if err != nil {
+ return err
+ }
+
+ var fetchfile string
+ for {
+ responses, flags, err := reply()
+ if err != nil {
+ return err
+ }
+ if len(responses.Id) > 0 {
+ fetchfile = responses.Id
+ }
+ for _, line := range responses.Logs {
+ fmt.Print(line)
+ }
+ if flags&varlink.Continues == 0 {
+ break
+ }
+
+ }
+ if err != nil {
+ return err
+ }
+
+ outputToDir := false
+ outfile := c.Output
+ var outputFile *os.File
+ // If the result is supposed to be a dir, we first need to land the
+ // tarball from the remote host in a temporary file
+ if options.Format != "oci-archive" && options.Format != "docker-archive" {
+ outputToDir = true
+ outputFile, err = ioutil.TempFile("", "saveimage_tempfile")
+ if err != nil {
+ return err
+ }
+ outfile = outputFile.Name()
+ defer outputFile.Close()
+ defer os.Remove(outputFile.Name())
+ }
+ // We now need to fetch the tarball result back to the local system
+ if err := r.GetFileFromRemoteHost(fetchfile, outfile, true); err != nil {
+ return err
+ }
+
+ // If the result is a tarball, we're done
+ // If it is a dir, we need to untar the temporary file into the dir
+ if outputToDir {
+ if err := utils.UntarToFileSystem(c.Output, outputFile, &archive.TarOptions{}); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// LoadImage loads a container image from a remote client's filesystem
+func (r *LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfig.LoadValues) (string, error) {
+ var names string
+ remoteTempFile, err := r.SendFileOverVarlink(cli.Input)
+ if err != nil {
+ return "", err
+ }
+ more := varlink.More
+ if cli.Quiet {
+ more = 0
+ }
+ reply, err := iopodman.LoadImage().Send(r.Conn, uint64(more), name, remoteTempFile, cli.Quiet, true)
+ if err != nil {
+ return "", err
+ }
+
+ for {
+ responses, flags, err := reply()
+ if err != nil {
+ logrus.Error(err)
+ return "", err
+ }
+ for _, line := range responses.Logs {
+ fmt.Print(line)
+ }
+ names = responses.Id
+ if flags&varlink.Continues == 0 {
+ break
+ }
+ }
+ return names, nil
+}
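
Both SaveImage and LoadImage above drain the same varlink "More" stream shape. A distilled sketch of that loop follows; the reply signature is assumed from the MoreResponse fields used above, so treat it as illustrative rather than the generated bindings verbatim:

// fetchLogsAndID drains a varlink More-stream: print server-side log lines
// as they arrive, remember the last reported Id, and stop once the
// Continues flag clears.
func fetchLogsAndID(reply func() (iopodman.MoreResponse, uint64, error)) (string, error) {
	var id string
	for {
		responses, flags, err := reply()
		if err != nil {
			return "", err
		}
		for _, line := range responses.Logs {
			fmt.Print(line)
		}
		if len(responses.Id) > 0 {
			id = responses.Id
		}
		if flags&varlink.Continues == 0 {
			return id, nil
		}
	}
}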
diff --git a/libpod/adapter/shortcuts/shortcuts.go b/libpod/adapter/shortcuts/shortcuts.go
new file mode 100644
index 000000000..0633399ae
--- /dev/null
+++ b/libpod/adapter/shortcuts/shortcuts.go
@@ -0,0 +1,27 @@
+package shortcuts
+
+import "github.com/containers/libpod/libpod"
+
+// GetPodsByContext gets pods whether all, latest, or a slice of names/ids
+func GetPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime) ([]*libpod.Pod, error) {
+ var outpods []*libpod.Pod
+ if all {
+ return runtime.GetAllPods()
+ }
+ if latest {
+ p, err := runtime.GetLatestPod()
+ if err != nil {
+ return nil, err
+ }
+ outpods = append(outpods, p)
+ return outpods, nil
+ }
+ for _, p := range pods {
+ pod, err := runtime.LookupPod(p)
+ if err != nil {
+ return nil, err
+ }
+ outpods = append(outpods, pod)
+ }
+ return outpods, nil
+}
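
A hedged usage sketch for GetPodsByContext, mirroring how RemovePods above resolves CLI context into concrete pods (the runtime value is assumed to be an already-initialized *libpod.Runtime):

// Resolve --latest (all == false, no explicit names) into a pod slice.
pods, err := shortcuts.GetPodsByContext(false, true, nil, runtime)
if err != nil {
	return err
}
for _, p := range pods {
	fmt.Println(p.ID())
}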
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 5bc15dd7f..25ef5cd0e 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -783,6 +783,94 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
return ctrs, nil
}
+// RewriteContainerConfig rewrites a container's configuration.
+// WARNING: This function is DANGEROUS. Do not use without reading the full
+// comment on this function in state.go.
+func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return ErrCtrRemoved
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ ctrBkt, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
+ if ctrDB == nil {
+ ctr.valid = false
+ return errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+ }
+
+ if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID())
+ }
+
+ return nil
+ })
+ return err
+}
+
+// RewritePodConfig rewrites a pod's configuration.
+// WARNING: This function is DANGEROUS. Do not use without reading the full
+// comment on this function in state.go.
+func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ if !pod.valid {
+ return ErrPodRemoved
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for pod %s", pod.ID())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ podBkt, err := getPodBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ podDB := podBkt.Bucket([]byte(pod.ID()))
+ if podDB == nil {
+ pod.valid = false
+ return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
+ }
+
+ if err := podDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating pod %s config JSON", pod.ID())
+ }
+
+ return nil
+ })
+ return err
+}
+
// Pod retrieves a pod given its full ID
func (s *BoltState) Pod(id string) (*Pod, error) {
if id == "" {
@@ -1281,10 +1369,6 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
return ErrDBClosed
}
- if !volume.valid {
- return ErrVolumeRemoved
- }
-
volName := []byte(volume.Name())
db, err := s.getDBCon()
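
Both rewrite helpers are thin wrappers around Bolt's transactional Put. A self-contained sketch of that pattern, where the import path, bucket name, and key are illustrative assumptions rather than libpod's internal schema:

package main

import (
	"log"

	bolt "github.com/coreos/bbolt" // assumed import path for the bolt fork in use
)

func main() {
	db, err := bolt.Open("state.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Atomically overwrite a stored config blob, as RewriteContainerConfig
	// does: find the object's bucket, then Put new JSON under the config key.
	err = db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists([]byte("ctr-abc123"))
		if err != nil {
			return err
		}
		return bkt.Put([]byte("config"), []byte(`{"id":"abc123"}`))
	})
	if err != nil {
		log.Fatal(err)
	}
}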
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index ea150cfac..3d749849d 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -348,13 +348,6 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
}
- // Get the lock
- lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
- if err != nil {
- return errors.Wrapf(err, "error retrieving lockfile for volume %s", string(name))
- }
- volume.lock = lock
-
volume.runtime = s.runtime
volume.valid = true
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 028a795ea..b20419d7b 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -5,14 +5,18 @@ import (
"encoding/json"
"fmt"
"io"
+ "os"
"strings"
"syscall"
"time"
types2 "github.com/containernetworking/cni/pkg/types"
cp "github.com/containers/image/copy"
+ "github.com/containers/image/directory"
+ dockerarchive "github.com/containers/image/docker/archive"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ ociarchive "github.com/containers/image/oci/archive"
is "github.com/containers/image/storage"
"github.com/containers/image/tarball"
"github.com/containers/image/transports"
@@ -26,8 +30,9 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/reexec"
digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
- opentracing "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -1084,3 +1089,65 @@ func (i *Image) Comment(ctx context.Context, manifestType string) (string, error
}
return ociv1Img.History[0].Comment, nil
}
+
+// Save writes a container image to the filesystem
+func (i *Image) Save(ctx context.Context, source, format, output string, moreTags []string, quiet, compress bool) error {
+ var (
+ writer io.Writer
+ destRef types.ImageReference
+ manifestType string
+ err error
+ )
+
+ if quiet {
+ writer = os.Stderr
+ }
+ switch format {
+ case "oci-archive":
+ destImageName := imageNameForSaveDestination(i, source)
+ destRef, err = ociarchive.NewReference(output, destImageName) // destImageName may be ""
+ if err != nil {
+ return errors.Wrapf(err, "error getting OCI archive ImageReference for (%q, %q)", output, destImageName)
+ }
+ case "oci-dir":
+ destRef, err = directory.NewReference(output)
+ if err != nil {
+ return errors.Wrapf(err, "error getting directory ImageReference for %q", output)
+ }
+ manifestType = imgspecv1.MediaTypeImageManifest
+ case "docker-dir":
+ destRef, err = directory.NewReference(output)
+ if err != nil {
+ return errors.Wrapf(err, "error getting directory ImageReference for %q", output)
+ }
+ manifestType = manifest.DockerV2Schema2MediaType
+ case "docker-archive", "":
+ dst := output
+ destImageName := imageNameForSaveDestination(i, source)
+ if destImageName != "" {
+ dst = fmt.Sprintf("%s:%s", dst, destImageName)
+ }
+ destRef, err = dockerarchive.ParseReference(dst) // FIXME? Add dockerarchive.NewReference
+ if err != nil {
+ return errors.Wrapf(err, "error getting Docker archive ImageReference for %q", dst)
+ }
+ default:
+ return errors.Errorf("unknown format option %q", format)
+ }
+ // supports saving multiple tags to the same tar archive
+ var additionaltags []reference.NamedTagged
+ if len(moreTags) > 0 {
+ additionaltags, err = GetAdditionalTags(moreTags)
+ if err != nil {
+ return err
+ }
+ }
+ if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", writer, compress, SigningOptions{}, &DockerRegistryOptions{}, additionaltags); err != nil {
+ if err2 := os.Remove(output); err2 != nil {
+ logrus.Errorf("error deleting %q: %v", output, err2)
+ }
+ return errors.Wrapf(err, "unable to save %q", source)
+ }
+
+ return nil
+}
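
A hedged call-site sketch for the new Save method; newImage is assumed to come from ImageRuntime().NewFromLocal, as in the adapter wrapper earlier in this diff:

// Save "alpine" as a docker-archive tarball with one additional tag;
// an empty format string also selects "docker-archive" via the switch above.
if err := newImage.Save(ctx, "alpine", "docker-archive", "/tmp/alpine.tar",
	[]string{"alpine:edge"}, false, false); err != nil {
	return err
}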
diff --git a/libpod/image/utils.go b/libpod/image/utils.go
index 3585428ad..544796a4b 100644
--- a/libpod/image/utils.go
+++ b/libpod/image/utils.go
@@ -1,6 +1,7 @@
package image
import (
+ "fmt"
"io"
"net/url"
"regexp"
@@ -148,3 +149,28 @@ func IsValidImageURI(imguri string) (bool, error) {
}
return true, nil
}
+
+// imageNameForSaveDestination returns a Docker-like reference appropriate for saving img,
+// which the user referred to as imgUserInput; or an empty string, if there is no appropriate
+// reference.
+func imageNameForSaveDestination(img *Image, imgUserInput string) string {
+ if strings.Contains(img.ID(), imgUserInput) {
+ return ""
+ }
+
+ prepend := ""
+ localRegistryPrefix := fmt.Sprintf("%s/", DefaultLocalRegistry)
+ if !strings.HasPrefix(imgUserInput, localRegistryPrefix) {
+ // we need to check if localhost was added to the image name in NewFromLocal
+ for _, name := range img.Names() {
+ // If the user is saving an image in the localhost registry, getLocalImage needs
+ // a name that matches the format localhost/<tag1>:<tag2> or localhost/<tag>:latest to correctly
+ // set up the manifest and save.
+ if strings.HasPrefix(name, localRegistryPrefix) && (strings.HasSuffix(name, imgUserInput) || strings.HasSuffix(name, fmt.Sprintf("%s:latest", imgUserInput))) {
+ prepend = localRegistryPrefix
+ break
+ }
+ }
+ }
+ return fmt.Sprintf("%s%s", prepend, imgUserInput)
+}
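
To make the prefixing rule concrete, a standalone sketch; the "localhost/" constant stands in for DefaultLocalRegistry plus the slash, and the image-ID early return above is omitted:

// localSaveName mirrors the localhost-prefix decision, for illustration only.
func localSaveName(imgUserInput string, imgNames []string) string {
	const prefix = "localhost/"
	if strings.HasPrefix(imgUserInput, prefix) {
		return imgUserInput
	}
	for _, name := range imgNames {
		if strings.HasPrefix(name, prefix) &&
			(strings.HasSuffix(name, imgUserInput) ||
				strings.HasSuffix(name, imgUserInput+":latest")) {
			return prefix + imgUserInput // e.g. "foo" -> "localhost/foo"
		}
	}
	return imgUserInput
}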
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 314799309..ab4fc8ba7 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -378,6 +378,58 @@ func (s *InMemoryState) ContainerInUse(ctr *Container) ([]string, error) {
return arr, nil
}
+// AllContainers retrieves all containers from the state
+func (s *InMemoryState) AllContainers() ([]*Container, error) {
+ ctrs := make([]*Container, 0, len(s.containers))
+ for _, ctr := range s.containers {
+ if s.namespace == "" || ctr.config.Namespace == s.namespace {
+ ctrs = append(ctrs, ctr)
+ }
+ }
+
+ return ctrs, nil
+}
+
+// RewriteContainerConfig rewrites a container's configuration.
+// This function is DANGEROUS, even with an in-memory state.
+// Please read the full comment on it in state.go before using it.
+func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
+ if !ctr.valid {
+ return ErrCtrRemoved
+ }
+
+ // If the container does not exist, return error
+ stateCtr, ok := s.containers[ctr.ID()]
+ if !ok {
+ ctr.valid = false
+ return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
+ }
+
+ stateCtr.config = newCfg
+
+ return nil
+}
+
+// RewritePodConfig rewrites a pod's configuration.
+// This function is DANGEROUS, even with in-memory state.
+// Please read the full comment on it in state.go before using it.
+func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
+ if !pod.valid {
+ return ErrPodRemoved
+ }
+
+ // If the pod does not exist, return error
+ statePod, ok := s.pods[pod.ID()]
+ if !ok {
+ pod.valid = false
+ return errors.Wrapf(ErrNoSuchPod, "pod with ID %s not found in state", pod.ID())
+ }
+
+ statePod.config = newCfg
+
+ return nil
+}
+
// Volume retrieves a volume from its full name
func (s *InMemoryState) Volume(name string) (*Volume, error) {
if name == "" {
@@ -486,18 +538,6 @@ func (s *InMemoryState) AllVolumes() ([]*Volume, error) {
return allVols, nil
}
-// AllContainers retrieves all containers from the state
-func (s *InMemoryState) AllContainers() ([]*Container, error) {
- ctrs := make([]*Container, 0, len(s.containers))
- for _, ctr := range s.containers {
- if s.namespace == "" || ctr.config.Namespace == s.namespace {
- ctrs = append(ctrs, ctr)
- }
- }
-
- return ctrs, nil
-}
-
// Pod retrieves a pod from the state from its full ID
func (s *InMemoryState) Pod(id string) (*Pod, error) {
if id == "" {
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
index db8f20e95..7c9605917 100644
--- a/libpod/lock/in_memory_locks.go
+++ b/libpod/lock/in_memory_locks.go
@@ -89,3 +89,14 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
return m.locks[id], nil
}
+
+// FreeAllLocks frees all locks.
+// This function is DANGEROUS. Please read the full comment in locks.go before
+// trying to use it.
+func (m *InMemoryManager) FreeAllLocks() error {
+ for _, lock := range m.locks {
+ lock.allocated = false
+ }
+
+ return nil
+}
diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go
index 1f94171fe..d6841646b 100644
--- a/libpod/lock/lock.go
+++ b/libpod/lock/lock.go
@@ -24,6 +24,20 @@ type Manager interface {
// The underlying lock MUST be the same as any other lock with the
// same UUID.
RetrieveLock(id uint32) (Locker, error)
+ // PLEASE READ FULL DESCRIPTION BEFORE USING.
+ // FreeAllLocks frees all allocated locks, in preparation for lock
+ // reallocation.
+ // As this deallocates all presently-held locks, this can be very
+ // dangerous - if there are other processes running that might be
+ // attempting to allocate new locks and free existing locks, we may
+ // encounter races leading to an inconsistent state.
+ // (This is in addition to the fact that FreeAllLocks instantly makes
+ // the state inconsistent simply by using it, and requires a full
+ // lock renumbering to restore consistency!).
+ // In short, this should only be used as part of unit tests, or lock
+ // renumbering, where reasonable guarantees about other processes can be
+ // made.
+ FreeAllLocks() error
}
// Locker is similar to sync.Locker, but provides a method for freeing the lock
diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c
index 4af58d857..d11fce71a 100644
--- a/libpod/lock/shm/shm_lock.c
+++ b/libpod/lock/shm/shm_lock.c
@@ -203,6 +203,8 @@ shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code) {
// terminating NULL byte.
// Returns a valid pointer on success or NULL on error.
// If an error occurs, negative ERRNO values will be written to error_code.
+// ERANGE is returned for a mismatch between num_locks and the number of locks
+// available in the SHM lock struct.
shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
int shm_fd;
shm_struct_t *shm;
@@ -255,11 +257,11 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
// Need to check the SHM to see if it's actually our locks
if (shm->magic != MAGIC) {
- *error_code = -1 * errno;
+ *error_code = -1 * EBADF;
goto CLEANUP;
}
if (shm->num_locks != (num_bitmaps * BITMAP_SIZE)) {
- *error_code = -1 * errno;
+ *error_code = -1 * ERANGE;
goto CLEANUP;
}
@@ -407,6 +409,36 @@ int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
return 0;
}
+// Deallocate all semaphores unconditionally.
+// Returns negative ERRNO values.
+int32_t deallocate_all_semaphores(shm_struct_t *shm) {
+ int ret_code;
+ uint i;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Lock the mutex controlling access to our shared memory
+ ret_code = take_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ // Iterate through all bitmaps and reset to unused
+ for (i = 0; i < shm->num_bitmaps; i++) {
+ shm->locks[i].bitmap = 0;
+ }
+
+ // Unlock the allocation control mutex
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ return 0;
+}
+
// Lock a given semaphore
// Does not check if the semaphore is allocated - this ensures that, even for
// removed containers, we can still successfully lock to check status (and
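
For intuition, a Go sketch of the bitmap bookkeeping that deallocate_all_semaphores resets; the word size of 32 is an assumption mirroring the C bitmap_t, and the real struct also pairs each bitmap with its POSIX semaphores:

const bitmapSize = 32 // assumption: matches BITMAP_SIZE in shm_lock.c

type lockBitmaps []uint32

// freeAll is the moral equivalent of deallocate_all_semaphores: zeroing
// every word marks every lock as free.
func (b lockBitmaps) freeAll() {
	for i := range b {
		b[i] = 0
	}
}

// allocate finds the first clear bit, sets it, and returns the lock index,
// matching how the C code scans the bitmaps on allocation.
func (b lockBitmaps) allocate() (uint32, bool) {
	for i, word := range b {
		for bit := uint32(0); bit < bitmapSize; bit++ {
			if word&(uint32(1)<<bit) == 0 {
				b[i] |= uint32(1) << bit
				return uint32(i)*bitmapSize + bit, true
			}
		}
	}
	return 0, false // every lock is in use
}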
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index 87d28e5c1..e70ea8743 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -155,6 +155,22 @@ func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
return nil
}
+// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
+// other containers and pods.
+func (locks *SHMLocks) DeallocateAllSemaphores() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ retCode := C.deallocate_all_semaphores(locks.lockStruct)
+ if retCode < 0 {
+ // Negative errno return from C
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
// LockSemaphore locks the given semaphore.
// If the semaphore is already locked, LockSemaphore will block until the lock
// can be acquired.
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
index 8e7e23fb7..58e4297e2 100644
--- a/libpod/lock/shm/shm_lock.h
+++ b/libpod/lock/shm/shm_lock.h
@@ -40,6 +40,7 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
int32_t close_lock_shm(shm_struct_t *shm);
int64_t allocate_semaphore(shm_struct_t *shm);
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
+int32_t deallocate_all_semaphores(shm_struct_t *shm);
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);
diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go
index 594eb5d8e..830035881 100644
--- a/libpod/lock/shm/shm_lock_test.go
+++ b/libpod/lock/shm/shm_lock_test.go
@@ -4,7 +4,6 @@ import (
"fmt"
"os"
"runtime"
- "syscall"
"testing"
"time"
@@ -53,12 +52,8 @@ func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
}
defer func() {
// Deallocate all locks
- // Ignore ENOENT (lock is not allocated)
- var i uint32
- for i = 0; i < numLocks; i++ {
- if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT {
- t.Fatalf("Error deallocating semaphore %d: %v", i, err)
- }
+ if err := locks.DeallocateAllSemaphores(); err != nil {
+ t.Fatalf("Error deallocating semaphores: %v", err)
}
if err := locks.Close(); err != nil {
@@ -212,6 +207,25 @@ func TestAllocateDeallocateCycle(t *testing.T) {
})
}
+// Test that DeallocateAllSemaphores deallocates all semaphores
+func TestDeallocateAllSemaphoresDeallocatesAll(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // Allocate a lock
+ locks1, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ // Free all locks
+ err = locks.DeallocateAllSemaphores()
+ assert.NoError(t, err)
+
+ // Allocate another lock
+ locks2, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ assert.Equal(t, locks1, locks2)
+ })
+}
+
// Test that locks actually lock
func TestLockSemaphoreActuallyLocks(t *testing.T) {
runLockTest(t, func(t *testing.T, locks *SHMLocks) {
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 94dfd7dd7..8678958ee 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -71,6 +71,13 @@ func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
return lock, nil
}
+// FreeAllLocks frees all locks in the manager.
+// This function is DANGEROUS. Please read the full comment in locks.go before
+// trying to use it.
+func (m *SHMLockManager) FreeAllLocks() error {
+ return m.locks.DeallocateAllSemaphores()
+}
+
// SHMLock is an individual shared memory lock.
type SHMLock struct {
lockID uint32
diff --git a/libpod/lock/shm_lock_manager_unsupported.go b/libpod/lock/shm_lock_manager_unsupported.go
index cbdb2f7bc..1d6e3fcbd 100644
--- a/libpod/lock/shm_lock_manager_unsupported.go
+++ b/libpod/lock/shm_lock_manager_unsupported.go
@@ -27,3 +27,8 @@ func (m *SHMLockManager) AllocateLock() (Locker, error) {
func (m *SHMLockManager) RetrieveLock(id string) (Locker, error) {
return nil, fmt.Errorf("not supported")
}
+
+// FreeAllLocks is not supported on this platform
+func (m *SHMLockManager) FreeAllLocks() error {
+ return fmt.Errorf("not supported")
+}
diff --git a/libpod/options.go b/libpod/options.go
index 06737776b..7c37fd65b 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -394,6 +394,22 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption {
}
}
+// WithRenumber instructs libpod to perform a lock renumbering while
+// initializing. This will handle migrations from early versions of libpod with
+// file locks to newer versions with SHM locking, as well as changes in the
+// number of configured locks.
+func WithRenumber() RuntimeOption {
+ return func(rt *Runtime) error {
+ if rt.valid {
+ return ErrRuntimeFinalized
+ }
+
+ rt.doRenumber = true
+
+ return nil
+ }
+}
+
// Container Creation Options
// WithShmDir sets the directory that should be mounted on /dev/shm.
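
A hedged sketch of how a caller opts in to renumbering (NewRuntime is libpod's real constructor; per the runtime.go comment below, a renumbering runtime should not be used for anything else afterwards):

// Build a runtime that renumbers all locks during initialization.
rt, err := libpod.NewRuntime(libpod.WithRenumber())
if err != nil {
	log.Fatalf("renumbering locks: %v", err)
}
_ = rt // renumbering already ran inside NewRuntime; callers exit here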
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 4f5d1e292..94dbf37dd 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -7,6 +7,7 @@ import (
"os/exec"
"path/filepath"
"sync"
+ "syscall"
"github.com/BurntSushi/toml"
is "github.com/containers/image/storage"
@@ -79,7 +80,8 @@ type RuntimeOption func(*Runtime) error
// Runtime is the core libpod runtime
type Runtime struct {
- config *RuntimeConfig
+ config *RuntimeConfig
+
state State
store storage.Store
storageService *storageService
@@ -88,12 +90,23 @@ type Runtime struct {
netPlugin ocicni.CNIPlugin
ociRuntimePath OCIRuntimePath
conmonPath string
- valid bool
- lock sync.RWMutex
imageRuntime *image.Runtime
firewallBackend firewall.FirewallBackend
lockManager lock.Manager
configuredFrom *runtimeConfiguredFrom
+
+ // doRenumber indicates that the runtime should perform a lock renumber
+ // during initialization.
+ // Once the runtime has been initialized and returned, this variable is
+ // unused.
+ doRenumber bool
+
+ // valid indicates whether the runtime is ready to use.
+ // valid is set to true when a runtime is returned from GetRuntime(),
+ // and remains true until the runtime is shut down (rendering its
+ // storage unusable). When valid is false, the runtime cannot be used.
+ valid bool
+ lock sync.RWMutex
}
// OCIRuntimePath contains information about an OCI runtime.
@@ -753,6 +766,7 @@ func makeRuntime(runtime *Runtime) (err error) {
aliveLock.Unlock()
}
}()
+
_, err = os.Stat(runtimeAliveFile)
if err != nil {
// If the file doesn't exist, we need to refresh the state
@@ -778,12 +792,35 @@ func makeRuntime(runtime *Runtime) (err error) {
if err != nil {
return err
}
+ } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
+ logrus.Debugf("Number of locks does not match - removing old locks")
+
+ // ERANGE indicates a lock numbering mismatch.
+ // Since we're renumbering, this is not fatal.
+ // Remove the earlier set of locks and recreate.
+ if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
+ return errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
+ }
+
+ manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ return err
+ }
} else {
return err
}
}
runtime.lockManager = manager
+ // If we're renumbering locks, do it now.
+ // It breaks out of normal runtime init, and will not return a valid
+ // runtime.
+ if runtime.doRenumber {
+ if err := runtime.renumberLocks(); err != nil {
+ return err
+ }
+ }
+
// If we need to refresh the state, do it now - things are guaranteed to
// be set up by now.
if doRefresh {
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 1055da75b..2ec8d0795 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -421,7 +421,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
for _, v := range volumes {
if volume, err := runtime.state.Volume(v); err == nil {
- if err := runtime.removeVolume(ctx, volume, false, true); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
+ if err := runtime.removeVolume(ctx, volume, false); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
logrus.Errorf("cleanup volume (%s): %v", v, err)
}
}
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index 1e9689362..451c2ebe7 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -3,7 +3,6 @@ package libpod
import (
"context"
"fmt"
- "github.com/opencontainers/image-spec/specs-go/v1"
"io"
"io/ioutil"
"net/http"
@@ -15,6 +14,11 @@ import (
"github.com/containers/libpod/pkg/util"
"github.com/containers/storage"
"github.com/pkg/errors"
+
+ "github.com/containers/image/directory"
+ dockerarchive "github.com/containers/image/docker/archive"
+ ociarchive "github.com/containers/image/oci/archive"
+ "github.com/opencontainers/image-spec/specs-go/v1"
)
// Runtime API
@@ -211,3 +215,41 @@ func downloadFromURL(source string) (string, error) {
return outFile.Name(), nil
}
+
+// LoadImage loads a container image into local storage
+func (r *Runtime) LoadImage(ctx context.Context, name, inputFile string, writer io.Writer, signaturePolicy string) (string, error) {
+ var newImages []*image.Image
+ src, err := dockerarchive.ParseReference(inputFile) // FIXME? We should add dockerarchive.NewReference()
+ if err == nil {
+ newImages, err = r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ }
+ if err != nil {
+ // generate full src name with specified image:tag
+ src, err := ociarchive.NewReference(inputFile, name) // imageName may be ""
+ if err == nil {
+ newImages, err = r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ }
+ if err != nil {
+ src, err := directory.NewReference(inputFile)
+ if err == nil {
+ newImages, err = r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ }
+ if err != nil {
+ return "", errors.Wrapf(err, "error pulling %q", name)
+ }
+ }
+ }
+ return getImageNames(newImages), nil
+}
+
+func getImageNames(images []*image.Image) string {
+ var names string
+ for i := range images {
+ if i == 0 {
+ names = images[i].InputName
+ } else {
+ names += ", " + images[i].InputName
+ }
+ }
+ return names
+}
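
getImageNames above builds the comma-separated list by hand; an equivalent sketch using strings.Join gives the same output slightly more idiomatically:

func getImageNames(images []*image.Image) string {
	names := make([]string, 0, len(images))
	for _, img := range images {
		names = append(names, img.InputName)
	}
	return strings.Join(names, ", ")
}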
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
new file mode 100644
index 000000000..125cf0825
--- /dev/null
+++ b/libpod/runtime_renumber.go
@@ -0,0 +1,57 @@
+package libpod
+
+import (
+ "github.com/pkg/errors"
+)
+
+// renumberLocks reassigns lock numbers for all containers and pods in the
+// state.
+// TODO: It would be desirable to make it impossible to call this until all
+// other libpod sessions are dead.
+// Possibly use a read-write file lock, with all non-renumber podmans owning the
+// lock as read, renumber attempting to take a write lock?
+// The alternative is some sort of session tracking, and I don't know how
+// reliable that can be.
+func (r *Runtime) renumberLocks() error {
+ // Start off by deallocating all locks
+ if err := r.lockManager.FreeAllLocks(); err != nil {
+ return err
+ }
+
+ allCtrs, err := r.state.AllContainers()
+ if err != nil {
+ return err
+ }
+ for _, ctr := range allCtrs {
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return errors.Wrapf(err, "error allocating lock for container %s", ctr.ID())
+ }
+
+ ctr.config.LockID = lock.ID()
+
+ // Write the new lock ID
+ if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
+ return err
+ }
+ }
+ allPods, err := r.state.AllPods()
+ if err != nil {
+ return err
+ }
+ for _, pod := range allPods {
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return errors.Wrapf(err, "error allocating lock for pod %s", pod.ID())
+ }
+
+ pod.config.LockID = lock.ID()
+
+ // Write the new lock ID
+ if err := r.state.RewritePodConfig(pod, pod.config); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index beae50ac9..11f37ad4b 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -19,7 +19,7 @@ type VolumeCreateOption func(*Volume) error
type VolumeFilter func(*Volume) bool
// RemoveVolume removes a volume
-func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error {
r.lock.Lock()
defer r.lock.Unlock()
@@ -35,10 +35,7 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool
}
}
- v.lock.Lock()
- defer v.lock.Unlock()
-
- return r.removeVolume(ctx, v, force, prune)
+ return r.removeVolume(ctx, v, force)
}
// RemoveVolumes removes a slice of volumes or all with a force bool
@@ -64,7 +61,7 @@ func (r *Runtime) RemoveVolumes(ctx context.Context, volumes []string, all, forc
}
for _, vol := range vols {
- if err := r.RemoveVolume(ctx, vol, force, false); err != nil {
+ if err := r.RemoveVolume(ctx, vol, force); err != nil {
return deletedVols, err
}
logrus.Debugf("removed volume %s", vol.Name())
@@ -168,8 +165,8 @@ func (r *Runtime) PruneVolumes(ctx context.Context) ([]string, []error) {
}
for _, vol := range vols {
- if err := r.RemoveVolume(ctx, vol, false, true); err != nil {
- if err != ErrVolumeBeingUsed {
+ if err := r.RemoveVolume(ctx, vol, false); err != nil {
+ if errors.Cause(err) != ErrVolumeBeingUsed && errors.Cause(err) != ErrVolumeRemoved {
pruneErrors = append(pruneErrors, err)
}
continue
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 0727cfedf..838c0167a 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -67,13 +67,6 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
volume.config.MountPoint = fullVolPath
- lock, err := r.lockManager.AllocateLock()
- if err != nil {
- return nil, errors.Wrapf(err, "error allocating lock for new volume")
- }
- volume.lock = lock
- volume.config.LockID = volume.lock.ID()
-
volume.valid = true
// Add the volume to state
@@ -85,9 +78,12 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
// removeVolume removes the specified volume from state as well as tears down its mountpoint and storage
-func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
if !v.valid {
- return ErrNoSuchVolume
+ if ok, _ := r.state.HasVolume(v.Name()); !ok {
+ return nil
+ }
+ return ErrVolumeRemoved
}
deps, err := r.state.VolumeInUse(v)
@@ -95,9 +91,6 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
return err
}
if len(deps) != 0 {
- if prune {
- return ErrVolumeBeingUsed
- }
depsStr := strings.Join(deps, ", ")
if !force {
return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
@@ -112,18 +105,20 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
}
}
- // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
- if err := v.teardownStorage(); err != nil {
- return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
- }
+ // Set volume as invalid so it can no longer be used
+ v.valid = false
// Remove the volume from the state
if err := r.state.RemoveVolume(v); err != nil {
return errors.Wrapf(err, "error removing volume %s", v.Name())
}
- // Set volume as invalid so it can no longer be used
- v.valid = false
+ // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
+ if err := v.teardownStorage(); err != nil {
+ return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+ }
+
+ logrus.Debugf("Removed volume %s", v.Name())
return nil
}
diff --git a/libpod/state.go b/libpod/state.go
index 88d89f673..98282fc83 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -97,6 +97,30 @@ type State interface {
// returned.
AllContainers() ([]*Container, error)
+ // PLEASE READ FULL DESCRIPTION BEFORE USING.
+ // Rewrite a container's configuration.
+ // This function breaks libpod's normal prohibition on a read-only
+ // configuration, and as such should be used EXTREMELY SPARINGLY and
+ // only in very specific circumstances.
+ // Specifically, it is ONLY safe to use thing function to make changes
+ // that result in a functionally identical configuration (migrating to
+ // newer, but identical, configuration fields), or during libpod init
+ // WHILE HOLDING THE ALIVE LOCK (to prevent other libpod instances from
+ // being initialized).
+ // Most things in config can be changed by this, but container ID and
+ // name ABSOLUTELY CANNOT BE ALTERED. If you do so, there is a high
+ // potential for database corruption.
+ // There are a lot of capital letters and conditions here, but the short
+ // answer is this: use this only very sparingly, and only if you really
+ // know what you're doing.
+ RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error
+ // PLEASE READ THE ABOVE DESCRIPTION BEFORE USING.
+ // This function is identical to RewriteContainerConfig, save for the
+ // fact that it is used with pods instead.
+ // It is subject to the same conditions as RewriteContainerConfig.
+ // Please do not use this unless you know what you're doing.
+ RewritePodConfig(pod *Pod, newCfg *PodConfig) error
+
// Accepts full ID of pod.
// If the pod given is not in the set namespace, an error will be
// returned.
diff --git a/libpod/state_test.go b/libpod/state_test.go
index 4bd00ab55..be68a2d69 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -1298,6 +1298,78 @@ func TestCannotUseBadIDAsGenericDependency(t *testing.T) {
})
}
+func TestRewriteContainerConfigDoesNotExist(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ err := state.RewriteContainerConfig(&Container{}, &ContainerConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewriteContainerConfigNotInState(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+ err = state.RewriteContainerConfig(testCtr, &ContainerConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewriteContainerConfigRewritesConfig(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ err = state.AddContainer(testCtr)
+ assert.NoError(t, err)
+
+ testCtr.config.LogPath = "/another/path/"
+
+ err = state.RewriteContainerConfig(testCtr, testCtr.config)
+ assert.NoError(t, err)
+
+ testCtrFromState, err := state.Container(testCtr.ID())
+ assert.NoError(t, err)
+
+ testContainersEqual(t, testCtrFromState, testCtr, true)
+ })
+}
+
+func TestRewritePodConfigDoesNotExist(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ err := state.RewritePodConfig(&Pod{}, &PodConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewritePodConfigNotInState(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
+ assert.NoError(t, err)
+ err = state.RewritePodConfig(testPod, &PodConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewritePodConfigRewritesConfig(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
+ assert.NoError(t, err)
+
+ err = state.AddPod(testPod)
+ assert.NoError(t, err)
+
+ testPod.config.CgroupParent = "/another_cgroup_parent"
+
+ err = state.RewritePodConfig(testPod, testPod.config)
+ assert.NoError(t, err)
+
+ testPodFromState, err := state.Pod(testPod.ID())
+ assert.NoError(t, err)
+
+ testPodsEqual(t, testPodFromState, testPod, true)
+ })
+}
+
func TestGetPodDoesNotExist(t *testing.T) {
runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.Pod("doesnotexist")
diff --git a/libpod/volume.go b/libpod/volume.go
index 026a3bf49..74878b6a4 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -1,7 +1,5 @@
package libpod
-import "github.com/containers/libpod/libpod/lock"
-
// Volume is the type used to create named volumes
// TODO: all volumes should be created using this and the Volume API
type Volume struct {
@@ -9,7 +7,6 @@ type Volume struct {
valid bool
runtime *Runtime
- lock lock.Locker
}
// VolumeConfig holds the volume's config information
@@ -17,8 +14,6 @@ type Volume struct {
type VolumeConfig struct {
// Name of the volume
Name string `json:"name"`
- // ID of this volume's lock
- LockID uint32 `json:"lockID"`
Labels map[string]string `json:"labels"`
MountPoint string `json:"mountPoint"`
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 0de8a2350..35f0ca19d 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -18,8 +18,5 @@ func newVolume(runtime *Runtime) (*Volume, error) {
// teardownStorage deletes the volume from volumePath
func (v *Volume) teardownStorage() error {
- if !v.valid {
- return ErrNoSuchVolume
- }
return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name()))
}