author    Matthew Heon <mheon@redhat.com>  2021-01-12 14:29:27 -0500
committer Matthew Heon <mheon@redhat.com>  2021-01-14 18:29:28 -0500
commit    997de2f8e9e5453a99108bde012aa6c41d7323ec (patch)
tree      499660321cf95f726636fcdd3dd4a8afbb86e2f5 /libpod
parent    2b7793b6121d336a285fb7b9a7612c221cbf63d2 (diff)
Initial implementation of renaming containers
Basic theory: We remove the container, but *only from the DB*. We leave it in c/storage, we leave the lock allocated, we leave it running (if it is). Then we create an identical container with an altered name, and add that back to the database. Theoretically we now have a renamed container.

The advantage of this approach is that it doesn't just apply to rename - we can use this to make *any* configuration change to a container that does not alter its container ID.

Potential problems are numerous. This process is *THOROUGHLY* non-atomic at present - if you `kill -9` Podman mid-rename, things will be in a bad place, for example. Also, we can't rename containers that can't be removed normally - i.e., containers with dependencies (pod infra containers, for example).

The largest potential improvement will be to move the majority of the work into the DB, with a `RecreateContainer()` method - that will add atomicity, and let us remove the container without worrying about dependencies and similar issues.

Another potential problem: long-running processes that edit the DB and may have an older version of the configuration around. The most notable example is `podman run --rm` - the removal command needed to be manually edited to avoid this one. This begins to get at the heart of me not wanting to do this in the first place...

This provides CLI and API implementations for the frontend, but no tunnel implementation. That will be added in a future release (just held back for time now - we need this in 3.0 and are running low on time).

This is honestly kind of horrifying, but I think it will work.

Signed-off-by: Matthew Heon <mheon@redhat.com>
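
For reference, here is a minimal usage sketch (not part of this commit) of the new RenameContainer API. It assumes code living in package libpod with an already-constructed *Runtime, and uses the existing LookupContainer helper; renameExample itself is a hypothetical wrapper added purely for illustration. The key point is that the old *Container handle is invalidated by the call, so callers must switch to the returned object.

package libpod

import "context"

// renameExample is an illustrative sketch only (hypothetical helper, not part
// of this commit). It shows the intended call pattern for RenameContainer:
// look up the container, rename it, and continue with the returned handle,
// since the original *Container object is rendered unusable by the rename.
func (r *Runtime) renameExample(ctx context.Context, oldName, newName string) (*Container, error) {
	ctr, err := r.LookupContainer(oldName)
	if err != nil {
		return nil, err
	}
	// RenameContainer removes the container from the DB only (c/storage and
	// the lock are left alone), re-adds it under newName, and finally renames
	// it in c/storage.
	return r.RenameContainer(ctx, ctr, newName)
}
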
Diffstat (limited to 'libpod')
-rw-r--r--  libpod/runtime_ctr.go  149
1 file changed, 147 insertions(+), 2 deletions(-)
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index f22e48746..d2bcd8db3 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -72,6 +72,140 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
return r.setupContainer(ctx, ctr)
}
+// RenameContainer renames the given container.
+// The given container object will be rendered unusable, and a new, renamed
+// Container will be returned.
+func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName string) (*Container, error) {
+ ctr.lock.Lock()
+ defer ctr.lock.Unlock()
+
+ if err := ctr.syncContainer(); err != nil {
+ return nil, err
+ }
+
+ if newName == "" || !define.NameRegex.MatchString(newName) {
+ return nil, define.RegexError
+ }
+
+ // Check if the name is available.
+ // This is *100% NOT ATOMIC* so any failures in-flight will do
+ // *VERY BAD THINGS* to the state. So we have to try and catch all we
+ // can before starting.
+ if _, err := r.state.LookupContainerID(newName); err == nil {
+ return nil, errors.Wrapf(define.ErrCtrExists, "name %s is already in use by another container", newName)
+ }
+ if _, err := r.state.LookupPod(newName); err == nil {
+ return nil, errors.Wrapf(define.ErrPodExists, "name %s is already in use by another pod", newName)
+ }
+
+ // TODO: Investigate if it is possible to remove this limitation.
+ depCtrs, err := r.state.ContainerInUse(ctr)
+ if err != nil {
+ return nil, err
+ }
+ if len(depCtrs) > 0 {
+ return nil, errors.Wrapf(define.ErrCtrExists, "cannot rename container %s as it is in use by other containers: %v", ctr.ID(), strings.Join(depCtrs, ","))
+ }
+
+ // We need to pull an updated config, in case another rename fired and
+ // the config was re-written.
+ newConf, err := r.state.GetContainerConfig(ctr.ID())
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving container %s configuration from DB to remove", ctr.ID())
+ }
+ ctr.config = newConf
+
+ // TODO: This is going to fail if we have active exec sessions, too.
+ // Investigate fixing that at a later date.
+
+ var pod *Pod
+ if ctr.config.Pod != "" {
+ tmpPod, err := r.state.Pod(ctr.config.Pod)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving container %s pod", ctr.ID())
+ }
+ pod = tmpPod
+ // Lock pod to ensure it's not removed while we're working
+ pod.lock.Lock()
+ defer pod.lock.Unlock()
+ }
+
+ // Lock all volumes to ensure they are not removed while we're working
+ volsLocked := make(map[string]bool)
+ for _, namedVol := range ctr.config.NamedVolumes {
+ if volsLocked[namedVol.Name] {
+ continue
+ }
+ vol, err := r.state.Volume(namedVol.Name)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving volume used by container %s", ctr.ID())
+ }
+
+ volsLocked[vol.Name()] = true
+ vol.lock.Lock()
+ defer vol.lock.Unlock()
+ }
+
+ logrus.Infof("Going to rename container %s from %q to %q", ctr.ID(), ctr.Name(), newName)
+
+ // Step 1: remove the old container.
+ if pod != nil {
+ if err := r.state.RemoveContainerFromPod(pod, ctr); err != nil {
+ return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
+ }
+ } else {
+ if err := r.state.RemoveContainer(ctr); err != nil {
+ return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
+ }
+ }
+
+ // Step 2: Make a new container based on the old one.
+ // TODO: Should we deep-copy the container config and state, to be safe?
+ newCtr := new(Container)
+ newCtr.config = ctr.config
+ newCtr.state = ctr.state
+ newCtr.lock = ctr.lock
+ newCtr.ociRuntime = ctr.ociRuntime
+ newCtr.runtime = r
+ newCtr.rootlessSlirpSyncR = ctr.rootlessSlirpSyncR
+ newCtr.rootlessSlirpSyncW = ctr.rootlessSlirpSyncW
+ newCtr.rootlessPortSyncR = ctr.rootlessPortSyncR
+ newCtr.rootlessPortSyncW = ctr.rootlessPortSyncW
+
+ newCtr.valid = true
+ newCtr.config.Name = newName
+
+ // Step 3: Add that new container to the DB
+ if pod != nil {
+ if err := r.state.AddContainerToPod(pod, newCtr); err != nil {
+ return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
+ }
+ } else {
+ if err := r.state.AddContainer(newCtr); err != nil {
+ return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
+ }
+ }
+
+ // Step 4: Save the new container, to force the state to be written to
+ // the DB. This may not be necessary, depending on DB implementation,
+ // but let's do it to be safe.
+ if err := newCtr.save(); err != nil {
+ return nil, err
+ }
+
+ // Step 5: rename the container in c/storage.
+ // This can fail if the name is already in use by a non-Podman
+ // container. This puts us in a bad spot - we've already renamed the
+ // container in Podman. We can swap the order, but then we have the
+ // opposite problem. Atomicity is a real problem here, with no easy
+ // solution.
+ if err := r.store.SetNames(newCtr.ID(), []string{newCtr.Name()}); err != nil {
+ return nil, err
+ }
+
+ return newCtr, nil
+}
+
func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
if rSpec == nil {
return nil, errors.Wrapf(define.ErrInvalidArg, "must provide a valid runtime spec to create container")
@@ -393,7 +527,7 @@ func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool,
// removePod is used only when removing pods. It instructs Podman to ignore
// infra container protections, and *not* remove from the database (as pod
// remove will handle that).
-func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, removeVolume bool, removePod bool) error {
+func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, removeVolume, removePod bool) error {
span, _ := opentracing.StartSpanFromContext(ctx, "removeContainer")
span.SetTag("type", "runtime")
defer span.Finish()
@@ -406,6 +540,18 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
+ // We need to refresh container config from the DB, to ensure that any
+ // changes (e.g. a rename) are picked up before we start removing.
+ // Since HasContainer above succeeded, we can safely assume the
+ // container exists.
+ // This is *very iffy* but it should be OK because the container won't
+ // exist once we're done.
+ newConf, err := r.state.GetContainerConfig(c.ID())
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving container %s configuration from DB to remove", c.ID())
+ }
+ c.config = newConf
+
logrus.Debugf("Removing container %s", c.ID())
// We need to lock the pod before we lock the container.
@@ -413,7 +559,6 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
// Don't need to do this in pod removal case - we're evicting the entire
// pod.
var pod *Pod
- var err error
runtime := c.runtime
if c.config.Pod != "" && !removePod {
pod, err = r.state.Pod(c.config.Pod)