Diffstat (limited to 'libpod')
-rw-r--r--  libpod/boltdb_state.go              98
-rw-r--r--  libpod/container_internal.go        22
-rw-r--r--  libpod/container_internal_linux.go  212
-rw-r--r--  libpod/container_log.go             1
-rw-r--r--  libpod/container_log_linux.go       1
-rw-r--r--  libpod/image/image.go               6
-rw-r--r--  libpod/image/image_test.go          8
-rw-r--r--  libpod/image/layer_tree.go          8
-rw-r--r--  libpod/image/pull.go                13
-rw-r--r--  libpod/in_memory_state.go           40
-rw-r--r--  libpod/oci_conmon_linux.go          12
-rw-r--r--  libpod/rootless_cni_linux.go        2
-rw-r--r--  libpod/runtime_ctr.go               113
-rw-r--r--  libpod/runtime_pod_infra_linux.go   2
-rw-r--r--  libpod/state.go                     13
15 files changed, 240 insertions, 311 deletions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 6f2eaeab2..122dd080f 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -1681,6 +1681,104 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf
return err
}
+// SafeRewriteContainerConfig rewrites a container's configuration in a more
+// limited fashion than RewriteContainerConfig. It is marked as safe to use
+// under most circumstances, unlike RewriteContainerConfig.
+// DO NOT USE TO: Change container dependencies, change pod membership, change
+// locks, change container ID.
+func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if newName != "" && newCfg.Name != newName {
+ return errors.Wrapf(define.ErrInvalidArg, "new name %s for container %s must match name in given container config", newName, ctr.ID())
+ }
+ if newName != "" && oldName == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "must provide old name for container if a new name is given")
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ if newName != "" {
+ idBkt, err := getIDBucket(tx)
+ if err != nil {
+ return err
+ }
+ namesBkt, err := getNamesBucket(tx)
+ if err != nil {
+ return err
+ }
+ allCtrsBkt, err := getAllCtrsBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ needsRename := true
+ if exists := namesBkt.Get([]byte(newName)); exists != nil {
+ if string(exists) == ctr.ID() {
+ // Name already associated with the ID
+ // of this container. No need for a
+ // rename.
+ needsRename = false
+ } else {
+ return errors.Wrapf(define.ErrCtrExists, "name %s already in use, cannot rename container %s", newName, ctr.ID())
+ }
+ }
+
+ if needsRename {
+ // We do have to remove the old name. The other
+ // buckets are ID-indexed so we just need to
+ // overwrite the values there.
+ if err := namesBkt.Delete([]byte(oldName)); err != nil {
+ return errors.Wrapf(err, "error deleting container %s old name from DB for rename", ctr.ID())
+ }
+ if err := idBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
+ return errors.Wrapf(err, "error renaming container %s in ID bucket in DB", ctr.ID())
+ }
+ if err := namesBkt.Put([]byte(newName), []byte(ctr.ID())); err != nil {
+ return errors.Wrapf(err, "error adding new name %s for container %s in DB", newName, ctr.ID())
+ }
+ if err := allCtrsBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
+ return errors.Wrapf(err, "error renaming container %s in all containers bucket in DB", ctr.ID())
+ }
+ }
+ }
+
+ ctrBkt, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
+ if ctrDB == nil {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+ }
+
+ if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID())
+ }
+
+ return nil
+ })
+ return err
+}
+
// RewritePodConfig rewrites a pod's configuration.
// WARNING: This function is DANGEROUS. Do not use without reading the full
// comment on this function in state.go.
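Editor's note: the new SafeRewriteContainerConfig above performs the name swap and the config rewrite inside a single bolt Update transaction, so the names, ID, and all-containers buckets cannot end up disagreeing. For reference, here is a minimal standalone sketch of that rename-in-one-transaction pattern; the bucket names, database path, and the go.etcd.io/bbolt import path are illustrative assumptions, not Podman's actual schema.

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

// renameEntry renames the mapping for a single ID inside one transaction:
// drop the old name key, then write the new name in both directions.
func renameEntry(db *bolt.DB, id, oldName, newName string) error {
	return db.Update(func(tx *bolt.Tx) error {
		names := tx.Bucket([]byte("names")) // name -> ID
		ids := tx.Bucket([]byte("ids"))     // ID -> name
		if names == nil || ids == nil {
			return fmt.Errorf("buckets not initialized")
		}
		// Refuse the rename if the new name belongs to a different ID.
		if existing := names.Get([]byte(newName)); existing != nil && string(existing) != id {
			return fmt.Errorf("name %s already in use", newName)
		}
		if err := names.Delete([]byte(oldName)); err != nil {
			return err
		}
		if err := names.Put([]byte(newName), []byte(id)); err != nil {
			return err
		}
		return ids.Put([]byte(id), []byte(newName))
	})
}

func main() {
	db, err := bolt.Open("/tmp/rename-demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Seed both buckets with one entry, then rename it atomically.
	if err := db.Update(func(tx *bolt.Tx) error {
		names, err := tx.CreateBucketIfNotExists([]byte("names"))
		if err != nil {
			return err
		}
		ids, err := tx.CreateBucketIfNotExists([]byte("ids"))
		if err != nil {
			return err
		}
		if err := names.Put([]byte("old"), []byte("abc123")); err != nil {
			return err
		}
		return ids.Put([]byte("abc123"), []byte("old"))
	}); err != nil {
		log.Fatal(err)
	}

	if err := renameEntry(db, "abc123", "old", "new"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("renamed")
}
```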
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 2e0c24579..7e8226de4 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -13,6 +13,7 @@ import (
"strings"
"time"
+ metadata "github.com/checkpoint-restore/checkpointctl/lib"
"github.com/containers/buildah/copier"
"github.com/containers/common/pkg/secrets"
"github.com/containers/podman/v3/libpod/define"
@@ -135,7 +136,7 @@ func (c *Container) ControlSocketPath() string {
// CheckpointPath returns the path to the directory containing the checkpoint
func (c *Container) CheckpointPath() string {
- return filepath.Join(c.bundlePath(), "checkpoint")
+ return filepath.Join(c.bundlePath(), metadata.CheckpointDirectory)
}
// PreCheckpointPath returns the path to the directory containing the pre-checkpoint-images
@@ -2141,26 +2142,11 @@ func (c *Container) canWithPrevious() error {
return err
}
-// writeJSONFile marshalls and writes the given data to a JSON file
-// in the bundle path
-func (c *Container) writeJSONFile(v interface{}, file string) error {
- fileJSON, err := json.MarshalIndent(v, "", " ")
- if err != nil {
- return errors.Wrapf(err, "error writing JSON to %s for container %s", file, c.ID())
- }
- file = filepath.Join(c.bundlePath(), file)
- if err := ioutil.WriteFile(file, fileJSON, 0644); err != nil {
- return err
- }
-
- return nil
-}
-
// prepareCheckpointExport writes the config and spec to
// JSON files for later export
func (c *Container) prepareCheckpointExport() error {
// save live config
- if err := c.writeJSONFile(c.Config(), "config.dump"); err != nil {
+ if _, err := metadata.WriteJSONFile(c.Config(), c.bundlePath(), metadata.ConfigDumpFile); err != nil {
return err
}
@@ -2171,7 +2157,7 @@ func (c *Container) prepareCheckpointExport() error {
logrus.Debugf("generating spec for container %q failed with %v", c.ID(), err)
return err
}
- if err := c.writeJSONFile(g.Config, "spec.dump"); err != nil {
+ if _, err := metadata.WriteJSONFile(g.Config, c.bundlePath(), metadata.SpecDumpFile); err != nil {
return err
}
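Editor's note: the removed Container.writeJSONFile helper is superseded by metadata.WriteJSONFile from checkpointctl, which the diff shows being called as WriteJSONFile(value, directory, filename) and returning a value plus an error. A hedged local equivalent that mirrors the deleted helper looks roughly like this; the exact checkpointctl signature and file permissions are assumptions.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"path/filepath"
)

// writeJSONFile marshals v with indentation and writes it to dir/file,
// returning the resulting path (the behaviour the removed helper had and
// metadata.WriteJSONFile is assumed to provide).
func writeJSONFile(v interface{}, dir, file string) (string, error) {
	fileJSON, err := json.MarshalIndent(v, "", " ")
	if err != nil {
		return "", fmt.Errorf("error marshalling JSON for %s: %w", file, err)
	}
	path := filepath.Join(dir, file)
	if err := ioutil.WriteFile(path, fileJSON, 0644); err != nil {
		return "", err
	}
	return path, nil
}

func main() {
	cfg := map[string]string{"name": "demo"}
	path, err := writeJSONFile(cfg, "/tmp", "config.dump")
	if err != nil {
		panic(err)
	}
	fmt.Println("wrote", path)
}
```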
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index dc0418148..2684c2845 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -19,6 +19,7 @@ import (
"syscall"
"time"
+ metadata "github.com/checkpoint-restore/checkpointctl/lib"
cnitypes "github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/buildah/pkg/chrootuser"
@@ -33,6 +34,7 @@ import (
"github.com/containers/podman/v3/libpod/events"
"github.com/containers/podman/v3/pkg/annotations"
"github.com/containers/podman/v3/pkg/cgroups"
+ "github.com/containers/podman/v3/pkg/checkpoint/crutils"
"github.com/containers/podman/v3/pkg/criu"
"github.com/containers/podman/v3/pkg/lookup"
"github.com/containers/podman/v3/pkg/resolvconf"
@@ -884,80 +886,32 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), options.TargetFile)
includeFiles := []string{
- "checkpoint",
"artifacts",
"ctr.log",
- "config.dump",
- "spec.dump",
- "network.status"}
+ metadata.CheckpointDirectory,
+ metadata.ConfigDumpFile,
+ metadata.SpecDumpFile,
+ metadata.NetworkStatusFile,
+ }
if options.PreCheckPoint {
includeFiles[0] = "pre-checkpoint"
}
// Get root file-system changes included in the checkpoint archive
- rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar")
- deleteFilesList := filepath.Join(c.bundlePath(), "deleted.files")
+ var addToTarFiles []string
if !options.IgnoreRootfs {
// To correctly track deleted files, let's go through the output of 'podman diff'
- tarFiles, err := c.runtime.GetDiff("", c.ID())
+ rootFsChanges, err := c.runtime.GetDiff("", c.ID())
if err != nil {
- return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
+ return errors.Wrapf(err, "error exporting root file-system diff for %q", c.ID())
}
- var rootfsIncludeFiles []string
- var deletedFiles []string
-
- for _, file := range tarFiles {
- if file.Kind == archive.ChangeAdd {
- rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
- continue
- }
- if file.Kind == archive.ChangeDelete {
- deletedFiles = append(deletedFiles, file.Path)
- continue
- }
- fileName, err := os.Stat(file.Path)
- if err != nil {
- continue
- }
- if !fileName.IsDir() && file.Kind == archive.ChangeModify {
- rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
- continue
- }
- }
-
- if len(rootfsIncludeFiles) > 0 {
- rootfsTar, err := archive.TarWithOptions(c.state.Mountpoint, &archive.TarOptions{
- Compression: archive.Uncompressed,
- IncludeSourceDir: true,
- IncludeFiles: rootfsIncludeFiles,
- })
- if err != nil {
- return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
- }
- rootfsDiffFile, err := os.Create(rootfsDiffPath)
- if err != nil {
- return errors.Wrapf(err, "error creating root file-system diff file %q", rootfsDiffPath)
- }
- defer rootfsDiffFile.Close()
- _, err = io.Copy(rootfsDiffFile, rootfsTar)
- if err != nil {
- return err
- }
- includeFiles = append(includeFiles, "rootfs-diff.tar")
+ addToTarFiles, err := crutils.CRCreateRootFsDiffTar(&rootFsChanges, c.state.Mountpoint, c.bundlePath())
+ if err != nil {
+ return err
}
- if len(deletedFiles) > 0 {
- formatJSON, err := json.MarshalIndent(deletedFiles, "", " ")
- if err != nil {
- return errors.Wrapf(err, "error creating delete files list file %q", deleteFilesList)
- }
- if err := ioutil.WriteFile(deleteFilesList, formatJSON, 0600); err != nil {
- return errors.Wrap(err, "error creating delete files list file")
- }
-
- includeFiles = append(includeFiles, "deleted.files")
- }
+ includeFiles = append(includeFiles, addToTarFiles...)
}
// Folder containing archived volumes that will be included in the export
@@ -1034,8 +988,9 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
return err
}
- os.Remove(rootfsDiffPath)
- os.Remove(deleteFilesList)
+ for _, file := range addToTarFiles {
+ os.Remove(filepath.Join(c.bundlePath(), file))
+ }
if !options.IgnoreVolumes {
os.RemoveAll(expVolDir)
@@ -1054,23 +1009,6 @@ func (c *Container) checkpointRestoreSupported() error {
return nil
}
-func (c *Container) checkpointRestoreLabelLog(fileName string) error {
- // Create the CRIU log file and label it
- dumpLog := filepath.Join(c.bundlePath(), fileName)
-
- logFile, err := os.OpenFile(dumpLog, os.O_CREATE, 0600)
- if err != nil {
- return errors.Wrap(err, "failed to create CRIU log file")
- }
- if err := logFile.Close(); err != nil {
- logrus.Error(err)
- }
- if err = label.SetFileLabel(dumpLog, c.MountLabel()); err != nil {
- return err
- }
- return nil
-}
-
func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
if err := c.checkpointRestoreSupported(); err != nil {
return err
@@ -1084,7 +1022,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
return errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
}
- if err := c.checkpointRestoreLabelLog("dump.log"); err != nil {
+ if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "dump.log", c.MountLabel()); err != nil {
return err
}
@@ -1095,11 +1033,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
// Save network.status. This is needed to restore the container with
// the same IP. Currently limited to one IP address in a container
// with one interface.
- formatJSON, err := json.MarshalIndent(c.state.NetworkStatus, "", " ")
- if err != nil {
- return err
- }
- if err := ioutil.WriteFile(filepath.Join(c.bundlePath(), "network.status"), formatJSON, 0644); err != nil {
+ if _, err := metadata.WriteJSONFile(c.state.NetworkStatus, c.bundlePath(), metadata.NetworkStatusFile); err != nil {
return err
}
@@ -1115,7 +1049,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
if options.TargetFile != "" {
- if err = c.exportCheckpoint(options); err != nil {
+ if err := c.exportCheckpoint(options); err != nil {
return err
}
}
@@ -1135,8 +1069,8 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
cleanup := []string{
"dump.log",
"stats-dump",
- "config.dump",
- "spec.dump",
+ metadata.ConfigDumpFile,
+ metadata.SpecDumpFile,
}
for _, del := range cleanup {
file := filepath.Join(c.bundlePath(), del)
@@ -1151,28 +1085,13 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
func (c *Container) importCheckpoint(input string) error {
- archiveFile, err := os.Open(input)
- if err != nil {
- return errors.Wrap(err, "failed to open checkpoint archive for import")
- }
-
- defer archiveFile.Close()
- options := &archive.TarOptions{
- ExcludePatterns: []string{
- // config.dump and spec.dump are only required
- // container creation
- "config.dump",
- "spec.dump",
- },
- }
- err = archive.Untar(archiveFile, c.bundlePath(), options)
- if err != nil {
- return errors.Wrapf(err, "unpacking of checkpoint archive %s failed", input)
+ if err := crutils.CRImportCheckpointWithoutConfig(c.bundlePath(), input); err != nil {
+ return err
}
// Make sure the newly created config.json exists on disk
g := generate.Generator{Config: c.config.Spec}
- if err = c.saveSpec(g.Config); err != nil {
+ if err := c.saveSpec(g.Config); err != nil {
return errors.Wrap(err, "saving imported container specification for restore failed")
}
@@ -1221,7 +1140,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore")
}
- if err := c.checkpointRestoreLabelLog("restore.log"); err != nil {
+ if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "restore.log", c.MountLabel()); err != nil {
return err
}
@@ -1244,7 +1163,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Read network configuration from checkpoint
// Currently only one interface with one IP is supported.
- networkStatusFile, err := os.Open(filepath.Join(c.bundlePath(), "network.status"))
+ networkStatus, _, err := metadata.ReadContainerCheckpointNetworkStatus(c.bundlePath())
// If the restored container should get a new name, the IP address of
// the container will not be restored. This assumes that if a new name is
// specified, the container is restored multiple times.
@@ -1254,43 +1173,14 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err == nil && options.Name == "" && (!options.IgnoreStaticIP || !options.IgnoreStaticMAC) {
// The file with the network.status does exist. Let's restore the
// container with the same IP address / MAC address as during checkpointing.
- defer networkStatusFile.Close()
- var networkStatus []*cnitypes.Result
- networkJSON, err := ioutil.ReadAll(networkStatusFile)
- if err != nil {
- return err
- }
- if err := json.Unmarshal(networkJSON, &networkStatus); err != nil {
- return err
- }
if !options.IgnoreStaticIP {
- // Take the first IP address
- var IP net.IP
- if len(networkStatus) > 0 {
- if len(networkStatus[0].IPs) > 0 {
- IP = networkStatus[0].IPs[0].Address.IP
- }
- }
- if IP != nil {
+ if IP := metadata.GetIPFromNetworkStatus(networkStatus); IP != nil {
// Tell CNI which IP address we want.
c.requestedIP = IP
}
}
if !options.IgnoreStaticMAC {
- // Take the first device with a defined sandbox.
- var MAC net.HardwareAddr
- if len(networkStatus) > 0 {
- for _, n := range networkStatus[0].Interfaces {
- if n.Sandbox != "" {
- MAC, err = net.ParseMAC(n.Mac)
- if err != nil {
- return err
- }
- break
- }
- }
- }
- if MAC != nil {
+ if MAC := metadata.GetMACFromNetworkStatus(networkStatus); MAC != nil {
// Tell CNI which MAC address we want.
c.requestedMAC = MAC
}
@@ -1398,36 +1288,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Before actually restarting the container, apply the root file-system changes
if !options.IgnoreRootfs {
- rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar")
- if _, err := os.Stat(rootfsDiffPath); err == nil {
- // Only do this if a rootfs-diff.tar actually exists
- rootfsDiffFile, err := os.Open(rootfsDiffPath)
- if err != nil {
- return errors.Wrap(err, "failed to open root file-system diff file")
- }
- defer rootfsDiffFile.Close()
- if err := c.runtime.ApplyDiffTarStream(c.ID(), rootfsDiffFile); err != nil {
- return errors.Wrapf(err, "failed to apply root file-system diff file %s", rootfsDiffPath)
- }
+ if err := crutils.CRApplyRootFsDiffTar(c.bundlePath(), c.state.Mountpoint); err != nil {
+ return err
}
- deletedFilesPath := filepath.Join(c.bundlePath(), "deleted.files")
- if _, err := os.Stat(deletedFilesPath); err == nil {
- var deletedFiles []string
- deletedFilesJSON, err := ioutil.ReadFile(deletedFilesPath)
- if err != nil {
- return errors.Wrapf(err, "failed to read deleted files file")
- }
- if err := json.Unmarshal(deletedFilesJSON, &deletedFiles); err != nil {
- return errors.Wrapf(err, "failed to unmarshal deleted files file %s", deletedFilesPath)
- }
- for _, deleteFile := range deletedFiles {
- // Using RemoveAll as deletedFiles, which is generated from 'podman diff'
- // lists completely deleted directories as a single entry: 'D /root'.
- err = os.RemoveAll(filepath.Join(c.state.Mountpoint, deleteFile))
- if err != nil {
- return errors.Wrapf(err, "failed to delete files from container %s during restore", c.ID())
- }
- }
+
+ if err := crutils.CRRemoveDeletedFiles(c.ID(), c.bundlePath(), c.state.Mountpoint); err != nil {
+ return err
}
}
@@ -1452,7 +1318,15 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err != nil {
logrus.Debugf("Non-fatal: removal of pre-checkpoint directory (%s) failed: %v", c.PreCheckPointPath(), err)
}
- cleanup := [...]string{"restore.log", "dump.log", "stats-dump", "stats-restore", "network.status", "rootfs-diff.tar", "deleted.files"}
+ cleanup := [...]string{
+ "restore.log",
+ "dump.log",
+ "stats-dump",
+ "stats-restore",
+ metadata.NetworkStatusFile,
+ metadata.RootFsDiffTar,
+ metadata.DeletedFilesFile,
+ }
for _, del := range cleanup {
file := filepath.Join(c.bundlePath(), del)
err = os.Remove(file)
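Editor's note: the checkpoint hunks above replace the inline rootfs-diff handling with crutils helpers (CRCreateRootFsDiffTar, CRApplyRootFsDiffTar, CRRemoveDeletedFiles). The deleted code documents the underlying pattern: classify the changes reported by `podman diff`, tar the added/modified paths from the container mountpoint, and record deletions separately so they can be replayed on restore. A simplified standalone sketch of that step follows, assuming the containers/storage archive package.

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

// createRootFsDiffTar tars added and modified paths from mountPoint into
// dest and returns the deleted paths, which have to be removed separately
// on restore. (The removed libpod code additionally skipped modified paths
// that were directories.)
func createRootFsDiffTar(changes []archive.Change, mountPoint, dest string) ([]string, error) {
	var includeFiles, deletedFiles []string
	for _, change := range changes {
		switch change.Kind {
		case archive.ChangeAdd, archive.ChangeModify:
			includeFiles = append(includeFiles, change.Path)
		case archive.ChangeDelete:
			deletedFiles = append(deletedFiles, change.Path)
		}
	}
	if len(includeFiles) == 0 {
		return deletedFiles, nil
	}

	tarStream, err := archive.TarWithOptions(mountPoint, &archive.TarOptions{
		Compression:      archive.Uncompressed,
		IncludeSourceDir: true,
		IncludeFiles:     includeFiles,
	})
	if err != nil {
		return nil, err
	}
	defer tarStream.Close()

	out, err := os.Create(dest)
	if err != nil {
		return nil, err
	}
	defer out.Close()
	if _, err := io.Copy(out, tarStream); err != nil {
		return nil, err
	}
	return deletedFiles, nil
}

func main() {
	// With no changes the function is a no-op; shown only for the call shape.
	deleted, err := createRootFsDiffTar(nil, "/var/lib/containers/merged", "/tmp/rootfs-diff.tar")
	fmt.Println(deleted, err)
}
```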
diff --git a/libpod/container_log.go b/libpod/container_log.go
index a3b700004..c207df819 100644
--- a/libpod/container_log.go
+++ b/libpod/container_log.go
@@ -29,7 +29,6 @@ func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logCh
case define.NoLogging:
return errors.Wrapf(define.ErrNoLogs, "this container is using the 'none' log driver, cannot read logs")
case define.JournaldLogging:
- // TODO Skip sending logs until journald logs can be read
return c.readFromJournal(ctx, options, logChannel)
case define.JSONLogging:
// TODO provide a separate implementation of this when Conmon
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index 5792633b0..4a541b6e7 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -52,6 +52,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
if time.Now().Before(options.Since) {
return nil
}
+ // coreos/go-systemd/sdjournal expects a negative time.Duration for times in the past
config.Since = -time.Since(options.Since)
}
config.Matches = append(config.Matches, journal.Match{
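Editor's note: the added comment documents a go-systemd quirk worth spelling out: sdjournal's Since field is an offset relative to now, so a timestamp in the past must be expressed as a negative duration. A tiny illustration of the sign convention (sdjournal itself is not imported here):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	since := time.Now().Add(-90 * time.Second) // a point in the past
	// time.Since(since) is +90s; sdjournal wants the offset relative to
	// now, so a past timestamp is passed as a negative duration.
	fmt.Println(-time.Since(since)) // roughly -1m30s
}
```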
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 7c760a79a..265178ad5 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -143,7 +143,7 @@ func (ir *Runtime) NewFromLocal(name string) (*Image, error) {
// New creates a new image object where the image could be local
// or remote
-func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, label *string, pullType util.PullType) (*Image, error) {
+func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, label *string, pullType util.PullType, progress chan types.ProgressProperties) (*Image, error) {
span, _ := opentracing.StartSpanFromContext(ctx, "newImage")
span.SetTag("type", "runtime")
defer span.Finish()
@@ -162,7 +162,7 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile
if signaturePolicyPath == "" {
signaturePolicyPath = ir.SignaturePolicyPath
}
- imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions, &retry.RetryOptions{MaxRetry: maxRetry}, label)
+ imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions, &retry.RetryOptions{MaxRetry: maxRetry}, label, progress)
if err != nil {
return nil, err
}
@@ -323,7 +323,7 @@ func (ir *Runtime) LoadAllImagesFromDockerArchive(ctx context.Context, fileName
}
defer goal.cleanUp()
- imageNames, err := ir.doPullImage(ctx, sc, goal, writer, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{}, nil)
+ imageNames, err := ir.doPullImage(ctx, sc, goal, writer, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{}, nil, nil)
if err != nil {
return nil, err
}
diff --git a/libpod/image/image_test.go b/libpod/image/image_test.go
index 1ea4f6c11..3e6e7b9db 100644
--- a/libpod/image/image_test.go
+++ b/libpod/image/image_test.go
@@ -94,9 +94,9 @@ func TestImage_NewFromLocal(t *testing.T) {
ir, err := NewImageRuntimeFromOptions(so)
assert.NoError(t, err)
ir.Eventer = events.NewNullEventer()
- bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing)
+ bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil)
assert.NoError(t, err)
- bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing)
+ bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil)
assert.NoError(t, err)
tm := makeLocalMatrix(bb, bbglibc)
@@ -140,7 +140,7 @@ func TestImage_New(t *testing.T) {
// Iterate over the names and delete the image
// after the pull
for _, img := range names {
- newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing)
+ newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil)
assert.NoError(t, err)
assert.NotEqual(t, newImage.ID(), "")
err = newImage.Remove(context.Background(), false)
@@ -169,7 +169,7 @@ func TestImage_MatchRepoTag(t *testing.T) {
ir, err := NewImageRuntimeFromOptions(so)
assert.NoError(t, err)
ir.Eventer = events.NewNullEventer()
- newImage, err := ir.New(context.Background(), "busybox", "", "", os.Stdout, nil, SigningOptions{}, nil, util.PullImageMissing)
+ newImage, err := ir.New(context.Background(), "busybox", "", "", os.Stdout, nil, SigningOptions{}, nil, util.PullImageMissing, nil)
assert.NoError(t, err)
err = newImage.TagImage("foo:latest")
assert.NoError(t, err)
diff --git a/libpod/image/layer_tree.go b/libpod/image/layer_tree.go
index dde39dba1..aa3084449 100644
--- a/libpod/image/layer_tree.go
+++ b/libpod/image/layer_tree.go
@@ -4,7 +4,6 @@ import (
"context"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -188,7 +187,12 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) {
node, exists := t.nodes[child.TopLayer()]
if !exists {
- return nil, errors.Errorf("layer not found in layer tree: %q", child.TopLayer())
+ // Note: erroring out in this case has turned out to be a
+ // mistake. Users may not be able to recover, so we now log a
+ // warning to guide them toward resolving the issue and treat
+ // the missing layer as non-fatal.
+ logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", child.TopLayer())
+ return nil, nil
}
childOCI, err := t.toOCI(ctx, child)
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index 3cb1e57c7..c5fafc25d 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -6,6 +6,7 @@ import (
"io"
"path/filepath"
"strings"
+ "time"
"github.com/containers/common/pkg/retry"
cp "github.com/containers/image/v5/copy"
@@ -241,7 +242,7 @@ func toLocalImageName(imageName string) string {
// pullImageFromHeuristicSource pulls an image based on inputName, which is heuristically parsed and may involve configured registries.
// Use pullImageFromReference if the source is known precisely.
-func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string) ([]string, error) {
+func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string, progress chan types.ProgressProperties) ([]string, error) {
span, _ := opentracing.StartSpanFromContext(ctx, "pullImageFromHeuristicSource")
defer span.Finish()
@@ -275,7 +276,7 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s
}
}
defer goal.cleanUp()
- return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, label)
+ return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, label, progress)
}
// pullImageFromReference pulls an image from a types.imageReference.
@@ -294,7 +295,7 @@ func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.Imag
return nil, errors.Wrapf(err, "error determining pull goal for image %q", transports.ImageName(srcRef))
}
defer goal.cleanUp()
- return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, nil)
+ return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, nil, nil)
}
func cleanErrorMessage(err error) string {
@@ -304,7 +305,7 @@ func cleanErrorMessage(err error) string {
}
// doPullImage is an internal helper interpreting pullGoal. Almost everyone should call one of the callers of doPullImage instead.
-func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string) ([]string, error) {
+func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string, progress chan types.ProgressProperties) ([]string, error) {
span, _ := opentracing.StartSpanFromContext(ctx, "doPullImage")
defer span.Finish()
@@ -328,6 +329,10 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
for _, imageInfo := range goal.refPairs {
copyOptions := getCopyOptions(sc, writer, dockerOptions, nil, signingOptions, "", nil)
copyOptions.SourceCtx.SystemRegistriesConfPath = systemRegistriesConfPath // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place.
+ if progress != nil {
+ copyOptions.Progress = progress
+ copyOptions.ProgressInterval = time.Second
+ }
// Print the following statement only when pulling from a docker or atomic registry
if writer != nil && (imageInfo.srcRef.Transport().Name() == DockerTransport || imageInfo.srcRef.Transport().Name() == AtomicTransport) {
if _, err := io.WriteString(writer, fmt.Sprintf("Trying to pull %s...\n", imageInfo.image)); err != nil {
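Editor's note: the new final parameter threads a `chan types.ProgressProperties` from Runtime.New down to containers/image's copy options, with updates emitted roughly every ProgressInterval (one second in the hunk above). The sketch below shows how a caller might consume such a channel; the ProgressProperties field usage (Artifact.Digest, Offset) reflects the containers/image type and should be checked against the vendored version.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/containers/image/v5/types"
)

func main() {
	progress := make(chan types.ProgressProperties)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for p := range progress {
			fmt.Printf("blob %s: %d bytes copied\n", p.Artifact.Digest, p.Offset)
		}
	}()

	// In Podman this channel would be passed as the final argument to
	// ir.New(ctx, name, ..., util.PullImageMissing, progress); the copy
	// machinery then sends periodic updates until the pull finishes.
	close(progress) // nothing to report in this standalone sketch
	wg.Wait()
}
```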
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 26f15d9c8..3875878ed 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -822,6 +822,46 @@ func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *Container
return nil
}
+// SafeRewriteContainerConfig rewrites a container's configuration.
+// It's safer than RewriteContainerConfig, but still has limitations. Please
+// read the comment in state.go before using.
+func (s *InMemoryState) SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error {
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if _, err := s.nameIndex.Get(newName); err == nil {
+ return errors.Wrapf(define.ErrCtrExists, "name %s is in use", newName)
+ }
+
+ // If the container does not exist, return error
+ stateCtr, ok := s.containers[ctr.ID()]
+ if !ok {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
+ }
+
+ // Change name in registry.
+ if s.namespace != "" {
+ nsIndex, ok := s.namespaceIndexes[s.namespace]
+ if !ok {
+ return define.ErrInternal
+ }
+ nsIndex.nameIndex.Release(oldName)
+ if err := nsIndex.nameIndex.Reserve(newName, ctr.ID()); err != nil {
+ return errors.Wrapf(err, "error registering name %s", newName)
+ }
+ }
+ s.nameIndex.Release(oldName)
+ if err := s.nameIndex.Reserve(newName, ctr.ID()); err != nil {
+ return errors.Wrapf(err, "error registering name %s", newName)
+ }
+
+ stateCtr.config = newCfg
+
+ return nil
+}
+
// RewritePodConfig rewrites a pod's configuration.
// This function is DANGEROUS, even with in-memory state.
// Please read the full comment on it in state.go before using it.
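Editor's note: the in-memory SafeRewriteContainerConfig relies on the name index's Release/Reserve pair to move the reservation from the old name to the new one. A self-contained toy index that mirrors just the semantics used above (the real code uses the vendored registrar package):

```go
package main

import "fmt"

type nameIndex struct {
	names map[string]string // name -> container ID
}

func newNameIndex() *nameIndex { return &nameIndex{names: map[string]string{}} }

// Reserve associates name with id, refusing names held by a different ID.
func (n *nameIndex) Reserve(name, id string) error {
	if owner, ok := n.names[name]; ok && owner != id {
		return fmt.Errorf("name %s is in use", name)
	}
	n.names[name] = id
	return nil
}

// Release frees a name so it can be reserved again.
func (n *nameIndex) Release(name string) { delete(n.names, name) }

func main() {
	idx := newNameIndex()
	_ = idx.Reserve("old", "abc123")

	// Rename as above: check the target name, release the old reservation,
	// then reserve the new name for the same ID.
	if _, taken := idx.names["new"]; taken {
		fmt.Println("name new is in use")
		return
	}
	idx.Release("old")
	if err := idx.Reserve("new", "abc123"); err != nil {
		fmt.Println("error registering name:", err)
		return
	}
	fmt.Println("renamed to new")
}
```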
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index de7630c06..492bc807a 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -28,6 +28,7 @@ import (
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/logs"
"github.com/containers/podman/v3/pkg/cgroups"
+ "github.com/containers/podman/v3/pkg/checkpoint/crutils"
"github.com/containers/podman/v3/pkg/errorhandling"
"github.com/containers/podman/v3/pkg/lookup"
"github.com/containers/podman/v3/pkg/rootless"
@@ -837,16 +838,7 @@ func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) {
// SupportsCheckpoint checks if the OCI runtime supports checkpointing
// containers.
func (r *ConmonOCIRuntime) SupportsCheckpoint() bool {
- // Check if the runtime implements checkpointing. Currently only
- // runc's checkpoint/restore implementation is supported.
- cmd := exec.Command(r.path, "checkpoint", "--help")
- if err := cmd.Start(); err != nil {
- return false
- }
- if err := cmd.Wait(); err == nil {
- return true
- }
- return false
+ return crutils.CRRuntimeSupportsCheckpointRestore(r.path)
}
// SupportsJSONErrors checks if the OCI runtime supports JSON-formatted error
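Editor's note: SupportsCheckpoint now defers to crutils.CRRuntimeSupportsCheckpointRestore; the deleted body shows the probe it presumably wraps, namely running `<runtime> checkpoint --help` and treating a zero exit status as support. The same check as a standalone function:

```go
package main

import (
	"fmt"
	"os/exec"
)

// runtimeSupportsCheckpoint asks the OCI runtime for its checkpoint help
// text; a nil error from Run means the command exited 0, i.e. the runtime
// knows the checkpoint subcommand.
func runtimeSupportsCheckpoint(runtimePath string) bool {
	cmd := exec.Command(runtimePath, "checkpoint", "--help")
	return cmd.Run() == nil
}

func main() {
	fmt.Println(runtimeSupportsCheckpoint("/usr/bin/runc"))
}
```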
diff --git a/libpod/rootless_cni_linux.go b/libpod/rootless_cni_linux.go
index e97985180..df690e914 100644
--- a/libpod/rootless_cni_linux.go
+++ b/libpod/rootless_cni_linux.go
@@ -265,7 +265,7 @@ func startRootlessCNIInfraContainer(ctx context.Context, r *Runtime) (*Container
}
logrus.Debugf("rootless CNI: ensuring image %q to exist", imageName)
newImage, err := r.ImageRuntime().New(ctx, imageName, "", "", nil, nil,
- image.SigningOptions{}, nil, util.PullImageMissing)
+ image.SigningOptions{}, nil, util.PullImageMissing, nil)
if err != nil {
return nil, err
}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 8bf862bf2..301c4627d 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -74,8 +74,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
}
// RenameContainer renames the given container.
-// The given container object will be rendered unusable, and a new, renamed
-// Container will be returned.
+// Returns a copy of the container that has been renamed if successful.
func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName string) (*Container, error) {
ctr.lock.Lock()
defer ctr.lock.Unlock()
@@ -88,26 +87,6 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
return nil, define.RegexError
}
- // Check if the name is available.
- // This is *100% NOT ATOMIC* so any failures in-flight will do
- // *VERY BAD THINGS* to the state. So we have to try and catch all we
- // can before starting.
- if _, err := r.state.LookupContainerID(newName); err == nil {
- return nil, errors.Wrapf(define.ErrCtrExists, "name %s is already in use by another container", newName)
- }
- if _, err := r.state.LookupPod(newName); err == nil {
- return nil, errors.Wrapf(define.ErrPodExists, "name %s is already in use by another pod", newName)
- }
-
- // TODO: Investigate if it is possible to remove this limitation.
- depCtrs, err := r.state.ContainerInUse(ctr)
- if err != nil {
- return nil, err
- }
- if len(depCtrs) > 0 {
- return nil, errors.Wrapf(define.ErrCtrExists, "cannot rename container %s as it is in use by other containers: %v", ctr.ID(), strings.Join(depCtrs, ","))
- }
-
// We need to pull an updated config, in case another rename fired and
// the config was re-written.
newConf, err := r.state.GetContainerConfig(ctr.ID())
@@ -116,95 +95,33 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
}
ctr.config = newConf
- // TODO: This is going to fail if we have active exec sessions, too.
- // Investigate fixing that at a later date.
-
- var pod *Pod
- if ctr.config.Pod != "" {
- tmpPod, err := r.state.Pod(ctr.config.Pod)
- if err != nil {
- return nil, errors.Wrapf(err, "error retrieving container %s pod", ctr.ID())
- }
- pod = tmpPod
- // Lock pod to ensure it's not removed while we're working
- pod.lock.Lock()
- defer pod.lock.Unlock()
- }
-
- // Lock all volumes to ensure they are not removed while we're working
- volsLocked := make(map[string]bool)
- for _, namedVol := range ctr.config.NamedVolumes {
- if volsLocked[namedVol.Name] {
- continue
- }
- vol, err := r.state.Volume(namedVol.Name)
- if err != nil {
- return nil, errors.Wrapf(err, "error retrieving volume used by container %s", ctr.ID())
- }
-
- volsLocked[vol.Name()] = true
- vol.lock.Lock()
- defer vol.lock.Unlock()
- }
-
logrus.Infof("Going to rename container %s from %q to %q", ctr.ID(), ctr.Name(), newName)
- // Step 1: remove the old container.
- if pod != nil {
- if err := r.state.RemoveContainerFromPod(pod, ctr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
- }
- } else {
- if err := r.state.RemoveContainer(ctr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
- }
- }
-
- // Step 2: Make a new container based on the old one.
- // TODO: Should we deep-copy the container config and state, to be safe?
- newCtr := new(Container)
- newCtr.config = ctr.config
- newCtr.state = ctr.state
- newCtr.lock = ctr.lock
- newCtr.ociRuntime = ctr.ociRuntime
- newCtr.runtime = r
- newCtr.rootlessSlirpSyncR = ctr.rootlessSlirpSyncR
- newCtr.rootlessSlirpSyncW = ctr.rootlessSlirpSyncW
- newCtr.rootlessPortSyncR = ctr.rootlessPortSyncR
- newCtr.rootlessPortSyncW = ctr.rootlessPortSyncW
-
- newCtr.valid = true
- newCtr.config.Name = newName
-
- // Step 3: Add that new container to the DB
- if pod != nil {
- if err := r.state.AddContainerToPod(pod, newCtr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
- }
- } else {
- if err := r.state.AddContainer(newCtr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
- }
- }
+ // Step 1: Alter the config. Save the old name, we need it to rewrite
+ // the config.
+ oldName := ctr.config.Name
+ ctr.config.Name = newName
- // Step 4: Save the new container, to force the state to be written to
- // the DB. This may not be necessary, depending on DB implementation,
- // but let's do it to be safe.
- if err := newCtr.save(); err != nil {
- return nil, err
+ // Step 2: rewrite the old container's config in the DB.
+ if err := r.state.SafeRewriteContainerConfig(ctr, oldName, ctr.config.Name, ctr.config); err != nil {
+ // Assume the rename failed.
+ // Set config back to the old name to reflect what is actually
+ // present in the DB.
+ ctr.config.Name = oldName
+ return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
}
- // Step 5: rename the container in c/storage.
+ // Step 3: rename the container in c/storage.
// This can fail if the name is already in use by a non-Podman
// container. This puts us in a bad spot - we've already renamed the
// container in Podman. We can swap the order, but then we have the
// opposite problem. Atomicity is a real problem here, with no easy
// solution.
- if err := r.store.SetNames(newCtr.ID(), []string{newCtr.Name()}); err != nil {
+ if err := r.store.SetNames(ctr.ID(), []string{ctr.Name()}); err != nil {
return nil, err
}
- return newCtr, nil
+ return ctr, nil
}
func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
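Editor's note: the rewritten RenameContainer flips the name on the in-memory config first, rewrites the config in the DB, and restores the old name if that rewrite fails, so the object never claims a name the DB does not hold. A toy illustration of that rollback pattern, detached from libpod's types:

```go
package main

import (
	"errors"
	"fmt"
)

type config struct{ Name string }

// rename applies the new name, attempts the persistent rewrite, and rolls
// the in-memory name back on failure so it keeps matching what is stored.
func rename(cfg *config, newName string, rewrite func(*config) error) error {
	oldName := cfg.Name
	cfg.Name = newName
	if err := rewrite(cfg); err != nil {
		cfg.Name = oldName // the DB still holds the old name
		return fmt.Errorf("error renaming container: %w", err)
	}
	return nil
}

func main() {
	cfg := &config{Name: "old"}
	err := rename(cfg, "new", func(*config) error { return errors.New("db unavailable") })
	fmt.Println(err, "- name is still", cfg.Name)
}
```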
diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go
index 000029fa4..0a09e40ea 100644
--- a/libpod/runtime_pod_infra_linux.go
+++ b/libpod/runtime_pod_infra_linux.go
@@ -216,7 +216,7 @@ func (r *Runtime) createInfraContainer(ctx context.Context, p *Pod) (*Container,
if img == "" {
img = r.config.Engine.InfraImage
}
- newImage, err := r.ImageRuntime().New(ctx, img, "", "", nil, nil, image.SigningOptions{}, nil, util.PullImageMissing)
+ newImage, err := r.ImageRuntime().New(ctx, img, "", "", nil, nil, image.SigningOptions{}, nil, util.PullImageMissing, nil)
if err != nil {
return nil, err
}
diff --git a/libpod/state.go b/libpod/state.go
index 074d21740..4b711bae9 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -155,6 +155,19 @@ type State interface {
// answer is this: use this only very sparingly, and only if you really
// know what you're doing.
RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error
+ // This is a more limited version of RewriteContainerConfig, though it
+ // comes with the added ability to alter a container's name. In exchange
+ // it loses the ability to manipulate the container's locks.
+ // It is not intended to be as restrictive as RewriteContainerConfig, in
+ // that we allow it to be run while other Podman processes are running,
+ // and without holding the alive lock.
+ // Container ID and pod membership still *ABSOLUTELY CANNOT* be altered.
+ // Also, you cannot change a container's dependencies - shared namespace
+ // containers or generic dependencies - at present. This is
+ // theoretically possible but not yet implemented.
+ // If newName is not "" the container will be renamed to the new name.
+ // The oldName parameter is only required if newName is given.
+ SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error
// PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING.
// This function is identical to RewriteContainerConfig, save for the
// fact that it is used with pods instead.