Diffstat (limited to 'libpod')
32 files changed, 879 insertions, 451 deletions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index 6f2eaeab2..5df3e8961 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -879,7 +879,7 @@ func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) { ctrDB := ctrBucket.Bucket([]byte(ctr.ID())) if ctrDB == nil { ctr.valid = false - return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID()) + return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %q found in DB", ctr.ID()) } dependsBkt := ctrDB.Bucket(dependenciesBkt) @@ -1669,7 +1669,105 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf ctrDB := ctrBkt.Bucket([]byte(ctr.ID())) if ctrDB == nil { ctr.valid = false - return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID()) + return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %q found in DB", ctr.ID()) + } + + if err := ctrDB.Put(configKey, newCfgJSON); err != nil { + return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID()) + } + + return nil + }) + return err +} + +// SafeRewriteContainerConfig rewrites a container's configuration in a more +// limited fashion than RewriteContainerConfig. It is marked as safe to use +// under most circumstances, unlike RewriteContainerConfig. +// DO NOT USE TO: Change container dependencies, change pod membership, change +// locks, change container ID. +func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error { + if !s.valid { + return define.ErrDBClosed + } + + if !ctr.valid { + return define.ErrCtrRemoved + } + + if newName != "" && newCfg.Name != newName { + return errors.Wrapf(define.ErrInvalidArg, "new name %s for container %s must match name in given container config", newName, ctr.ID()) + } + if newName != "" && oldName == "" { + return errors.Wrapf(define.ErrInvalidArg, "must provide old name for container if a new name is given") + } + + newCfgJSON, err := json.Marshal(newCfg) + if err != nil { + return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID()) + } + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + err = db.Update(func(tx *bolt.Tx) error { + if newName != "" { + idBkt, err := getIDBucket(tx) + if err != nil { + return err + } + namesBkt, err := getNamesBucket(tx) + if err != nil { + return err + } + allCtrsBkt, err := getAllCtrsBucket(tx) + if err != nil { + return err + } + + needsRename := true + if exists := namesBkt.Get([]byte(newName)); exists != nil { + if string(exists) == ctr.ID() { + // Name already associated with the ID + // of this container. No need for a + // rename. + needsRename = false + } else { + return errors.Wrapf(define.ErrCtrExists, "name %s already in use, cannot rename container %s", newName, ctr.ID()) + } + } + + if needsRename { + // We do have to remove the old name. The other + // buckets are ID-indexed so we just need to + // overwrite the values there. 
+ if err := namesBkt.Delete([]byte(oldName)); err != nil { + return errors.Wrapf(err, "error deleting container %s old name from DB for rename", ctr.ID()) + } + if err := idBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil { + return errors.Wrapf(err, "error renaming container %s in ID bucket in DB", ctr.ID()) + } + if err := namesBkt.Put([]byte(newName), []byte(ctr.ID())); err != nil { + return errors.Wrapf(err, "error adding new name %s for container %s in DB", newName, ctr.ID()) + } + if err := allCtrsBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil { + return errors.Wrapf(err, "error renaming container %s in all containers bucket in DB", ctr.ID()) + } + } + } + + ctrBkt, err := getCtrBucket(tx) + if err != nil { + return err + } + + ctrDB := ctrBkt.Bucket([]byte(ctr.ID())) + if ctrDB == nil { + ctr.valid = false + return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %q found in DB", ctr.ID()) } if err := ctrDB.Put(configKey, newCfgJSON); err != nil { diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go index cf8f1c175..d4994334f 100644 --- a/libpod/boltdb_state_internal.go +++ b/libpod/boltdb_state_internal.go @@ -1055,9 +1055,9 @@ func (s *BoltState) lookupContainerID(idOrName string, ctrBucket, namesBucket, n return nil, err } else if !exists { if isPod { - return nil, errors.Wrapf(define.ErrNoSuchCtr, "%s is a pod, not a container", idOrName) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "%q is a pod, not a container", idOrName) } - return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container with name or ID %s found", idOrName) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container with name or ID %q found", idOrName) } return id, nil } diff --git a/libpod/container.go b/libpod/container.go index ee6e243ac..65abbfd5e 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -904,6 +904,12 @@ func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:in } } + return c.namespacePath(linuxNS) +} + +// namespacePath returns the path of one of the container's namespaces +// If the container is not running, an error will be returned +func (c *Container) namespacePath(linuxNS LinuxNS) (string, error) { //nolint:interfacer if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused { return "", errors.Wrapf(define.ErrCtrStopped, "cannot get namespace path unless container %s is running", c.ID()) } diff --git a/libpod/container_api.go b/libpod/container_api.go index 2818ac841..4ccb240e7 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -2,6 +2,7 @@ package libpod import ( "context" + "io" "io/ioutil" "net/http" "os" @@ -11,7 +12,6 @@ import ( "github.com/containers/podman/v3/libpod/define" "github.com/containers/podman/v3/libpod/events" "github.com/containers/podman/v3/pkg/signal" - "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -27,10 +27,6 @@ import ( // containers). The `recursive` parameter will, if set to true, start these // dependency containers before initializing this container. func (c *Container) Init(ctx context.Context, recursive bool) error { - span, _ := opentracing.StartSpanFromContext(ctx, "containerInit") - span.SetTag("struct", "container") - defer span.Finish() - if !c.batched { c.lock.Lock() defer c.lock.Unlock() @@ -83,10 +79,6 @@ func (c *Container) Init(ctx context.Context, recursive bool) error { // running before being run. 
The recursive parameter, if set, will start all // dependencies before starting this container. func (c *Container) Start(ctx context.Context, recursive bool) error { - span, _ := opentracing.StartSpanFromContext(ctx, "containerStart") - span.SetTag("struct", "container") - defer span.Finish() - if !c.batched { c.lock.Lock() defer c.lock.Unlock() @@ -349,10 +341,6 @@ func (c *Container) Mount() (string, error) { } } - if c.state.State == define.ContainerStateRemoving { - return "", errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID()) - } - defer c.newContainerEvent(events.Mount) return c.mount() } @@ -367,7 +355,6 @@ func (c *Container) Unmount(force bool) error { return err } } - if c.state.Mounted { mounted, err := c.runtime.storageService.MountedContainerImage(c.ID()) if err != nil { @@ -847,31 +834,59 @@ func (c *Container) ShouldRestart(ctx context.Context) bool { return c.shouldRestart() } -// ResolvePath resolves the specified path on the root for the container. The -// root must either be the mounted image of the container or the already -// mounted container storage. -// -// It returns the resolved root and the resolved path. Note that the path may -// resolve to the container's mount point or to a volume or bind mount. -func (c *Container) ResolvePath(ctx context.Context, root string, path string) (string, string, error) { - logrus.Debugf("Resolving path %q (root %q) on container %s", path, root, c.ID()) +// CopyFromArchive copies the contents from the specified tarStream to path +// *inside* the container. +func (c *Container) CopyFromArchive(ctx context.Context, containerPath string, tarStream io.Reader) (func() error, error) { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() - // Minimal sanity checks. - if len(root)*len(path) == 0 { - return "", "", errors.Wrapf(define.ErrInternal, "ResolvePath: root (%q) and path (%q) must be non empty", root, path) + if err := c.syncContainer(); err != nil { + return nil, err + } } - if _, err := os.Stat(root); err != nil { - return "", "", errors.Wrapf(err, "cannot locate root to resolve path on container %s", c.ID()) + + return c.copyFromArchive(ctx, containerPath, tarStream) +} + +// CopyToArchive copies the contents from the specified path *inside* the +// container to the tarStream. +func (c *Container) CopyToArchive(ctx context.Context, containerPath string, tarStream io.Writer) (func() error, error) { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return nil, err + } } + return c.copyToArchive(ctx, containerPath, tarStream) +} + +// Stat the specified path *inside* the container and return a file info. 
+func (c *Container) Stat(ctx context.Context, containerPath string) (*define.FileInfo, error) { if !c.batched { c.lock.Lock() defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - return "", "", err + return nil, err + } + } + + var mountPoint string + var err error + if c.state.Mounted { + mountPoint = c.state.Mountpoint + } else { + mountPoint, err = c.mount() + if err != nil { + return nil, err } + defer c.unmount(false) } - return c.resolvePath(root, path) + info, _, _, err := c.stat(ctx, mountPoint, containerPath) + return info, err } diff --git a/libpod/container_copy_linux.go b/libpod/container_copy_linux.go new file mode 100644 index 000000000..5c275c641 --- /dev/null +++ b/libpod/container_copy_linux.go @@ -0,0 +1,264 @@ +// +build linux + +package libpod + +import ( + "context" + "io" + "os" + "path/filepath" + "runtime" + "strings" + + buildahCopiah "github.com/containers/buildah/copier" + "github.com/containers/buildah/pkg/chrootuser" + "github.com/containers/buildah/util" + "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/storage/pkg/idtools" + "github.com/docker/docker/pkg/archive" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +func (c *Container) copyFromArchive(ctx context.Context, path string, reader io.Reader) (func() error, error) { + var ( + mountPoint string + resolvedRoot string + resolvedPath string + unmount func() + err error + ) + + // Make sure that "/" copies the *contents* of the mount point and not + // the directory. + if path == "/" { + path = "/." + } + + // Optimization: only mount if the container is not already. + if c.state.Mounted { + mountPoint = c.state.Mountpoint + unmount = func() {} + } else { + // NOTE: make sure to unmount in error paths. + mountPoint, err = c.mount() + if err != nil { + return nil, err + } + unmount = func() { c.unmount(false) } + } + + if c.state.State == define.ContainerStateRunning { + resolvedRoot = "/" + resolvedPath = c.pathAbs(path) + } else { + resolvedRoot, resolvedPath, err = c.resolvePath(mountPoint, path) + if err != nil { + unmount() + return nil, err + } + } + + // Make sure we chown the files to the container's main user and group ID. + user, err := getContainerUser(c, mountPoint) + if err != nil { + unmount() + return nil, err + } + idPair := idtools.IDPair{UID: int(user.UID), GID: int(user.GID)} + + decompressed, err := archive.DecompressStream(reader) + if err != nil { + unmount() + return nil, err + } + + logrus.Debugf("Container copy *to* %q (resolved: %q) on container %q (ID: %s)", path, resolvedPath, c.Name(), c.ID()) + + return func() error { + defer unmount() + defer decompressed.Close() + putOptions := buildahCopiah.PutOptions{ + UIDMap: c.config.IDMappings.UIDMap, + GIDMap: c.config.IDMappings.GIDMap, + ChownDirs: &idPair, + ChownFiles: &idPair, + } + + return c.joinMountAndExec(ctx, + func() error { + return buildahCopiah.Put(resolvedRoot, resolvedPath, putOptions, decompressed) + }, + ) + }, nil +} + +func (c *Container) copyToArchive(ctx context.Context, path string, writer io.Writer) (func() error, error) { + var ( + mountPoint string + unmount func() + err error + ) + + // Optimization: only mount if the container is not already. + if c.state.Mounted { + mountPoint = c.state.Mountpoint + unmount = func() {} + } else { + // NOTE: make sure to unmount in error paths. 
+ mountPoint, err = c.mount() + if err != nil { + return nil, err + } + unmount = func() { c.unmount(false) } + } + + statInfo, resolvedRoot, resolvedPath, err := c.stat(ctx, mountPoint, path) + if err != nil { + unmount() + return nil, err + } + + // We optimistically chown to the host user. In case of a hypothetical + // container-to-container copy, the reading side will chown back to the + // container user. + user, err := getContainerUser(c, mountPoint) + if err != nil { + unmount() + return nil, err + } + hostUID, hostGID, err := util.GetHostIDs( + idtoolsToRuntimeSpec(c.config.IDMappings.UIDMap), + idtoolsToRuntimeSpec(c.config.IDMappings.GIDMap), + user.UID, + user.GID, + ) + if err != nil { + unmount() + return nil, err + } + idPair := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)} + + logrus.Debugf("Container copy *from* %q (resolved: %q) on container %q (ID: %s)", path, resolvedPath, c.Name(), c.ID()) + + return func() error { + defer unmount() + getOptions := buildahCopiah.GetOptions{ + // Unless the specified path points to ".", we want to copy the base directory. + KeepDirectoryNames: statInfo.IsDir && filepath.Base(path) != ".", + UIDMap: c.config.IDMappings.UIDMap, + GIDMap: c.config.IDMappings.GIDMap, + ChownDirs: &idPair, + ChownFiles: &idPair, + Excludes: []string{"dev", "proc", "sys"}, + // Ignore EPERMs when copying from rootless containers + // since we cannot read TTY devices. Those are owned + // by the host's root and hence "nobody" inside the + // container's user namespace. + IgnoreUnreadable: rootless.IsRootless() && c.state.State == define.ContainerStateRunning, + } + return c.joinMountAndExec(ctx, + func() error { + return buildahCopiah.Get(resolvedRoot, "", getOptions, []string{resolvedPath}, writer) + }, + ) + }, nil +} + +// getContainerUser returns the specs.User and ID mappings of the container. +func getContainerUser(container *Container, mountPoint string) (specs.User, error) { + userspec := container.Config().User + + uid, gid, _, err := chrootuser.GetUser(mountPoint, userspec) + u := specs.User{ + UID: uid, + GID: gid, + Username: userspec, + } + + if !strings.Contains(userspec, ":") { + groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID)) + if err2 != nil { + if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil { + err = err2 + } + } else { + u.AdditionalGids = groups + } + } + + return u, err +} + +// idtoolsToRuntimeSpec converts idtools ID mapping to the one of the runtime spec. +func idtoolsToRuntimeSpec(idMaps []idtools.IDMap) (convertedIDMap []specs.LinuxIDMapping) { + for _, idmap := range idMaps { + tempIDMap := specs.LinuxIDMapping{ + ContainerID: uint32(idmap.ContainerID), + HostID: uint32(idmap.HostID), + Size: uint32(idmap.Size), + } + convertedIDMap = append(convertedIDMap, tempIDMap) + } + return convertedIDMap +} + +// joinMountAndExec executes the specified function `f` inside the container's +// mount and PID namespace. That allows for having an exact view of the +// container's file system. +// +// Note: if the container is not running, `f()` will be executed as-is. +func (c *Container) joinMountAndExec(ctx context.Context, f func() error) error { + if c.state.State != define.ContainerStateRunning { + return f() + } + + // Container's running, so we need to execute `f()` inside its mount NS. + errChan := make(chan error) + go func() { + runtime.LockOSThread() + + // Join the mount and PID NS of the container. 
+ getFD := func(ns LinuxNS) (*os.File, error) { + nsPath, err := c.namespacePath(ns) + if err != nil { + return nil, err + } + return os.Open(nsPath) + } + + mountFD, err := getFD(MountNS) + if err != nil { + errChan <- err + return + } + defer mountFD.Close() + + pidFD, err := getFD(PIDNS) + if err != nil { + errChan <- err + return + } + defer pidFD.Close() + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + errChan <- err + return + } + if err := unix.Setns(int(pidFD.Fd()), unix.CLONE_NEWPID); err != nil { + errChan <- err + return + } + + if err := unix.Setns(int(mountFD.Fd()), unix.CLONE_NEWNS); err != nil { + errChan <- err + return + } + + // Last but not least, execute the workload. + errChan <- f() + }() + return <-errChan +} diff --git a/libpod/container_copy_unsupported.go b/libpod/container_copy_unsupported.go new file mode 100644 index 000000000..b2bdd3e3d --- /dev/null +++ b/libpod/container_copy_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package libpod + +import ( + "context" + "io" +) + +func (c *Container) copyFromArchive(ctx context.Context, path string, reader io.Reader) (func() error, error) { + return nil, nil +} + +func (c *Container) copyToArchive(ctx context.Context, path string, writer io.Writer) (func() error, error) { + return nil, nil +} diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 2e0c24579..1614211fb 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -13,6 +13,7 @@ import ( "strings" "time" + metadata "github.com/checkpoint-restore/checkpointctl/lib" "github.com/containers/buildah/copier" "github.com/containers/common/pkg/secrets" "github.com/containers/podman/v3/libpod/define" @@ -32,7 +33,6 @@ import ( spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -135,7 +135,7 @@ func (c *Container) ControlSocketPath() string { // CheckpointPath returns the path to the directory containing the checkpoint func (c *Container) CheckpointPath() string { - return filepath.Join(c.bundlePath(), "checkpoint") + return filepath.Join(c.bundlePath(), metadata.CheckpointDirectory) } // PreCheckpointPath returns the path to the directory containing the pre-checkpoint-images @@ -398,10 +398,6 @@ func (c *Container) setupStorageMapping(dest, from *storage.IDMappingOptions) { // Create container root filesystem for use func (c *Container) setupStorage(ctx context.Context) error { - span, _ := opentracing.StartSpanFromContext(ctx, "setupStorage") - span.SetTag("type", "container") - defer span.Finish() - if !c.valid { return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID()) } @@ -1034,10 +1030,6 @@ func (c *Container) cniHosts() string { // Initialize a container, creating it in the runtime func (c *Container) init(ctx context.Context, retainRetries bool) error { - span, _ := opentracing.StartSpanFromContext(ctx, "init") - span.SetTag("struct", "container") - defer span.Finish() - // Unconditionally remove conmon temporary files. // We've been running into far too many issues where they block startup. if err := c.removeConmonFiles(); err != nil { @@ -1110,10 +1102,6 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error { // Deletes the container in the runtime, and resets its state to Exited. // The container can be restarted cleanly after this. 
func (c *Container) cleanupRuntime(ctx context.Context) error { - span, _ := opentracing.StartSpanFromContext(ctx, "cleanupRuntime") - span.SetTag("struct", "container") - defer span.Finish() - // If the container is not ContainerStateStopped or // ContainerStateCreated, do nothing. if !c.ensureState(define.ContainerStateStopped, define.ContainerStateCreated) { @@ -1155,10 +1143,6 @@ func (c *Container) cleanupRuntime(ctx context.Context) error { // Not necessary for ContainerStateExited - the container has already been // removed from the runtime, so init() can proceed freely. func (c *Container) reinit(ctx context.Context, retainRetries bool) error { - span, _ := opentracing.StartSpanFromContext(ctx, "reinit") - span.SetTag("struct", "container") - defer span.Finish() - logrus.Debugf("Recreating container %s in OCI runtime", c.ID()) if err := c.cleanupRuntime(ctx); err != nil { @@ -1306,9 +1290,7 @@ func (c *Container) stop(timeout uint) error { c.lock.Unlock() } - if err := c.ociRuntime.StopContainer(c, timeout, all); err != nil { - return err - } + stopErr := c.ociRuntime.StopContainer(c, timeout, all) if !c.batched { c.lock.Lock() @@ -1317,13 +1299,23 @@ // If the container has already been removed (e.g., via // the cleanup process), there's nothing left to do. case define.ErrNoSuchCtr, define.ErrCtrRemoved: - return nil + return stopErr default: + if stopErr != nil { + logrus.Errorf("Error syncing container %s status: %v", c.ID(), err) + return stopErr + } return err } } } + // We have to check stopErr *after* we lock again - otherwise, we have a + // chance of panicking on a double-unlock. Ref: GH Issue 9615 + if stopErr != nil { + return stopErr + } + // Since we're now subject to a race condition with other processes who // may have altered the state (and other data), let's check if the // state has changed. If so, we should return immediately and log a @@ -1811,10 +1803,6 @@ func (c *Container) cleanupStorage() error { func (c *Container) cleanup(ctx context.Context) error { var lastError error - span, _ := opentracing.StartSpanFromContext(ctx, "cleanup") - span.SetTag("struct", "container") - defer span.Finish() - logrus.Debugf("Cleaning up container %s", c.ID()) // Remove healthcheck unit/timer file if it execs @@ -1875,10 +1863,6 @@ func (c *Container) cleanup(ctx context.Context) error { // delete deletes the container and runs any configured poststop // hooks. func (c *Container) delete(ctx context.Context) error { - span, _ := opentracing.StartSpanFromContext(ctx, "delete") - span.SetTag("struct", "container") - defer span.Finish() - if err := c.ociRuntime.DeleteContainer(c); err != nil { return errors.Wrapf(err, "error removing container %s from runtime", c.ID()) } @@ -1894,10 +1878,6 @@ func (c *Container) delete(ctx context.Context) error { // the OCI Runtime Specification (which requires them to run // post-delete, despite the stage name). 
func (c *Container) postDeleteHooks(ctx context.Context) error { - span, _ := opentracing.StartSpanFromContext(ctx, "postDeleteHooks") - span.SetTag("struct", "container") - defer span.Finish() - if c.state.ExtensionStageHooks != nil { extensionHooks, ok := c.state.ExtensionStageHooks["poststop"] if ok { @@ -2085,6 +2065,10 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (map[s // mount mounts the container's root filesystem func (c *Container) mount() (string, error) { + if c.state.State == define.ContainerStateRemoving { + return "", errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID()) + } + mountPoint, err := c.runtime.storageService.MountContainerImage(c.ID()) if err != nil { return "", errors.Wrapf(err, "error mounting storage for container %s", c.ID()) @@ -2141,26 +2125,11 @@ func (c *Container) canWithPrevious() error { return err } -// writeJSONFile marshalls and writes the given data to a JSON file -// in the bundle path -func (c *Container) writeJSONFile(v interface{}, file string) error { - fileJSON, err := json.MarshalIndent(v, "", " ") - if err != nil { - return errors.Wrapf(err, "error writing JSON to %s for container %s", file, c.ID()) - } - file = filepath.Join(c.bundlePath(), file) - if err := ioutil.WriteFile(file, fileJSON, 0644); err != nil { - return err - } - - return nil -} - // prepareCheckpointExport writes the config and spec to // JSON files for later export func (c *Container) prepareCheckpointExport() error { // save live config - if err := c.writeJSONFile(c.Config(), "config.dump"); err != nil { + if _, err := metadata.WriteJSONFile(c.Config(), c.bundlePath(), metadata.ConfigDumpFile); err != nil { return err } @@ -2171,7 +2140,7 @@ func (c *Container) prepareCheckpointExport() error { logrus.Debugf("generating spec for container %q failed with %v", c.ID(), err) return err } - if err := c.writeJSONFile(g.Config, "spec.dump"); err != nil { + if _, err := metadata.WriteJSONFile(g.Config, c.bundlePath(), metadata.SpecDumpFile); err != nil { return err } diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index dc0418148..24319f4b5 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -19,6 +19,7 @@ import ( "syscall" "time" + metadata "github.com/checkpoint-restore/checkpointctl/lib" cnitypes "github.com/containernetworking/cni/pkg/types/current" "github.com/containernetworking/plugins/pkg/ns" "github.com/containers/buildah/pkg/chrootuser" @@ -33,6 +34,7 @@ import ( "github.com/containers/podman/v3/libpod/events" "github.com/containers/podman/v3/pkg/annotations" "github.com/containers/podman/v3/pkg/cgroups" + "github.com/containers/podman/v3/pkg/checkpoint/crutils" "github.com/containers/podman/v3/pkg/criu" "github.com/containers/podman/v3/pkg/lookup" "github.com/containers/podman/v3/pkg/resolvconf" @@ -47,7 +49,6 @@ import ( spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -320,10 +321,6 @@ func (c *Container) getUserOverrides() *lookup.Overrides { // Generate spec for a container // Accepts a map of the container's dependencies func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { - span, _ := opentracing.StartSpanFromContext(ctx, "generateSpec") - span.SetTag("type", 
"container") - defer span.Finish() - overrides := c.getUserOverrides() execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, overrides) if err != nil { @@ -884,80 +881,32 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error { logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), options.TargetFile) includeFiles := []string{ - "checkpoint", "artifacts", "ctr.log", - "config.dump", - "spec.dump", - "network.status"} + metadata.CheckpointDirectory, + metadata.ConfigDumpFile, + metadata.SpecDumpFile, + metadata.NetworkStatusFile, + } if options.PreCheckPoint { includeFiles[0] = "pre-checkpoint" } // Get root file-system changes included in the checkpoint archive - rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar") - deleteFilesList := filepath.Join(c.bundlePath(), "deleted.files") + var addToTarFiles []string if !options.IgnoreRootfs { // To correctly track deleted files, let's go through the output of 'podman diff' - tarFiles, err := c.runtime.GetDiff("", c.ID()) + rootFsChanges, err := c.runtime.GetDiff("", c.ID()) if err != nil { - return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath) - } - var rootfsIncludeFiles []string - var deletedFiles []string - - for _, file := range tarFiles { - if file.Kind == archive.ChangeAdd { - rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path) - continue - } - if file.Kind == archive.ChangeDelete { - deletedFiles = append(deletedFiles, file.Path) - continue - } - fileName, err := os.Stat(file.Path) - if err != nil { - continue - } - if !fileName.IsDir() && file.Kind == archive.ChangeModify { - rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path) - continue - } + return errors.Wrapf(err, "error exporting root file-system diff for %q", c.ID()) } - if len(rootfsIncludeFiles) > 0 { - rootfsTar, err := archive.TarWithOptions(c.state.Mountpoint, &archive.TarOptions{ - Compression: archive.Uncompressed, - IncludeSourceDir: true, - IncludeFiles: rootfsIncludeFiles, - }) - if err != nil { - return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath) - } - rootfsDiffFile, err := os.Create(rootfsDiffPath) - if err != nil { - return errors.Wrapf(err, "error creating root file-system diff file %q", rootfsDiffPath) - } - defer rootfsDiffFile.Close() - _, err = io.Copy(rootfsDiffFile, rootfsTar) - if err != nil { - return err - } - - includeFiles = append(includeFiles, "rootfs-diff.tar") + addToTarFiles, err := crutils.CRCreateRootFsDiffTar(&rootFsChanges, c.state.Mountpoint, c.bundlePath()) + if err != nil { + return err } - if len(deletedFiles) > 0 { - formatJSON, err := json.MarshalIndent(deletedFiles, "", " ") - if err != nil { - return errors.Wrapf(err, "error creating delete files list file %q", deleteFilesList) - } - if err := ioutil.WriteFile(deleteFilesList, formatJSON, 0600); err != nil { - return errors.Wrap(err, "error creating delete files list file") - } - - includeFiles = append(includeFiles, "deleted.files") - } + includeFiles = append(includeFiles, addToTarFiles...) 
} // Folder containing archived volumes that will be included in the export @@ -1034,8 +983,9 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error { return err } - os.Remove(rootfsDiffPath) - os.Remove(deleteFilesList) + for _, file := range addToTarFiles { + os.Remove(filepath.Join(c.bundlePath(), file)) + } if !options.IgnoreVolumes { os.RemoveAll(expVolDir) @@ -1054,23 +1004,6 @@ func (c *Container) checkpointRestoreSupported() error { return nil } -func (c *Container) checkpointRestoreLabelLog(fileName string) error { - // Create the CRIU log file and label it - dumpLog := filepath.Join(c.bundlePath(), fileName) - - logFile, err := os.OpenFile(dumpLog, os.O_CREATE, 0600) - if err != nil { - return errors.Wrap(err, "failed to create CRIU log file") - } - if err := logFile.Close(); err != nil { - logrus.Error(err) - } - if err = label.SetFileLabel(dumpLog, c.MountLabel()); err != nil { - return err - } - return nil -} - func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) error { if err := c.checkpointRestoreSupported(); err != nil { return err @@ -1084,7 +1017,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO return errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used") } - if err := c.checkpointRestoreLabelLog("dump.log"); err != nil { + if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "dump.log", c.MountLabel()); err != nil { return err } @@ -1095,11 +1028,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO // Save network.status. This is needed to restore the container with // the same IP. Currently limited to one IP address in a container // with one interface. 
- formatJSON, err := json.MarshalIndent(c.state.NetworkStatus, "", " ") - if err != nil { - return err - } - if err := ioutil.WriteFile(filepath.Join(c.bundlePath(), "network.status"), formatJSON, 0644); err != nil { + if _, err := metadata.WriteJSONFile(c.state.NetworkStatus, c.bundlePath(), metadata.NetworkStatusFile); err != nil { return err } @@ -1115,7 +1044,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO } if options.TargetFile != "" { - if err = c.exportCheckpoint(options); err != nil { + if err := c.exportCheckpoint(options); err != nil { return err } } @@ -1135,8 +1064,8 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO cleanup := []string{ "dump.log", "stats-dump", - "config.dump", - "spec.dump", + metadata.ConfigDumpFile, + metadata.SpecDumpFile, } for _, del := range cleanup { file := filepath.Join(c.bundlePath(), del) @@ -1151,28 +1080,13 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO } func (c *Container) importCheckpoint(input string) error { - archiveFile, err := os.Open(input) - if err != nil { - return errors.Wrap(err, "failed to open checkpoint archive for import") - } - - defer archiveFile.Close() - options := &archive.TarOptions{ - ExcludePatterns: []string{ - // config.dump and spec.dump are only required - // container creation - "config.dump", - "spec.dump", - }, - } - err = archive.Untar(archiveFile, c.bundlePath(), options) - if err != nil { - return errors.Wrapf(err, "unpacking of checkpoint archive %s failed", input) + if err := crutils.CRImportCheckpointWithoutConfig(c.bundlePath(), input); err != nil { + return err } // Make sure the newly created config.json exists on disk g := generate.Generator{Config: c.config.Spec} - if err = c.saveSpec(g.Config); err != nil { + if err := c.saveSpec(g.Config); err != nil { return errors.Wrap(err, "saving imported container specification for restore failed") } @@ -1221,7 +1135,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti return errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore") } - if err := c.checkpointRestoreLabelLog("restore.log"); err != nil { + if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "restore.log", c.MountLabel()); err != nil { return err } @@ -1244,7 +1158,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti // Read network configuration from checkpoint // Currently only one interface with one IP is supported. - networkStatusFile, err := os.Open(filepath.Join(c.bundlePath(), "network.status")) + networkStatus, _, err := metadata.ReadContainerCheckpointNetworkStatus(c.bundlePath()) // If the restored container should get a new name, the IP address of // the container will not be restored. This assumes that if a new name is // specified, the container is restored multiple times. @@ -1254,43 +1168,14 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti if err == nil && options.Name == "" && (!options.IgnoreStaticIP || !options.IgnoreStaticMAC) { // The file with the network.status does exist. Let's restore the // container with the same IP address / MAC address as during checkpointing. 
- defer networkStatusFile.Close() - var networkStatus []*cnitypes.Result - networkJSON, err := ioutil.ReadAll(networkStatusFile) - if err != nil { - return err - } - if err := json.Unmarshal(networkJSON, &networkStatus); err != nil { - return err - } if !options.IgnoreStaticIP { - // Take the first IP address - var IP net.IP - if len(networkStatus) > 0 { - if len(networkStatus[0].IPs) > 0 { - IP = networkStatus[0].IPs[0].Address.IP - } - } - if IP != nil { + if IP := metadata.GetIPFromNetworkStatus(networkStatus); IP != nil { // Tell CNI which IP address we want. c.requestedIP = IP } } if !options.IgnoreStaticMAC { - // Take the first device with a defined sandbox. - var MAC net.HardwareAddr - if len(networkStatus) > 0 { - for _, n := range networkStatus[0].Interfaces { - if n.Sandbox != "" { - MAC, err = net.ParseMAC(n.Mac) - if err != nil { - return err - } - break - } - } - } - if MAC != nil { + if MAC := metadata.GetMACFromNetworkStatus(networkStatus); MAC != nil { // Tell CNI which MAC address we want. c.requestedMAC = MAC } @@ -1398,36 +1283,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti // Before actually restarting the container, apply the root file-system changes if !options.IgnoreRootfs { - rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar") - if _, err := os.Stat(rootfsDiffPath); err == nil { - // Only do this if a rootfs-diff.tar actually exists - rootfsDiffFile, err := os.Open(rootfsDiffPath) - if err != nil { - return errors.Wrap(err, "failed to open root file-system diff file") - } - defer rootfsDiffFile.Close() - if err := c.runtime.ApplyDiffTarStream(c.ID(), rootfsDiffFile); err != nil { - return errors.Wrapf(err, "failed to apply root file-system diff file %s", rootfsDiffPath) - } + if err := crutils.CRApplyRootFsDiffTar(c.bundlePath(), c.state.Mountpoint); err != nil { + return err } - deletedFilesPath := filepath.Join(c.bundlePath(), "deleted.files") - if _, err := os.Stat(deletedFilesPath); err == nil { - var deletedFiles []string - deletedFilesJSON, err := ioutil.ReadFile(deletedFilesPath) - if err != nil { - return errors.Wrapf(err, "failed to read deleted files file") - } - if err := json.Unmarshal(deletedFilesJSON, &deletedFiles); err != nil { - return errors.Wrapf(err, "failed to unmarshal deleted files file %s", deletedFilesPath) - } - for _, deleteFile := range deletedFiles { - // Using RemoveAll as deletedFiles, which is generated from 'podman diff' - // lists completely deleted directories as a single entry: 'D /root'. 
- err = os.RemoveAll(filepath.Join(c.state.Mountpoint, deleteFile)) - if err != nil { - return errors.Wrapf(err, "failed to delete files from container %s during restore", c.ID()) - } - } + + if err := crutils.CRRemoveDeletedFiles(c.ID(), c.bundlePath(), c.state.Mountpoint); err != nil { + return err + } } @@ -1452,7 +1313,15 @@ if err != nil { logrus.Debugf("Non-fatal: removal of pre-checkpoint directory (%s) failed: %v", c.PreCheckPointPath(), err) } - cleanup := [...]string{"restore.log", "dump.log", "stats-dump", "stats-restore", "network.status", "rootfs-diff.tar", "deleted.files"} + cleanup := [...]string{ + "restore.log", + "dump.log", + "stats-dump", + "stats-restore", + metadata.NetworkStatusFile, + metadata.RootFsDiffTar, + metadata.DeletedFilesFile, + } for _, del := range cleanup { file := filepath.Join(c.bundlePath(), del) err = os.Remove(file) diff --git a/libpod/container_log.go b/libpod/container_log.go index a3b700004..c207df819 100644 --- a/libpod/container_log.go +++ b/libpod/container_log.go @@ -29,7 +29,6 @@ func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logCh case define.NoLogging: return errors.Wrapf(define.ErrNoLogs, "this container is using the 'none' log driver, cannot read logs") case define.JournaldLogging: - // TODO Skip sending logs until journald logs can be read return c.readFromJournal(ctx, options, logChannel) case define.JSONLogging: // TODO provide a separate implementation of this when Conmon diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go index 5792633b0..4a541b6e7 100644 --- a/libpod/container_log_linux.go +++ b/libpod/container_log_linux.go @@ -52,6 +52,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption if time.Now().Before(options.Since) { return nil } + // coreos/go-systemd/sdjournal expects a negative time.Duration for times in the past config.Since = -time.Since(options.Since) } config.Matches = append(config.Matches, journal.Match{ diff --git a/libpod/container_path_resolution.go b/libpod/container_path_resolution.go index 5245314ae..d798963b1 100644 --- a/libpod/container_path_resolution.go +++ b/libpod/container_path_resolution.go @@ -1,3 +1,4 @@ +// +build linux package libpod import ( @@ -10,6 +11,19 @@ import ( "github.com/sirupsen/logrus" ) +// pathAbs returns an absolute path. If the specified path is +// relative, it will be resolved relative to the container's working dir. +func (c *Container) pathAbs(path string) string { + if !filepath.IsAbs(path) { + // If the containerPath is not absolute, it's relative to the + // container's working dir. To be extra careful, let's first + // join the working dir with "/", and then add the containerPath + // to it. + path = filepath.Join(filepath.Join("/", c.WorkingDir()), path) + } + return path +} + // resolveContainerPaths resolves the container's mount point and the container // path as specified by the user. Both may resolve to paths outside of the // container's mount point when the container path hits a volume or bind mount. @@ -20,14 +34,7 @@ import ( // the host). func (c *Container) resolvePath(mountPoint string, containerPath string) (string, string, error) { // Let's first make sure we have a path relative to the mount point. - pathRelativeToContainerMountPoint := containerPath - if !filepath.IsAbs(containerPath) { - // If the containerPath is not absolute, it's relative to the - // container's working dir. 
To be extra careful, let's first - join the working dir with "/", and the add the containerPath - to it. - pathRelativeToContainerMountPoint = filepath.Join(filepath.Join("/", c.WorkingDir()), containerPath) - } + pathRelativeToContainerMountPoint := c.pathAbs(containerPath) resolvedPathOnTheContainerMountPoint := filepath.Join(mountPoint, pathRelativeToContainerMountPoint) pathRelativeToContainerMountPoint = strings.TrimPrefix(pathRelativeToContainerMountPoint, mountPoint) pathRelativeToContainerMountPoint = filepath.Join("/", pathRelativeToContainerMountPoint) diff --git a/libpod/container_stat_linux.go b/libpod/container_stat_linux.go new file mode 100644 index 000000000..0b4d9e2df --- /dev/null +++ b/libpod/container_stat_linux.go @@ -0,0 +1,181 @@ +// +build linux + +package libpod + +import ( + "context" + "os" + "path/filepath" + "strings" + + "github.com/containers/buildah/copier" + "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v3/pkg/copy" + "github.com/pkg/errors" +) + +// statInsideMount stats the specified path *inside* the container's mount and PID +// namespace. It returns the file info along with the resolved root ("/") and +// the resolved path (relative to the root). +func (c *Container) statInsideMount(ctx context.Context, containerPath string) (*copier.StatForItem, string, string, error) { + resolvedRoot := "/" + resolvedPath := c.pathAbs(containerPath) + var statInfo *copier.StatForItem + + err := c.joinMountAndExec(ctx, + func() error { + var statErr error + statInfo, statErr = secureStat(resolvedRoot, resolvedPath) + return statErr + }, + ) + + return statInfo, resolvedRoot, resolvedPath, err +} + +// statOnHost stats the specified path *on the host*. It returns the file info +// along with the resolved root and the resolved path. Both paths are absolute +// to the host's root. Note that the paths may resolve outside the +// container's mount point (e.g., to a volume or bind mount). +func (c *Container) statOnHost(ctx context.Context, mountPoint string, containerPath string) (*copier.StatForItem, string, string, error) { + // Now resolve the container's path. It may hit a volume, it may hit a + // bind mount, it may be relative. + resolvedRoot, resolvedPath, err := c.resolvePath(mountPoint, containerPath) + if err != nil { + return nil, "", "", err + } + + statInfo, err := secureStat(resolvedRoot, resolvedPath) + return statInfo, resolvedRoot, resolvedPath, err +} + +func (c *Container) stat(ctx context.Context, containerMountPoint string, containerPath string) (*define.FileInfo, string, string, error) { + var ( + resolvedRoot string + resolvedPath string + absContainerPath string + statInfo *copier.StatForItem + statErr error + ) + + // Make sure that "/" copies the *contents* of the mount point and not + // the directory. + if containerPath == "/" { + containerPath = "/." + } + + // Wildcards are not allowed. + // TODO: it's now technically possible to support wildcards. + // We may consider enabling support in the future. + if strings.Contains(containerPath, "*") { + return nil, "", "", copy.ErrENOENT + } + + if c.state.State == define.ContainerStateRunning { + // If the container is running, we need to join its mount namespace + // and stat there. + statInfo, resolvedRoot, resolvedPath, statErr = c.statInsideMount(ctx, containerPath) + } else { + // If the container is NOT running, we need to resolve the path + // on the host. 
+ statInfo, resolvedRoot, resolvedPath, statErr = c.statOnHost(ctx, containerMountPoint, containerPath) + } + + if statErr != nil { + if statInfo == nil { + return nil, "", "", statErr + } + // Not all errors from secureStat map to ErrNotExist, so we + // have to look into the error string. Turning it into an + // ENOENT lets the API handlers return the correct status code + // which is crucial for the remote client. + if os.IsNotExist(statErr) || strings.Contains(statErr.Error(), "o such file or directory") { + statErr = copy.ErrENOENT + } + } + + if statInfo.IsSymlink { + // Symlinks are already evaluated and always relative to the + // container's mount point. + absContainerPath = statInfo.ImmediateTarget + } else if strings.HasPrefix(resolvedPath, containerMountPoint) { + // If the path is on the container's mount point, strip it off. + absContainerPath = strings.TrimPrefix(resolvedPath, containerMountPoint) + absContainerPath = filepath.Join("/", absContainerPath) + } else { + // No symlink and not on the container's mount point, so let's + // move it back to the original input. It must have evaluated + // to a volume or bind mount but we cannot return host paths. + absContainerPath = containerPath + } + + // Preserve the base path as specified by the user. The `filepath` + // package likes to remove trailing slashes and dots that are crucial + // to the copy logic. + absContainerPath = copy.PreserveBasePath(containerPath, absContainerPath) + resolvedPath = copy.PreserveBasePath(containerPath, resolvedPath) + + info := &define.FileInfo{ + IsDir: statInfo.IsDir, + Name: filepath.Base(absContainerPath), + Size: statInfo.Size, + Mode: statInfo.Mode, + ModTime: statInfo.ModTime, + LinkTarget: absContainerPath, + } + + return info, resolvedRoot, resolvedPath, statErr +} + +// secureStat extracts file info for path in a chroot'ed environment in root. +func secureStat(root string, path string) (*copier.StatForItem, error) { + var glob string + var err error + + // If root and path are equal, then dir must be empty and the glob must + // be ".". + if filepath.Clean(root) == filepath.Clean(path) { + glob = "." + } else { + glob, err = filepath.Rel(root, path) + if err != nil { + return nil, err + } + } + + globStats, err := copier.Stat(root, "", copier.StatOptions{}, []string{glob}) + if err != nil { + return nil, err + } + + if len(globStats) != 1 { + return nil, errors.Errorf("internal error: secureStat: expected 1 item but got %d", len(globStats)) + } + if len(globStats[0].Results) != 1 { + return nil, errors.Errorf("internal error: secureStat: expected 1 result but got %d", len(globStats[0].Results)) + } + + // NOTE: the key in the map differs from `glob` when hitting a symlink. + // Hence, we just take the first (and only) key/value pair. + for _, stat := range globStats[0].Results { + var statErr error + if stat.Error != "" { + statErr = errors.New(stat.Error) + } + // If necessary, evaluate the symlink + if stat.IsSymlink { + target, err := copier.Eval(root, path, copier.EvalOptions{}) + if err != nil { + return nil, errors.Wrap(err, "error evaluating symlink in container") + } + // Need to make sure the symlink is relative to the root! + target = strings.TrimPrefix(target, root) + target = filepath.Join("/", target) + stat.ImmediateTarget = target + } + return stat, statErr + } + + // Nothing found! 
+ return nil, copy.ErrENOENT +} diff --git a/libpod/container_stat_unsupported.go b/libpod/container_stat_unsupported.go new file mode 100644 index 000000000..c002e4d32 --- /dev/null +++ b/libpod/container_stat_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package libpod + +import ( + "context" + + "github.com/containers/podman/v3/libpod/define" +) + +func (c *Container) stat(ctx context.Context, containerMountPoint string, containerPath string) (*define.FileInfo, string, string, error) { + return nil, "", "", nil +} diff --git a/libpod/define/fileinfo.go b/libpod/define/fileinfo.go new file mode 100644 index 000000000..2c7b6fe99 --- /dev/null +++ b/libpod/define/fileinfo.go @@ -0,0 +1,16 @@ +package define + +import ( + "os" + "time" +) + +// FileInfo describes the attributes of a file or directory. +type FileInfo struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + ModTime time.Time `json:"mtime"` + IsDir bool `json:"isDir"` + LinkTarget string `json:"linkTarget"` +} diff --git a/libpod/define/mount.go b/libpod/define/mount.go new file mode 100644 index 000000000..1b0d019c8 --- /dev/null +++ b/libpod/define/mount.go @@ -0,0 +1,12 @@ +package define + +const ( + // TypeBind is the type for mounting host dir + TypeBind = "bind" + // TypeVolume is the type for named volumes + TypeVolume = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs = "tmpfs" + // TypeDevpts is the type for creating a devpts + TypeDevpts = "devpts" +) diff --git a/libpod/define/version.go b/libpod/define/version.go index 67dc730ac..5249b5d84 100644 --- a/libpod/define/version.go +++ b/libpod/define/version.go @@ -5,7 +5,7 @@ import ( "strconv" "time" - podmanVersion "github.com/containers/podman/v3/version" + "github.com/containers/podman/v3/version" ) // Overwritten at build time @@ -42,8 +42,8 @@ func GetVersion() (Version, error) { } } return Version{ - APIVersion: podmanVersion.APIVersion.String(), - Version: podmanVersion.Version.String(), + APIVersion: version.APIVersion[version.Libpod][version.CurrentAPI].String(), + Version: version.Version.String(), GoVersion: runtime.Version(), GitCommit: gitCommit, BuiltTime: time.Unix(buildTime, 0).Format(time.ANSIC), diff --git a/libpod/image/image.go b/libpod/image/image.go index 7c760a79a..12dc22360 100644 --- a/libpod/image/image.go +++ b/libpod/image/image.go @@ -38,7 +38,6 @@ import ( "github.com/containers/storage" digest "github.com/opencontainers/go-digest" ociv1 "github.com/opencontainers/image-spec/specs-go/v1" - opentracing "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -143,11 +142,7 @@ func (ir *Runtime) NewFromLocal(name string) (*Image, error) { // New creates a new image object where the image could be local // or remote -func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, label *string, pullType util.PullType) (*Image, error) { - span, _ := opentracing.StartSpanFromContext(ctx, "newImage") - span.SetTag("type", "runtime") - defer span.Finish() - +func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, label *string, pullType util.PullType, progress chan types.ProgressProperties) (*Image, error) { // We don't know if the image is local or not ... 
check local first if pullType != util.PullImageAlways { newImage, err := ir.NewFromLocal(name) @@ -162,7 +157,7 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile if signaturePolicyPath == "" { signaturePolicyPath = ir.SignaturePolicyPath } - imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions, &retry.RetryOptions{MaxRetry: maxRetry}, label) + imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions, &retry.RetryOptions{MaxRetry: maxRetry}, label, progress) if err != nil { return nil, err } @@ -323,7 +318,7 @@ func (ir *Runtime) LoadAllImagesFromDockerArchive(ctx context.Context, fileName } defer goal.cleanUp() - imageNames, err := ir.doPullImage(ctx, sc, goal, writer, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{}, nil) + imageNames, err := ir.doPullImage(ctx, sc, goal, writer, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{}, nil, nil) if err != nil { return nil, err } @@ -816,7 +811,7 @@ func (i *Image) UntagImage(tag string) error { // PushImageToHeuristicDestination pushes the given image to "destination", which is heuristically parsed. // Use PushImageToReference if the destination is known precisely. -func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination, manifestMIMEType, authFile, digestFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { +func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination, manifestMIMEType, authFile, digestFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged, progress chan types.ProgressProperties) error { if destination == "" { return errors.Wrapf(syscall.EINVAL, "destination image name must be specified") } @@ -834,11 +829,11 @@ func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination return err } } - return i.PushImageToReference(ctx, dest, manifestMIMEType, authFile, digestFile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, additionalDockerArchiveTags) + return i.PushImageToReference(ctx, dest, manifestMIMEType, authFile, digestFile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, additionalDockerArchiveTags, progress) } // PushImageToReference pushes the given image to a location described by the given path -func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageReference, manifestMIMEType, authFile, digestFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { +func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageReference, manifestMIMEType, authFile, digestFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged, progress chan types.ProgressProperties) error { sc := GetSystemContext(signaturePolicyPath, authFile, forceCompress) sc.BlobInfoCacheDir = 
filepath.Join(i.imageruntime.store.GraphRoot(), "cache") @@ -859,6 +854,10 @@ func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageRefere } copyOptions := getCopyOptions(sc, writer, nil, dockerRegistryOptions, signingOptions, manifestMIMEType, additionalDockerArchiveTags) copyOptions.DestinationCtx.SystemRegistriesConfPath = registries.SystemRegistriesConfPath() // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place. + if progress != nil { + copyOptions.Progress = progress + copyOptions.ProgressInterval = time.Second + } // Copy the image to the remote destination manifestBytes, err := cp.Image(ctx, policyContext, dest, src, copyOptions) if err != nil { @@ -1293,21 +1292,11 @@ func (i *Image) inspect(ctx context.Context, calculateSize bool) (*inspect.Image // Inspect returns an image's inspect data func (i *Image) Inspect(ctx context.Context) (*inspect.ImageData, error) { - span, _ := opentracing.StartSpanFromContext(ctx, "imageInspect") - - span.SetTag("type", "image") - defer span.Finish() - return i.inspect(ctx, true) } // InspectNoSize returns an image's inspect data without calculating the size for the image func (i *Image) InspectNoSize(ctx context.Context) (*inspect.ImageData, error) { - span, _ := opentracing.StartSpanFromContext(ctx, "imageInspectNoSize") - - span.SetTag("type", "image") - defer span.Finish() - return i.inspect(ctx, false) } @@ -1648,7 +1637,7 @@ func (i *Image) Save(ctx context.Context, source, format, output string, moreTag return err } } - if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", "", writer, compress, SigningOptions{RemoveSignatures: removeSignatures}, &DockerRegistryOptions{}, additionaltags); err != nil { + if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", "", writer, compress, SigningOptions{RemoveSignatures: removeSignatures}, &DockerRegistryOptions{}, additionaltags, nil); err != nil { return errors.Wrapf(err, "unable to save %q", source) } i.newImageEvent(events.Save) diff --git a/libpod/image/image_test.go b/libpod/image/image_test.go index 1ea4f6c11..3e6e7b9db 100644 --- a/libpod/image/image_test.go +++ b/libpod/image/image_test.go @@ -94,9 +94,9 @@ func TestImage_NewFromLocal(t *testing.T) { ir, err := NewImageRuntimeFromOptions(so) assert.NoError(t, err) ir.Eventer = events.NewNullEventer() - bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing) + bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil) assert.NoError(t, err) - bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing) + bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil) assert.NoError(t, err) tm := makeLocalMatrix(bb, bbglibc) @@ -140,7 +140,7 @@ func TestImage_New(t *testing.T) { // Iterate over the names and delete the image // after the pull for _, img := range names { - newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing) + newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil) assert.NoError(t, err) assert.NotEqual(t, 
diff --git a/libpod/image/image_test.go b/libpod/image/image_test.go
index 1ea4f6c11..3e6e7b9db 100644
--- a/libpod/image/image_test.go
+++ b/libpod/image/image_test.go
@@ -94,9 +94,9 @@ func TestImage_NewFromLocal(t *testing.T) {
 ir, err := NewImageRuntimeFromOptions(so)
 assert.NoError(t, err)
 ir.Eventer = events.NewNullEventer()
- bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing)
+ bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil)
 assert.NoError(t, err)
- bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing)
+ bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil)
 assert.NoError(t, err)
 
 tm := makeLocalMatrix(bb, bbglibc)
@@ -140,7 +140,7 @@ func TestImage_New(t *testing.T) {
 // Iterate over the names and delete the image
 // after the pull
 for _, img := range names {
- newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing)
+ newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil)
 assert.NoError(t, err)
 assert.NotEqual(t, newImage.ID(), "")
 err = newImage.Remove(context.Background(), false)
@@ -169,7 +169,7 @@ func TestImage_MatchRepoTag(t *testing.T) {
 ir, err := NewImageRuntimeFromOptions(so)
 assert.NoError(t, err)
 ir.Eventer = events.NewNullEventer()
- newImage, err := ir.New(context.Background(), "busybox", "", "", os.Stdout, nil, SigningOptions{}, nil, util.PullImageMissing)
+ newImage, err := ir.New(context.Background(), "busybox", "", "", os.Stdout, nil, SigningOptions{}, nil, util.PullImageMissing, nil)
 assert.NoError(t, err)
 err = newImage.TagImage("foo:latest")
 assert.NoError(t, err)
diff --git a/libpod/image/layer_tree.go b/libpod/image/layer_tree.go
index dde39dba1..aa3084449 100644
--- a/libpod/image/layer_tree.go
+++ b/libpod/image/layer_tree.go
@@ -4,7 +4,6 @@ import (
 "context"
 
 ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
 )
 
@@ -188,7 +187,12 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) {
 node, exists := t.nodes[child.TopLayer()]
 if !exists {
- return nil, errors.Errorf("layer not found in layer tree: %q", child.TopLayer())
+ // Note: erroring out in this case has turned out to be a
+ // mistake. Users may not be able to recover, so we now log a
+ // warning to guide them toward resolving the issue and treat
+ // the condition as non-fatal.
+ logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", child.TopLayer())
+ return nil, nil
 }
 
 childOCI, err := t.toOCI(ctx, child)
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index 3cb1e57c7..58160b52f 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -6,6 +6,7 @@ import (
 "io"
 "path/filepath"
 "strings"
+ "time"
 
 "github.com/containers/common/pkg/retry"
 cp "github.com/containers/image/v5/copy"
@@ -22,7 +23,6 @@ import (
 "github.com/containers/podman/v3/libpod/events"
 "github.com/containers/podman/v3/pkg/errorhandling"
 "github.com/containers/podman/v3/pkg/registries"
- "github.com/opentracing/opentracing-go"
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
 )
@@ -171,9 +171,6 @@ func (ir *Runtime) getPullRefPairsFromDockerArchiveReference(ctx context.Context
 
 // pullGoalFromImageReference returns a pull goal for a single ImageReference, depending on the used transport.
 // Note that callers are responsible for invoking (*pullGoal).cleanUp() to clean up possibly open resources.
 func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.ImageReference, imgName string, sc *types.SystemContext) (*pullGoal, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "pullGoalFromImageReference")
- defer span.Finish()
-
 // supports pulling from docker-archive, oci, and registries
 switch srcRef.Transport().Name() {
 case DockerArchive:
@@ -241,10 +238,7 @@ func toLocalImageName(imageName string) string {
 
 // pullImageFromHeuristicSource pulls an image based on inputName, which is heuristically parsed and may involve configured registries.
 // Use pullImageFromReference if the source is known precisely.
-func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string) ([]string, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "pullImageFromHeuristicSource")
- defer span.Finish()
-
+func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string, progress chan types.ProgressProperties) ([]string, error) {
 var goal *pullGoal
 sc := GetSystemContext(signaturePolicyPath, authfile, false)
 if dockerOptions != nil {
@@ -275,14 +269,11 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s
 }
 }
 defer goal.cleanUp()
- return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, label)
+ return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, label, progress)
 }
 
 // pullImageFromReference pulls an image from a types.ImageReference.
 func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.ImageReference, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions) ([]string, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "pullImageFromReference")
- defer span.Finish()
-
 sc := GetSystemContext(signaturePolicyPath, authfile, false)
 if dockerOptions != nil {
 sc.OSChoice = dockerOptions.OSChoice
@@ -294,7 +285,7 @@ func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.Imag
 return nil, errors.Wrapf(err, "error determining pull goal for image %q", transports.ImageName(srcRef))
 }
 defer goal.cleanUp()
- return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, nil)
+ return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, nil, nil)
 }
 
 func cleanErrorMessage(err error) string {
@@ -304,10 +295,7 @@ func cleanErrorMessage(err error) string {
 }
 
 // doPullImage is an internal helper interpreting pullGoal. Almost everyone should call one of the callers of doPullImage instead.
-func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string) ([]string, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "doPullImage")
- defer span.Finish()
-
+func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string, progress chan types.ProgressProperties) ([]string, error) {
 policyContext, err := getPolicyContext(sc)
 if err != nil {
 return nil, err
@@ -328,6 +316,10 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
 for _, imageInfo := range goal.refPairs {
 copyOptions := getCopyOptions(sc, writer, dockerOptions, nil, signingOptions, "", nil)
 copyOptions.SourceCtx.SystemRegistriesConfPath = systemRegistriesConfPath // FIXME: Set this more globally.  Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place.
+ if progress != nil {
+ copyOptions.Progress = progress
+ copyOptions.ProgressInterval = time.Second
+ }
 // Print the following statement only when pulling from a docker or atomic registry
 if writer != nil && (imageInfo.srcRef.Transport().Name() == DockerTransport || imageInfo.srcRef.Transport().Name() == AtomicTransport) {
 if _, err := io.WriteString(writer, fmt.Sprintf("Trying to pull %s...\n", imageInfo.image)); err != nil {
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 26f15d9c8..3875878ed 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -822,6 +822,46 @@ func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *Container
 return nil
 }
 
+// SafeRewriteContainerConfig rewrites a container's configuration.
+// It's safer than RewriteContainerConfig, but still has limitations. Please
+// read the comment in state.go before using.
+func (s *InMemoryState) SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error {
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if _, err := s.nameIndex.Get(newName); err == nil {
+ return errors.Wrapf(define.ErrCtrExists, "name %s is in use", newName)
+ }
+
+ // If the container does not exist, return error
+ stateCtr, ok := s.containers[ctr.ID()]
+ if !ok {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
+ }
+
+ // Change name in registry.
+ if s.namespace != "" {
+ nsIndex, ok := s.namespaceIndexes[s.namespace]
+ if !ok {
+ return define.ErrInternal
+ }
+ nsIndex.nameIndex.Release(oldName)
+ if err := nsIndex.nameIndex.Reserve(newName, ctr.ID()); err != nil {
+ return errors.Wrapf(err, "error registering name %s", newName)
+ }
+ }
+ s.nameIndex.Release(oldName)
+ if err := s.nameIndex.Reserve(newName, ctr.ID()); err != nil {
+ return errors.Wrapf(err, "error registering name %s", newName)
+ }
+
+ stateCtr.config = newCfg
+
+ return nil
+}
+
 // RewritePodConfig rewrites a pod's configuration.
 // This function is DANGEROUS, even with in-memory state.
 // Please read the full comment on it in state.go before using it.
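One detail of the in-memory implementation worth a note: it releases oldName before reserving newName, so if the reservation were to fail, the old name would already have been dropped from the index. The standalone sketch below (hypothetical registrar type, not libpod's actual name index API) shows the reserve-first ordering that keeps the old name intact on failure:

package main

import "fmt"

// registrar is a hypothetical stand-in for a name index: it maps
// reserved names to container IDs.
type registrar struct{ names map[string]string }

func (r *registrar) Reserve(name, id string) error {
	if owner, taken := r.names[name]; taken && owner != id {
		return fmt.Errorf("name %s already reserved by %s", name, owner)
	}
	r.names[name] = id
	return nil
}

func (r *registrar) Release(name string) { delete(r.names, name) }

// rename reserves the new name first and only then releases the old one,
// so a failed reservation leaves the old name registered.
func rename(r *registrar, id, oldName, newName string) error {
	if err := r.Reserve(newName, id); err != nil {
		return err
	}
	r.Release(oldName)
	return nil
}

func main() {
	r := &registrar{names: map[string]string{"old": "ctr1", "taken": "ctr2"}}
	fmt.Println(rename(r, "ctr1", "old", "taken")) // fails; "old" stays reserved
	fmt.Println(rename(r, "ctr1", "old", "new"))   // succeeds
	fmt.Println(r.names)
}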
diff --git a/libpod/kube.go b/libpod/kube.go
index 0c4f9f0a0..6feb69fea 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -676,8 +676,18 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
 return nil, errors.Wrapf(err, "unable to sync container during YAML generation")
 }
 
+ mountpoint := c.state.Mountpoint
+ if mountpoint == "" {
+ var err error
+ mountpoint, err = c.mount()
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to mount container %s", c.ID())
+ }
+ defer c.unmount(false)
+ }
 logrus.Debugf("Looking in container for user: %s", c.User())
- execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.User(), nil)
+
+ execUser, err := lookup.GetUserGroupInfo(mountpoint, c.User(), nil)
 if err != nil {
 return nil, err
 }
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 0526e646e..d6968a6b5 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -809,7 +809,7 @@ func (r *Runtime) teardownCNI(ctr *Container) error {
 requestedMAC = ctr.config.StaticMAC
 }
 
- podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), networks, ctr.config.PortMappings, requestedIP, requestedMAC, ContainerNetworkDescriptions{})
+ podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), networks, ctr.config.PortMappings, requestedIP, requestedMAC, ctr.state.NetInterfaceDescriptions)
 
 if err := r.netPlugin.TearDownPod(podNetwork); err != nil {
 return errors.Wrapf(err, "error tearing down CNI namespace configuration for container %s", ctr.ID())
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index de7630c06..ef5f6fb0c 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -28,6 +28,7 @@ import (
 "github.com/containers/podman/v3/libpod/define"
 "github.com/containers/podman/v3/libpod/logs"
 "github.com/containers/podman/v3/pkg/cgroups"
+ "github.com/containers/podman/v3/pkg/checkpoint/crutils"
 "github.com/containers/podman/v3/pkg/errorhandling"
 "github.com/containers/podman/v3/pkg/lookup"
 "github.com/containers/podman/v3/pkg/rootless"
@@ -112,9 +113,11 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
 
 // TODO: probe OCI runtime for feature and enable automatically if
 // available.
- runtime.supportsJSON = supportsJSON[name]
- runtime.supportsNoCgroups = supportsNoCgroups[name]
- runtime.supportsKVM = supportsKVM[name]
+
+ base := filepath.Base(name)
+ runtime.supportsJSON = supportsJSON[base]
+ runtime.supportsNoCgroups = supportsNoCgroups[base]
+ runtime.supportsKVM = supportsKVM[base]
 
 foundPath := false
 for _, path := range paths {
@@ -837,16 +840,7 @@ func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) {
 
 // SupportsCheckpoint checks if the OCI runtime supports checkpointing
 // containers.
 func (r *ConmonOCIRuntime) SupportsCheckpoint() bool {
- // Check if the runtime implements checkpointing. Currently only
- // runc's checkpoint/restore implementation is supported.
- cmd := exec.Command(r.path, "checkpoint", "--help")
- if err := cmd.Start(); err != nil {
- return false
- }
- if err := cmd.Wait(); err == nil {
- return true
- }
- return false
+ return crutils.CRRuntimeSupportsCheckpointRestore(r.path)
 }
 
 // SupportsJSONErrors checks if the OCI runtime supports JSON-formatted error
diff --git a/libpod/options.go b/libpod/options.go
index 6344e1acc..48888a2f2 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -64,15 +64,22 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
 setField = true
 }
 
+ graphDriverChanged := false
 if config.GraphDriverName != "" {
 rt.storageConfig.GraphDriverName = config.GraphDriverName
 rt.storageSet.GraphDriverNameSet = true
 setField = true
+ graphDriverChanged = true
 }
 
 if config.GraphDriverOptions != nil {
- rt.storageConfig.GraphDriverOptions = make([]string, len(config.GraphDriverOptions))
- copy(rt.storageConfig.GraphDriverOptions, config.GraphDriverOptions)
+ if graphDriverChanged {
+ rt.storageConfig.GraphDriverOptions = make([]string, len(config.GraphDriverOptions))
+ copy(rt.storageConfig.GraphDriverOptions, config.GraphDriverOptions)
+ } else {
+ // append new options after what is specified in the config files
+ rt.storageConfig.GraphDriverOptions = append(rt.storageConfig.GraphDriverOptions, config.GraphDriverOptions...)
+ }
 setField = true
 }
diff --git a/libpod/rootless_cni_linux.go b/libpod/rootless_cni_linux.go
index e97985180..df690e914 100644
--- a/libpod/rootless_cni_linux.go
+++ b/libpod/rootless_cni_linux.go
@@ -265,7 +265,7 @@ func startRootlessCNIInfraContainer(ctx context.Context, r *Runtime) (*Container
 }
 logrus.Debugf("rootless CNI: ensuring image %q exists", imageName)
 newImage, err := r.ImageRuntime().New(ctx, imageName, "", "", nil, nil,
- image.SigningOptions{}, nil, util.PullImageMissing)
+ image.SigningOptions{}, nil, util.PullImageMissing, nil)
 if err != nil {
 return nil, err
 }
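The WithStorageConfig hunk above changes the merge semantics for graph-driver options: they now replace the values from the config files only when the graph driver itself is being overridden, and are appended after them otherwise. A standalone sketch of the two paths (the option strings are made up):

package main

import "fmt"

// mergeGraphDriverOptions mirrors the logic in WithStorageConfig above:
// replace when the driver itself was changed, append otherwise.
func mergeGraphDriverOptions(existing, incoming []string, graphDriverChanged bool) []string {
	if graphDriverChanged {
		merged := make([]string, len(incoming))
		copy(merged, incoming)
		return merged
	}
	return append(existing, incoming...)
}

func main() {
	fromConfig := []string{"overlay.mountopt=nodev"}
	fromCaller := []string{"overlay.size=10G"}
	fmt.Println(mergeGraphDriverOptions(fromConfig, fromCaller, false)) // [overlay.mountopt=nodev overlay.size=10G]
	fmt.Println(mergeGraphDriverOptions(fromConfig, fromCaller, true))  // [overlay.size=10G]
}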
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 8bf862bf2..19690d79b 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -22,7 +22,6 @@ import (
 "github.com/docker/go-units"
 spec "github.com/opencontainers/runtime-spec/specs-go"
 "github.com/opencontainers/runtime-tools/generate"
- "github.com/opentracing/opentracing-go"
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
 )
@@ -74,8 +73,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
 }
 
 // RenameContainer renames the given container.
-// The given container object will be rendered unusable, and a new, renamed
-// Container will be returned.
+// Returns the container, with its name updated, if successful.
 func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName string) (*Container, error) {
 ctr.lock.Lock()
 defer ctr.lock.Unlock()
@@ -88,26 +86,6 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
 return nil, define.RegexError
 }
 
- // Check if the name is available.
- // This is *100% NOT ATOMIC* so any failures in-flight will do
- // *VERY BAD THINGS* to the state. So we have to try and catch all we
- // can before starting.
- if _, err := r.state.LookupContainerID(newName); err == nil {
- return nil, errors.Wrapf(define.ErrCtrExists, "name %s is already in use by another container", newName)
- }
- if _, err := r.state.LookupPod(newName); err == nil {
- return nil, errors.Wrapf(define.ErrPodExists, "name %s is already in use by another pod", newName)
- }
-
- // TODO: Investigate if it is possible to remove this limitation.
- depCtrs, err := r.state.ContainerInUse(ctr)
- if err != nil {
- return nil, err
- }
- if len(depCtrs) > 0 {
- return nil, errors.Wrapf(define.ErrCtrExists, "cannot rename container %s as it is in use by other containers: %v", ctr.ID(), strings.Join(depCtrs, ","))
- }
-
 // We need to pull an updated config, in case another rename fired and
 // the config was re-written.
 newConf, err := r.state.GetContainerConfig(ctr.ID())
@@ -116,95 +94,33 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
 }
 ctr.config = newConf
 
- // TODO: This is going to fail if we have active exec sessions, too.
- // Investigate fixing that at a later date.
-
- var pod *Pod
- if ctr.config.Pod != "" {
- tmpPod, err := r.state.Pod(ctr.config.Pod)
- if err != nil {
- return nil, errors.Wrapf(err, "error retrieving container %s pod", ctr.ID())
- }
- pod = tmpPod
- // Lock pod to ensure it's not removed while we're working
- pod.lock.Lock()
- defer pod.lock.Unlock()
- }
-
- // Lock all volumes to ensure they are not removed while we're working
- volsLocked := make(map[string]bool)
- for _, namedVol := range ctr.config.NamedVolumes {
- if volsLocked[namedVol.Name] {
- continue
- }
- vol, err := r.state.Volume(namedVol.Name)
- if err != nil {
- return nil, errors.Wrapf(err, "error retrieving volume used by container %s", ctr.ID())
- }
-
- volsLocked[vol.Name()] = true
- vol.lock.Lock()
- defer vol.lock.Unlock()
- }
-
 logrus.Infof("Going to rename container %s from %q to %q", ctr.ID(), ctr.Name(), newName)
 
- // Step 1: remove the old container.
- if pod != nil {
- if err := r.state.RemoveContainerFromPod(pod, ctr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
- }
- } else {
- if err := r.state.RemoveContainer(ctr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
- }
- }
-
- // Step 2: Make a new container based on the old one.
- // TODO: Should we deep-copy the container config and state, to be safe?
- newCtr := new(Container)
- newCtr.config = ctr.config
- newCtr.state = ctr.state
- newCtr.lock = ctr.lock
- newCtr.ociRuntime = ctr.ociRuntime
- newCtr.runtime = r
- newCtr.rootlessSlirpSyncR = ctr.rootlessSlirpSyncR
- newCtr.rootlessSlirpSyncW = ctr.rootlessSlirpSyncW
- newCtr.rootlessPortSyncR = ctr.rootlessPortSyncR
- newCtr.rootlessPortSyncW = ctr.rootlessPortSyncW
-
- newCtr.valid = true
- newCtr.config.Name = newName
-
- // Step 3: Add that new container to the DB
- if pod != nil {
- if err := r.state.AddContainerToPod(pod, newCtr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
- }
- } else {
- if err := r.state.AddContainer(newCtr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
- }
- }
+ // Step 1: Alter the config. Save the old name; we need it to rewrite
+ // the config.
+ oldName := ctr.config.Name
+ ctr.config.Name = newName
 
- // Step 4: Save the new container, to force the state to be written to
- // the DB. This may not be necessary, depending on DB implementation,
- // but let's do it to be safe.
- if err := newCtr.save(); err != nil {
- return nil, err
+ // Step 2: rewrite the old container's config in the DB.
+ if err := r.state.SafeRewriteContainerConfig(ctr, oldName, ctr.config.Name, ctr.config); err != nil {
+ // Assume the rename failed.
+ // Set the config back to the old name so it reflects what is
+ // actually present in the DB.
+ ctr.config.Name = oldName
+ return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
 }
 
- // Step 5: rename the container in c/storage.
+ // Step 3: rename the container in c/storage.
 // This can fail if the name is already in use by a non-Podman
 // container. This puts us in a bad spot - we've already renamed the
 // container in Podman. We can swap the order, but then we have the
 // opposite problem. Atomicity is a real problem here, with no easy
 // solution.
- if err := r.store.SetNames(newCtr.ID(), []string{newCtr.Name()}); err != nil {
+ if err := r.store.SetNames(ctr.ID(), []string{ctr.Name()}); err != nil {
 return nil, err
 }
 
- return newCtr, nil
+ return ctr, nil
 }
 
 func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
@@ -262,10 +178,6 @@
 }
 
 func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (*Container, error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "newContainer")
- span.SetTag("type", "runtime")
- defer span.Finish()
-
 ctr, err := r.initContainerVariables(rSpec, nil)
 if err != nil {
 return nil, errors.Wrapf(err, "error initializing container variables")
@@ -555,10 +467,6 @@ func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool,
 // infra container protections, and *not* remove from the database (as pod
 // remove will handle that).
 func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, removeVolume, removePod bool) error {
- span, _ := opentracing.StartSpanFromContext(ctx, "removeContainer")
- span.SetTag("type", "runtime")
- defer span.Finish()
-
 if !c.valid {
 if ok, _ := r.state.HasContainer(c.ID()); !ok {
 // Container probably already removed
@@ -806,7 +714,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
 
 id, err := r.state.LookupContainerID(idOrName)
 if err != nil {
- return "", errors.Wrapf(err, "failed to find container %q in state", idOrName)
+ return "", err
 }
 
 // Begin by trying a normal removal. Valid containers will be removed normally.
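With the rewrite above, rename no longer removes and re-adds the container: SafeRewriteContainerConfig updates the config in place and the same *Container pointer is handed back. A hypothetical caller, assuming the libpod package is importable and runtime/container setup is elided:

package main

import (
	"context"

	"github.com/containers/podman/v3/libpod"
	"github.com/sirupsen/logrus"
)

// renameAndReport is a hypothetical helper around the new rename flow.
func renameAndReport(ctx context.Context, r *libpod.Runtime, ctr *libpod.Container, newName string) error {
	renamed, err := r.RenameContainer(ctx, ctr, newName)
	if err != nil {
		return err
	}
	// renamed is the same object as ctr, updated in place, so existing
	// handles stay valid after the rename.
	logrus.Infof("container %s is now named %s", renamed.ID(), renamed.Name())
	return nil
}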
@@ -836,7 +744,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
 return id, err
 }
 if !exists {
- return id, errors.Wrapf(err, "failed to find container ID %q for eviction", id)
+ return id, err
 }
 
 // Re-create a container struct for removal purposes
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index 6e1105b9e..90b11f8ca 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -316,7 +316,8 @@ func (r *Runtime) LoadImageFromSingleImageArchive(ctx context.Context, writer io
 } {
 src, err := referenceFn()
 if err == nil && src != nil {
- if newImages, err := r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer); err == nil {
+ newImages, err := r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ if err == nil {
 return getImageNames(newImages), nil
 }
 saveErr = err
@@ -325,6 +326,15 @@ func (r *Runtime) LoadImageFromSingleImageArchive(ctx context.Context, writer io
 return "", errors.Wrapf(saveErr, "error pulling image")
 }
 
+// RemoveImageFromStorage goes directly to storage and attempts to remove
+// the specified image. This is dangerous and should only be done if libpod
+// reports that the image is not known. This call is useful if you have a
+// corrupted image that was never fully added to the libpod database.
+func (r *Runtime) RemoveImageFromStorage(id string) error {
+ _, err := r.store.DeleteImage(id, true)
+ return err
+}
+
 func getImageNames(images []*image.Image) string {
 var names string
 for i := range images {
diff --git a/libpod/runtime_img_test.go b/libpod/runtime_img_test.go
index 7d6390c85..c25f3f08c 100644
--- a/libpod/runtime_img_test.go
+++ b/libpod/runtime_img_test.go
@@ -37,7 +37,7 @@ func TestGetRegistries(t *testing.T) {
 registryPath, err := createTmpFile([]byte(registry))
 assert.NoError(t, err)
 defer os.Remove(registryPath)
- os.Setenv("REGISTRIES_CONFIG_PATH", registryPath)
+ os.Setenv("CONTAINERS_REGISTRIES_CONF", registryPath)
 registries, err := sysreg.GetRegistries()
 assert.NoError(t, err)
 assert.True(t, reflect.DeepEqual(registries, []string{"one"}))
@@ -46,7 +46,7 @@ func TestGetInsecureRegistries(t *testing.T) {
 registryPath, err := createTmpFile([]byte(registry))
 assert.NoError(t, err)
- os.Setenv("REGISTRIES_CONFIG_PATH", registryPath)
+ os.Setenv("CONTAINERS_REGISTRIES_CONF", registryPath)
 defer os.Remove(registryPath)
 registries, err := sysreg.GetInsecureRegistries()
 assert.NoError(t, err)
diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go
index 000029fa4..0a09e40ea 100644
--- a/libpod/runtime_pod_infra_linux.go
+++ b/libpod/runtime_pod_infra_linux.go
@@ -216,7 +216,7 @@ func (r *Runtime) createInfraContainer(ctx context.Context, p *Pod) (*Container,
 if img == "" {
 img = r.config.Engine.InfraImage
 }
- newImage, err := r.ImageRuntime().New(ctx, img, "", "", nil, nil, image.SigningOptions{}, nil, util.PullImageMissing)
+ newImage, err := r.ImageRuntime().New(ctx, img, "", "", nil, nil, image.SigningOptions{}, nil, util.PullImageMissing, nil)
 if err != nil {
 return nil, err
 }
diff --git a/libpod/state.go b/libpod/state.go
index 074d21740..4b711bae9 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -155,6 +155,19 @@ type State interface {
 // answer is this: use this only very sparingly, and only if you really
 // know what you're doing.
 RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error
+ // This is a more limited version of RewriteContainerConfig, though it
+ // comes with the added ability to alter a container's name. In exchange
+ // it loses the ability to manipulate the container's locks.
+ // It is not intended to be as restrictive as RewriteContainerConfig, in
+ // that we allow it to be run while other Podman processes are running,
+ // and without holding the alive lock.
+ // Container ID and pod membership still *ABSOLUTELY CANNOT* be altered.
+ // Also, you cannot change a container's dependencies - shared namespace
+ // containers or generic dependencies - at present. This is
+ // theoretically possible but not yet implemented.
+ // If newName is not "", the container will be renamed to the new name.
+ // The oldName parameter is only required if newName is given.
+ SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error
 // PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING.
 // This function is identical to RewriteContainerConfig, save for the
 // fact that it is used with pods instead.
diff --git a/libpod/storage.go b/libpod/storage.go
index 418eb3151..4aa42dc8e 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -10,7 +10,6 @@ import (
 "github.com/containers/storage"
 "github.com/containers/storage/pkg/idtools"
 v1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/opentracing/opentracing-go"
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
 )
@@ -67,10 +66,6 @@ func (metadata *RuntimeContainerMetadata) SetMountLabel(mountLabel string) {
 
 // CreateContainerStorage creates the storage end of things. We already have the container spec created
 // TO-DO We should be passing in an Image object in the future.
 func (r *storageService) CreateContainerStorage(ctx context.Context, systemContext *types.SystemContext, imageName, imageID, containerName, containerID string, options storage.ContainerOptions) (_ ContainerInfo, retErr error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "createContainerStorage")
- span.SetTag("type", "storageService")
- defer span.Finish()
-
 var imageConfig *v1.Image
 if imageName != "" {
 var ref types.ImageReference
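Finally, a note on the filepath.Base change in the oci_conmon_linux.go hunk earlier: the supports* capability tables are keyed by bare runtime names, so a runtime configured by absolute path previously missed every lookup. A standalone illustration of the normalization (the table contents are illustrative):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Keyed by bare name, as the supportsJSON/supportsNoCgroups/supportsKVM
	// tables are.
	supportsJSON := map[string]bool{"runc": true, "crun": true}

	for _, name := range []string{"crun", "/usr/bin/crun", "/usr/local/sbin/runc"} {
		base := filepath.Base(name) // reduces a path to its final element
		fmt.Printf("%-22s -> %-5s supportsJSON=%v\n", name, base, supportsJSON[base])
	}
}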