Diffstat (limited to 'libpod')
-rw-r--r--  libpod/container.go                 |  9
-rw-r--r--  libpod/container_api.go             | 44
-rw-r--r--  libpod/container_internal.go        | 45
-rw-r--r--  libpod/container_internal_linux.go  |  1
-rw-r--r--  libpod/networking_linux.go          | 28
-rw-r--r--  libpod/oci_conmon_linux.go          |  6
-rw-r--r--  libpod/options.go                   | 16
-rw-r--r--  libpod/runtime.go                   | 52
-rw-r--r--  libpod/runtime_ctr.go               | 31
-rw-r--r--  libpod/runtime_volume_linux.go      |  9
-rw-r--r--  libpod/volume_inspect.go            |  3
11 files changed, 162 insertions, 82 deletions
diff --git a/libpod/container.go b/libpod/container.go
index 7be73b3c3..fc9ef0c86 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -1185,3 +1185,12 @@ func (c *Container) HasHealthCheck() bool {
 func (c *Container) HealthCheckConfig() *manifest.Schema2HealthConfig {
 	return c.config.HealthCheckConfig
 }
+
+// AutoRemove indicates whether the container will be removed after it is executed
+func (c *Container) AutoRemove() bool {
+	spec := c.config.Spec
+	if spec.Annotations == nil {
+		return false
+	}
+	return c.Spec().Annotations[InspectAnnotationAutoremove] == InspectResponseTrue
+}
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 759a7067e..1b2d52ce3 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -32,9 +32,7 @@ func (c *Container) Init(ctx context.Context) (err error) {
 		}
 	}
 
-	if !(c.state.State == define.ContainerStateConfigured ||
-		c.state.State == define.ContainerStateStopped ||
-		c.state.State == define.ContainerStateExited) {
+	if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateStopped, define.ContainerStateExited) {
 		return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID())
 	}
 
@@ -176,15 +174,12 @@ func (c *Container) StopWithTimeout(timeout uint) error {
 		}
 	}
 
-	if c.state.State == define.ContainerStateConfigured ||
-		c.state.State == define.ContainerStateUnknown ||
-		c.state.State == define.ContainerStatePaused {
-		return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s is in state %s", c.ID(), c.state.State.String())
+	if c.ensureState(define.ContainerStateStopped, define.ContainerStateExited) {
+		return define.ErrCtrStopped
 	}
 
-	if c.state.State == define.ContainerStateStopped ||
-		c.state.State == define.ContainerStateExited {
-		return define.ErrCtrStopped
+	if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
+		return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created or running containers. %s is in state %s", c.ID(), c.state.State.String())
 	}
 
 	return c.stop(timeout, false)
@@ -201,6 +196,7 @@ func (c *Container) Kill(signal uint) error {
 		}
 	}
 
+	// TODO: Is killing a paused container OK?
 	if c.state.State != define.ContainerStateRunning {
 		return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
 	}
@@ -234,10 +230,7 @@ func (c *Container) Exec(tty, privileged bool, env map[string]string, cmd []stri
 		}
 	}
 
-	conState := c.state.State
-
-	// TODO can probably relax this once we track exec sessions
-	if conState != define.ContainerStateRunning {
+	if c.state.State != define.ContainerStateRunning {
 		return define.ExecErrorCodeCannotInvoke, errors.Wrapf(define.ErrCtrStateInvalid, "cannot exec into container that is not running")
 	}
 
@@ -391,11 +384,10 @@ func (c *Container) Attach(streams *AttachStreams, keys string, resize <-chan re
 		c.lock.Unlock()
 	}
 
-	if c.state.State != define.ContainerStateCreated &&
-		c.state.State != define.ContainerStateRunning &&
-		c.state.State != define.ContainerStateExited {
+	if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
 		return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
 	}
+
+	defer c.newContainerEvent(events.Attach)
 	return c.attach(streams, keys, resize, false, nil)
 }
@@ -432,7 +424,7 @@ func (c *Container) Unmount(force bool) error {
 			return errors.Wrapf(err, "can't determine how many times %s is mounted, refusing to unmount", c.ID())
 		}
 		if mounted == 1 {
-			if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
+			if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
 				return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
 			}
 			if len(c.state.ExecSessions) != 0 {
@@ -574,7 +566,7 @@ func (c *Container) Cleanup(ctx context.Context) error {
 	}
 
 	// Check if state is good
-	if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
+	if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) {
 		return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
 	}
 
@@ -652,9 +644,7 @@ func (c *Container) Sync() error {
 	// If runtime knows about the container, update its status in runtime
 	// And then save back to disk
-	if (c.state.State != define.ContainerStateUnknown) &&
-		(c.state.State != define.ContainerStateConfigured) &&
-		(c.state.State != define.ContainerStateExited) {
+	if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopped) {
 		oldState := c.state.State
 		if err := c.ociRuntime.UpdateContainerStatus(c); err != nil {
 			return err
@@ -666,6 +656,7 @@
 			}
 		}
 	}
 
+	defer c.newContainerEvent(events.Sync)
 	return nil
 }
@@ -840,12 +831,3 @@ func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOpti
 	defer c.newContainerEvent(events.Restore)
 	return c.restore(ctx, options)
 }
-
-// AutoRemove indicates whether the container will be removed after it is executed
-func (c *Container) AutoRemove() bool {
-	spec := c.config.Spec
-	if spec.Annotations == nil {
-		return false
-	}
-	return c.Spec().Annotations[InspectAnnotationAutoremove] == InspectResponseTrue
-}
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 0043c9651..028d7601d 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -328,7 +328,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
 	// Is the container running again?
 	// If so, we don't have to do anything
-	if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
+	if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
 		return false, nil
 	} else if c.state.State == define.ContainerStateUnknown {
 		return false, errors.Wrapf(define.ErrInternal, "invalid container state encountered in restart attempt!")
@@ -359,8 +359,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
 		if err := c.reinit(ctx, true); err != nil {
 			return false, err
 		}
-	} else if c.state.State == define.ContainerStateConfigured ||
-		c.state.State == define.ContainerStateExited {
+	} else if c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) {
 		// Initialize the container
 		if err := c.init(ctx, true); err != nil {
 			return false, err
@@ -372,6 +371,18 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
 	return true, nil
 }
 
+// Ensure that the container is in a specific state or state.
+// Returns true if the container is in one of the given states,
+// or false otherwise.
+func (c *Container) ensureState(states ...define.ContainerStatus) bool {
+	for _, state := range states {
+		if state == c.state.State {
+			return true
+		}
+	}
+	return false
+}
+
 // Sync this container with on-disk state and runtime status
 // Should only be called with container lock held
 // This function should suffice to ensure a container's state is accurate and
@@ -382,9 +393,7 @@ func (c *Container) syncContainer() error {
 	}
 	// If runtime knows about the container, update its status in runtime
 	// And then save back to disk
-	if (c.state.State != define.ContainerStateUnknown) &&
-		(c.state.State != define.ContainerStateConfigured) &&
-		(c.state.State != define.ContainerStateExited) {
+	if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopped, define.ContainerStatePaused) {
 		oldState := c.state.State
 
 		if err := c.checkExitFile(); err != nil {
@@ -516,7 +525,7 @@ func (c *Container) setupStorage(ctx context.Context) error {
 
 // Tear down a container's storage prior to removal
 func (c *Container) teardownStorage() error {
-	if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
+	if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
 		return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID())
 	}
 
@@ -721,10 +730,7 @@ func (c *Container) save() error {
 // Otherwise, this function will return with error if there are dependencies of this container that aren't running.
 func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err error) {
 	// Container must be created or stopped to be started
-	if !(c.state.State == define.ContainerStateConfigured ||
-		c.state.State == define.ContainerStateCreated ||
-		c.state.State == define.ContainerStateStopped ||
-		c.state.State == define.ContainerStateExited) {
+	if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) {
 		return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
 	}
 
@@ -755,8 +761,7 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err
 		if err := c.reinit(ctx, false); err != nil {
 			return err
 		}
-	} else if c.state.State == define.ContainerStateConfigured ||
-		c.state.State == define.ContainerStateExited {
+	} else if c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) {
 		// Or initialize it if necessary
 		if err := c.init(ctx, false); err != nil {
 			return err
@@ -987,7 +992,7 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
 	// If the container is not ContainerStateStopped or
 	// ContainerStateCreated, do nothing.
-	if c.state.State != define.ContainerStateStopped && c.state.State != define.ContainerStateCreated {
+	if !c.ensureState(define.ContainerStateStopped, define.ContainerStateCreated) {
 		return nil
 	}
 
@@ -1078,8 +1083,7 @@ func (c *Container) initAndStart(ctx context.Context) (err error) {
 		if err := c.reinit(ctx, false); err != nil {
 			return err
 		}
-	} else if c.state.State == define.ContainerStateConfigured ||
-		c.state.State == define.ContainerStateExited {
+	} else if c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) {
 		if err := c.init(ctx, false); err != nil {
 			return err
 		}
@@ -1205,7 +1209,7 @@ func (c *Container) unpause() error {
 
 // Internal, non-locking function to restart a container
 func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err error) {
-	if c.state.State == define.ContainerStateUnknown || c.state.State == define.ContainerStatePaused {
+	if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopped, define.ContainerStateExited) {
 		return errors.Wrapf(define.ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state")
 	}
 
@@ -1733,9 +1737,8 @@ func (c *Container) checkReadyForRemoval() error {
 		return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID())
 	}
 
-	if c.state.State == define.ContainerStateRunning ||
-		c.state.State == define.ContainerStatePaused {
-		return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed", c.ID(), c.state.State.String())
+	if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
+		return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed without force", c.ID(), c.state.State.String())
 	}
 
 	if len(c.state.ExecSessions) != 0 {
@@ -1816,7 +1819,7 @@ func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume
 // Check for an exit file, and handle one if present
 func (c *Container) checkExitFile() error {
 	// If the container's not running, nothing to do.
-	if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
+	if !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
 		return nil
 	}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index b7d353327..283d38a0f 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -550,6 +550,7 @@ func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) erro
 			Options: []string{"bind", "nodev", "noexec", "nosuid"},
 		}
 		g.AddMount(systemdMnt)
+		g.AddLinuxMaskedPaths("/sys/fs/cgroup/systemd/release_agent")
 	}
 
 	return nil
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 4360c8c15..daa0619a2 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -5,6 +5,7 @@ package libpod
 import (
 	"crypto/rand"
 	"fmt"
+	"io/ioutil"
 	"net"
 	"os"
 	"os/exec"
@@ -131,7 +132,7 @@ func checkSlirpFlags(path string) (bool, bool, bool, error) {
 	cmd := exec.Command(path, "--help")
 	out, err := cmd.CombinedOutput()
 	if err != nil {
-		return false, false, false, err
+		return false, false, false, errors.Wrapf(err, "slirp4netns %q", out)
 	}
 	return strings.Contains(string(out), "--disable-host-loopback"), strings.Contains(string(out), "--mtu"), strings.Contains(string(out), "--enable-sandbox"), nil
 }
@@ -158,6 +159,7 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
 
 	havePortMapping := len(ctr.Config().PortMappings) > 0
 	apiSocket := filepath.Join(ctr.runtime.config.TmpDir, fmt.Sprintf("%s.net", ctr.config.ID))
+	logPath := filepath.Join(ctr.runtime.config.TmpDir, fmt.Sprintf("slirp4netns-%s.log", ctr.config.ID))
 
 	cmdArgs := []string{}
 	if havePortMapping {
@@ -165,7 +167,7 @@
 	}
 	dhp, mtu, sandbox, err := checkSlirpFlags(path)
 	if err != nil {
-		return errors.Wrapf(err, "error checking slirp4netns binary %s", path)
+		return errors.Wrapf(err, "error checking slirp4netns binary %s: %q", path, err)
 	}
 	if dhp {
 		cmdArgs = append(cmdArgs, "--disable-host-loopback")
@@ -210,6 +212,18 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
 	// Leak one end of the pipe in slirp4netns, the other will be sent to conmon
 	cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessSlirpSyncR, syncW)
 
+	logFile, err := os.Create(logPath)
+	if err != nil {
+		return errors.Wrapf(err, "failed to open slirp4netns log file %s", logPath)
+	}
+	defer logFile.Close()
+	// Unlink immediately the file so we won't need to worry about cleaning it up later.
+	// It is still accessible through the open fd logFile.
+	if err := os.Remove(logPath); err != nil {
+		return errors.Wrapf(err, "delete file %s", logPath)
+	}
+	cmd.Stdout = logFile
+	cmd.Stderr = logFile
 	if err := cmd.Start(); err != nil {
 		return errors.Wrapf(err, "failed to start slirp4netns process")
 	}
@@ -238,7 +252,15 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
 			continue
 		}
 		if status.Exited() {
-			return errors.New("slirp4netns failed")
+			// Seek at the beginning of the file and read all its content
+			if _, err := logFile.Seek(0, 0); err != nil {
+				logrus.Errorf("could not seek log file: %q", err)
+			}
+			logContent, err := ioutil.ReadAll(logFile)
+			if err != nil {
+				return errors.Wrapf(err, "slirp4netns failed")
+			}
+			return errors.Errorf("slirp4netns failed: %q", logContent)
 		}
 		if status.Signaled() {
 			return errors.New("slirp4netns killed by signal")
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 658a2fe4e..448e05bdf 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -602,7 +602,7 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options
 	if err != nil {
 		return -1, nil, errors.Wrapf(err, "cannot start container %s", c.ID())
 	}
-	if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe, sessionID); err != nil {
+	if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil {
 		return -1, nil, err
 	}
 
@@ -986,7 +986,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
 	if err != nil {
 		return err
 	}
-	if err := r.moveConmonToCgroupAndSignal(ctr, cmd, parentStartPipe, ctr.ID()); err != nil {
+	if err := r.moveConmonToCgroupAndSignal(ctr, cmd, parentStartPipe); err != nil {
 		return err
 	}
 	/* Wait for initial setup and fork, and reap child */
@@ -1213,7 +1213,7 @@ func startCommandGivenSelinux(cmd *exec.Cmd) error {
 
 // moveConmonToCgroupAndSignal gets a container's cgroupParent and moves the conmon process to that cgroup
 // it then signals for conmon to start by sending nonse data down the start fd
-func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec.Cmd, startFd *os.File, uuid string) error {
+func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec.Cmd, startFd *os.File) error {
 	mustCreateCgroup := true
 	// If cgroup creation is disabled - just signal.
 	if ctr.config.NoCgroups {
diff --git a/libpod/options.go b/libpod/options.go
index ddc5993af..f779b0413 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1014,6 +1014,13 @@ func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netmo
 		ctr.config.NetMode = namespaces.NetworkMode(netmode)
 		ctr.config.CreateNetNS = true
 		ctr.config.PortMappings = portMappings
+
+		if rootless.IsRootless() {
+			if len(networks) > 0 {
+				return errors.New("cannot use CNI networks with rootless containers")
+			}
+		}
+
 		ctr.config.Networks = networks
 
 		return nil
@@ -1487,6 +1494,8 @@ func WithVolumeLabels(labels map[string]string) VolumeCreateOption {
 }
 
 // WithVolumeOptions sets the options of the volume.
+// If the "local" driver has been selected, options will be validated. There are
+// currently 3 valid options for the "local" driver - o, type, and device.
 func WithVolumeOptions(options map[string]string) VolumeCreateOption {
 	return func(volume *Volume) error {
 		if volume.valid {
@@ -1495,6 +1504,13 @@ func WithVolumeOptions(options map[string]string) VolumeCreateOption {
 
 		volume.config.Options = make(map[string]string)
 		for key, value := range options {
+			switch key {
+			case "type", "device", "o":
+				volume.config.Options[key] = value
+			default:
+				return errors.Wrapf(define.ErrInvalidArg, "unrecognized volume option %q is not supported with local driver", key)
+			}
+
 			volume.config.Options[key] = value
 		}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 93d6fbead..0405a9b85 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -518,6 +518,17 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
 		return nil, err
 	}
 
+	// storage.conf
+	storageConfFile, err := storage.DefaultConfigFile(rootless.IsRootless())
+	if err != nil {
+		return nil, err
+	}
+
+	createStorageConfFile := false
+	if _, err := os.Stat(storageConfFile); os.IsNotExist(err) {
+		createStorageConfFile = true
+	}
+
 	defRunConf, err := defaultRuntimeConfig()
 	if err != nil {
 		return nil, err
@@ -692,27 +703,21 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
 	}
 
 	if rootless.IsRootless() && configPath == "" {
-		configPath, err := getRootlessConfigPath()
-		if err != nil {
-			return nil, err
-		}
-
-		// storage.conf
-		storageConfFile, err := storage.DefaultConfigFile(rootless.IsRootless())
-		if err != nil {
-			return nil, err
-		}
-		if _, err := os.Stat(storageConfFile); os.IsNotExist(err) {
+		if createStorageConfFile {
 			if err := util.WriteStorageConfigFile(&runtime.config.StorageConfig, storageConfFile); err != nil {
 				return nil, errors.Wrapf(err, "cannot write config file %s", storageConfFile)
 			}
 		}
 
+		configPath, err := getRootlessConfigPath()
+		if err != nil {
+			return nil, err
+		}
 		if configPath != "" {
-			if err := os.MkdirAll(filepath.Dir(configPath), 0755); err != nil {
+			if err := os.MkdirAll(filepath.Dir(configPath), 0711); err != nil {
 				return nil, err
 			}
-			file, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+			file, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
 			if err != nil && !os.IsExist(err) {
 				return nil, errors.Wrapf(err, "cannot open file %s", configPath)
 			}
@@ -1500,6 +1505,25 @@ func (r *Runtime) GetOCIRuntimePath() string {
 // TODO Once runc has support for cgroups, this function should be removed.
 func cgroupV2Check(configPath string, tmpConfig *RuntimeConfig) error {
 	if !tmpConfig.CgroupCheck && rootless.IsRootless() {
+		if tmpConfig.CgroupManager == SystemdCgroupsManager {
+			// If we are running rootless and the systemd manager is requested, be sure that dbus is accessible
+			session := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
+			hasSession := session != ""
+			if hasSession && strings.HasPrefix(session, "unix:path=") {
+				_, err := os.Stat(strings.TrimPrefix(session, "unix:path="))
+				hasSession = err == nil
+			}
+
+			if !hasSession {
+				logrus.Warningf("The cgroups manager is set to systemd but there is no systemd user session available")
+				logrus.Warningf("For using systemd, you may need to login using an user session")
+				logrus.Warningf("Alternatively, you can enable lingering with: `loginctl enable-linger %d` (possibily as root)", rootless.GetRootlessUID())
+				logrus.Warningf("Falling back to --cgroup-manager=cgroupfs")
+
+				tmpConfig.CgroupManager = CgroupfsCgroupsManager
+			}
+
+		}
 		cgroupsV2, err := cgroups.IsCgroup2UnifiedMode()
 		if err != nil {
 			return err
@@ -1513,7 +1537,7 @@ func cgroupV2Check(configPath string, tmpConfig *RuntimeConfig) error {
 		}
 		tmpConfig.CgroupCheck = true
 		tmpConfig.OCIRuntime = path
-		file, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE, 0666)
+		file, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
 		if err != nil {
 			return errors.Wrapf(err, "cannot open file %s", configPath)
 		}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 411264d25..2b214d572 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -295,21 +295,32 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
 	// Maintain an array of them - we need to lock them later.
 	ctrNamedVolumes := make([]*Volume, 0, len(ctr.config.NamedVolumes))
 	for _, vol := range ctr.config.NamedVolumes {
-		// Check if it exists already
-		dbVol, err := r.state.Volume(vol.Name)
-		if err == nil {
-			ctrNamedVolumes = append(ctrNamedVolumes, dbVol)
-			// The volume exists, we're good
-			continue
-		} else if errors.Cause(err) != define.ErrNoSuchVolume {
-			return nil, errors.Wrapf(err, "error retrieving named volume %s for new container", vol.Name)
+		isAnonymous := false
+		if vol.Name == "" {
+			// Anonymous volume. We'll need to create it.
+			// It needs a name first.
+			vol.Name = stringid.GenerateNonCryptoID()
+			isAnonymous = true
+		} else {
+			// Check if it exists already
+			dbVol, err := r.state.Volume(vol.Name)
+			if err == nil {
+				ctrNamedVolumes = append(ctrNamedVolumes, dbVol)
+				// The volume exists, we're good
+				continue
+			} else if errors.Cause(err) != define.ErrNoSuchVolume {
+				return nil, errors.Wrapf(err, "error retrieving named volume %s for new container", vol.Name)
+			}
 		}
 
 		logrus.Debugf("Creating new volume %s for container", vol.Name)
 
 		// The volume does not exist, so we need to create it.
-		newVol, err := r.newVolume(ctx, WithVolumeName(vol.Name), withSetCtrSpecific(),
-			WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID()))
+		volOptions := []VolumeCreateOption{WithVolumeName(vol.Name), WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID())}
+		if isAnonymous {
+			volOptions = append(volOptions, withSetCtrSpecific())
+		}
+		newVol, err := r.newVolume(ctx, volOptions...)
 		if err != nil {
 			return nil, errors.Wrapf(err, "error creating named volume %q", vol.Name)
 		}
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index ba4fff4be..5b05acea4 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -48,6 +48,15 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
 	}
 	volume.config.CreatedTime = time.Now()
 
+	// Check if volume with given name exists.
+	exists, err := r.state.HasVolume(volume.config.Name)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error checking if volume with name %s exists", volume.config.Name)
+	}
+	if exists {
+		return nil, errors.Wrapf(define.ErrVolumeExists, "volume with name %s already exists", volume.config.Name)
+	}
+
 	if volume.config.Driver == define.VolumeDriverLocal {
 		logrus.Debugf("Validating options for local driver")
 		// Validate options
diff --git a/libpod/volume_inspect.go b/libpod/volume_inspect.go
index 87ed9d340..c333b8961 100644
--- a/libpod/volume_inspect.go
+++ b/libpod/volume_inspect.go
@@ -62,6 +62,9 @@ func (v *Volume) Inspect() (*InspectVolumeData, error) {
 	}
 	data.Scope = v.Scope()
 	data.Options = make(map[string]string)
+	for k, v := range v.config.Options {
+		data.Options[k] = v
+	}
 	data.UID = v.config.UID
 	data.GID = v.config.GID
 	data.ContainerSpecific = v.config.IsCtrSpecific
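
The recurring change across container_api.go and container_internal.go is the replacement of chained c.state.State comparisons with the new variadic ensureState helper added in container_internal.go. The standalone Go sketch below illustrates the same pattern; ContainerStatus and Container here are simplified stand-ins for the libpod types, not the real definitions.

package main

import "fmt"

// ContainerStatus is a simplified stand-in for libpod's define.ContainerStatus.
type ContainerStatus int

const (
	StateConfigured ContainerStatus = iota
	StateCreated
	StateRunning
	StatePaused
	StateStopped
	StateExited
)

// Container keeps only the field this example needs.
type Container struct {
	state ContainerStatus
}

// ensureState reports whether the container is in one of the given states,
// mirroring the helper introduced in libpod/container_internal.go.
func (c *Container) ensureState(states ...ContainerStatus) bool {
	for _, s := range states {
		if s == c.state {
			return true
		}
	}
	return false
}

func main() {
	c := &Container{state: StateRunning}

	// Before the refactor: c.state == StateRunning || c.state == StatePaused
	if c.ensureState(StateRunning, StatePaused) {
		fmt.Println("container is running or paused")
	}

	// Negated form, as used by StopWithTimeout for "can only stop created or running containers".
	if !c.ensureState(StateCreated, StateRunning) {
		fmt.Println("refusing to stop container in this state")
	}
}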
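
The networking_linux.go hunks capture slirp4netns output by pointing the child process at a log file that is unlinked right after creation, then reading it back through the still-open descriptor only when the process fails. A rough standalone sketch of that technique follows; the shell command and log path are placeholders, not what libpod actually runs.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
)

func main() {
	// Placeholder child process; libpod runs slirp4netns here.
	cmd := exec.Command("sh", "-c", "echo simulated failure >&2; exit 1")

	logPath := filepath.Join(os.TempDir(), "example-child.log")
	logFile, err := os.Create(logPath)
	if err != nil {
		panic(err)
	}
	defer logFile.Close()

	// Unlink the path right away: the file stays readable through the open
	// descriptor, and there is nothing left on disk to clean up later.
	if err := os.Remove(logPath); err != nil {
		panic(err)
	}

	cmd.Stdout = logFile
	cmd.Stderr = logFile

	if err := cmd.Run(); err != nil {
		// On failure, rewind and include whatever the child wrote in the report.
		if _, err := logFile.Seek(0, 0); err != nil {
			panic(err)
		}
		logContent, readErr := ioutil.ReadAll(logFile)
		if readErr != nil {
			panic(readErr)
		}
		fmt.Printf("child failed: %q\n", logContent)
	}
}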