-rwxr-xr-x  contrib/pkginstaller/scripts/postinstall  21
-rwxr-xr-x  contrib/pkginstaller/scripts/preinstall  4
-rw-r--r--  docs/source/markdown/podman-volume-create.1.md  15
-rw-r--r--  libpod/boltdb_state.go  53
-rw-r--r--  libpod/boltdb_state_internal.go  14
-rw-r--r--  libpod/container_internal_common.go  6
-rw-r--r--  libpod/define/config.go  4
-rw-r--r--  libpod/define/volume_inspect.go  3
-rw-r--r--  libpod/events.go  2
-rw-r--r--  libpod/events/config.go  2
-rw-r--r--  libpod/events/events.go  8
-rw-r--r--  libpod/events/journal_linux.go  6
-rw-r--r--  libpod/runtime.go  3
-rw-r--r--  libpod/runtime_cstorage.go  11
-rw-r--r--  libpod/runtime_ctr.go  2
-rw-r--r--  libpod/runtime_img.go  21
-rw-r--r--  libpod/runtime_volume_linux.go  61
-rw-r--r--  libpod/state.go  8
-rw-r--r--  libpod/volume.go  17
-rw-r--r--  libpod/volume_inspect.go  1
-rw-r--r--  libpod/volume_internal.go  5
-rw-r--r--  libpod/volume_internal_linux.go  16
-rw-r--r--  pkg/domain/entities/events.go  5
-rw-r--r--  pkg/domain/infra/abi/terminal/sigproxy_commn.go  16
-rw-r--r--  pkg/domain/infra/tunnel/containers.go  7
-rw-r--r--  pkg/domain/infra/tunnel/runtime.go  31
-rw-r--r--  pkg/signal/signal_common.go  15
-rw-r--r--  pkg/signal/signal_common_test.go  49
-rw-r--r--  pkg/signal/signal_linux.go  8
-rw-r--r--  pkg/signal/signal_linux_mipsx.go  8
-rw-r--r--  pkg/signal/signal_unix.go  8
-rw-r--r--  pkg/signal/signal_unsupported.go  6
-rw-r--r--  pkg/util/utils_freebsd.go  2
-rw-r--r--  pkg/util/utils_linux.go  5
-rw-r--r--  podman.spec.rpkg  2
-rw-r--r--  test/apiv2/27-containersEvents.at  2
-rw-r--r--  test/e2e/config/containers.conf  1
-rw-r--r--  test/e2e/events_test.go  12
-rw-r--r--  test/e2e/volume_create_test.go  54
-rw-r--r--  test/e2e/volume_plugin_test.go  9
-rw-r--r--  test/system/030-run.bats  18
-rw-r--r--  test/system/032-sig-proxy.bats  43
42 files changed, 534 insertions, 50 deletions
diff --git a/contrib/pkginstaller/scripts/postinstall b/contrib/pkginstaller/scripts/postinstall
index db17eede8..c62971a14 100755
--- a/contrib/pkginstaller/scripts/postinstall
+++ b/contrib/pkginstaller/scripts/postinstall
@@ -2,26 +2,7 @@
set -e
-BZSH_PODMAN_PATH_EXP='PATH="/opt/podman/bin:$PATH"'
-FISH_PODMAN_PATH_EXP='set PATH "/opt/podman/bin $PATH"'
-BASHRC_PATH="$HOME/.bash_profile"
-ZSHENV_PATH="$HOME/.zshenv"
-ZSHRC_PATH="$HOME/.zshrc"
-FSHCFG_PATH="$HOME/.config/fish/config.fish"
-
-# append /Applications/podman/bin to $PATH
-if [ -f "$BASHRC_PATH" ]; then
- grep -Fxq "$BZSH_PODMAN_PATH_EXP" "$BASHRC_PATH" || echo "$BZSH_PODMAN_PATH_EXP" >> "$BASHRC_PATH"
-fi
-if [ -f "$ZSHENV_PATH" ]; then
- grep -Fxq "$BZSH_PODMAN_PATH_EXP" "$ZSHENV_PATH" || echo "$BZSH_PODMAN_PATH_EXP" >> "$ZSHENV_PATH"
-fi
-if [ -f "$ZSHRC_PATH" ]; then
- grep -Fxq "$BZSH_PODMAN_PATH_EXP" "$ZSHRC_PATH" || echo "$BZSH_PODMAN_PATH_EXP" >> "$ZSHRC_PATH"
-fi
-if [ -f "$FSHCFG_PATH" ]; then
- grep -Fxq "$FISH_PODMAN_PATH_EXP" "$FSHCFG_PATH" || echo "$FISH_PODMAN_PATH_EXP" >> "$FSHCFG_PATH"
-fi
+echo "/opt/podman/bin" > /etc/paths.d/podman-pkg
ln -s /opt/podman/bin/podman-mac-helper /opt/podman/qemu/bin/podman-mac-helper
ln -s /opt/podman/bin/gvproxy /opt/podman/qemu/bin/gvproxy
diff --git a/contrib/pkginstaller/scripts/preinstall b/contrib/pkginstaller/scripts/preinstall
index a381868fc..22336222f 100755
--- a/contrib/pkginstaller/scripts/preinstall
+++ b/contrib/pkginstaller/scripts/preinstall
@@ -3,3 +3,7 @@
set -e
rm -rf /opt/podman
+
+if [ ! -d "/etc/paths.d" ]; then
+ mkdir -p /etc/paths.d
+fi
diff --git a/docs/source/markdown/podman-volume-create.1.md b/docs/source/markdown/podman-volume-create.1.md
index 934488111..1e99df55a 100644
--- a/docs/source/markdown/podman-volume-create.1.md
+++ b/docs/source/markdown/podman-volume-create.1.md
@@ -17,7 +17,13 @@ driver options can be set using the **--opt** flag.
#### **--driver**=*driver*
-Specify the volume driver name (default **local**). Setting this to a value other than **local** Podman attempts to create the volume using a volume plugin with the given name. Such plugins must be defined in the **volume_plugins** section of the **[containers.conf(5)](https://github.com/containers/common/blob/main/docs/containers.conf.5.md)** configuration file.
+Specify the volume driver name (default **local**).
+There are two drivers supported by Podman itself: **local** and **image**.
+The **local** driver uses a directory on disk as the backend by default, but can also use the **mount(8)** command to mount a filesystem as the volume if **--opt** is specified.
+The **image** driver uses an image as the backing store for the volume.
+An overlay filesystem will be created, which allows changes to the volume to be committed as a new layer on top of the image.
+Using a value other than **local** or **image**, Podman will attempt to create the volume using a volume plugin with the given name.
+Such plugins must be defined in the **volume_plugins** section of the **[containers.conf(5)](https://github.com/containers/common/blob/main/docs/containers.conf.5.md)** configuration file.
#### **--help**
@@ -43,7 +49,10 @@ The `o` option sets options for the mount, and is equivalent to the `-o` flag to
- The `o` option supports using volume options other than the UID/GID options with the **local** driver and requires root privileges.
- The `o` option supports the `timeout` option which allows users to set a driver-specific timeout in seconds before volume creation fails. For example, **--opts=o=timeout=10** sets a driver timeout of 10 seconds.
-When not using the **local** driver, the given options are passed directly to the volume plugin. In this case, supported options are dictated by the plugin in question, not Podman.
+For the **image** driver, the only supported option is `image`, which specifies the image the volume is based on.
+This option is mandatory when using the **image** driver.
+
+When not using the **local** or **image** drivers, the given options are passed directly to the volume plugin. In this case, supported options are dictated by the plugin in question, not Podman.
## EXAMPLES
@@ -57,6 +66,8 @@ $ podman volume create --label foo=bar myvol
# podman volume create --opt device=tmpfs --opt type=tmpfs --opt o=nodev,noexec myvol
# podman volume create --opt device=tmpfs --opt type=tmpfs --opt o=uid=1000,gid=1000 testvol
+
+# podman volume create --driver image --opt image=fedora:latest fedoraVol
```
## QUOTAS
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 4fd95a3cf..77c234892 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -109,6 +109,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
runtimeConfigBkt,
exitCodeBkt,
exitCodeTimeStampBkt,
+ volCtrsBkt,
}
// Does the DB need an update?
@@ -2551,6 +2552,11 @@ func (s *BoltState) AddVolume(volume *Volume) error {
return err
}
+ volCtrsBkt, err := getVolumeContainersBucket(tx)
+ if err != nil {
+ return err
+ }
+
// Check if we already have a volume with the given name
volExists := allVolsBkt.Get(volName)
if volExists != nil {
@@ -2580,6 +2586,12 @@ func (s *BoltState) AddVolume(volume *Volume) error {
}
}
+ if volume.config.StorageID != "" {
+ if err := volCtrsBkt.Put([]byte(volume.config.StorageID), volName); err != nil {
+ return fmt.Errorf("storing volume %s container ID in DB: %w", volume.Name(), err)
+ }
+ }
+
if err := allVolsBkt.Put(volName, volName); err != nil {
return fmt.Errorf("storing volume %s in all volumes bucket in DB: %w", volume.Name(), err)
}
@@ -2619,6 +2631,11 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
return err
}
+ volCtrIDBkt, err := getVolumeContainersBucket(tx)
+ if err != nil {
+ return err
+ }
+
// Check if the volume exists
volDB := volBkt.Bucket(volName)
if volDB == nil {
@@ -2665,6 +2682,11 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
if err := volBkt.DeleteBucket(volName); err != nil {
return fmt.Errorf("removing volume %s from DB: %w", volume.Name(), err)
}
+ if volume.config.StorageID != "" {
+ if err := volCtrIDBkt.Delete([]byte(volume.config.StorageID)); err != nil {
+ return fmt.Errorf("removing volume %s container ID from DB: %w", volume.Name(), err)
+ }
+ }
return nil
})
@@ -3618,3 +3640,34 @@ func (s *BoltState) AllPods() ([]*Pod, error) {
return pods, nil
}
+
+// ContainerIDIsVolume checks if the given c/storage container ID is used as
+// backing storage for a volume.
+func (s *BoltState) ContainerIDIsVolume(id string) (bool, error) {
+ if !s.valid {
+ return false, define.ErrDBClosed
+ }
+
+ isVol := false
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return false, err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ volCtrsBkt, err := getVolumeContainersBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volName := volCtrsBkt.Get([]byte(id))
+ if volName != nil {
+ isVol = true
+ }
+
+ return nil
+ })
+ return isVol, err
+}
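
As an aside, a minimal sketch (the caller below is hypothetical, not part of this commit) of how the new state method is meant to be consulted before treating a raw c/storage container ID as an ordinary, removable container; the real consumers are in runtime_cstorage.go and runtime_img.go later in this diff:

package example

import "github.com/containers/podman/v4/libpod"

// isRemovableStorageContainer reports whether a c/storage container ID refers to
// an ordinary container, or whether it is the backing store of an image-driver
// volume recorded in the new volume-ctrs bucket.
func isRemovableStorageContainer(s libpod.State, storageID string) (bool, error) {
	isVol, err := s.ContainerIDIsVolume(storageID)
	if err != nil {
		return false, err
	}
	return !isVol, nil
}
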
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 87f1fa4eb..7f2d49b31 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -28,6 +28,7 @@ const (
execName = "exec"
aliasesName = "aliases"
runtimeConfigName = "runtime-config"
+ volumeCtrsName = "volume-ctrs"
exitCodeName = "exit-code"
exitCodeTimeStampName = "exit-code-time-stamp"
@@ -67,6 +68,7 @@ var (
dependenciesBkt = []byte(dependenciesName)
volDependenciesBkt = []byte(volCtrDependencies)
networksBkt = []byte(networksName)
+ volCtrsBkt = []byte(volumeCtrsName)
exitCodeBkt = []byte(exitCodeName)
exitCodeTimeStampBkt = []byte(exitCodeTimeStampName)
@@ -384,6 +386,14 @@ func getExitCodeTimeStampBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
return bkt, nil
}
+func getVolumeContainersBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
+ bkt := tx.Bucket(volCtrsBkt)
+ if bkt == nil {
+ return nil, fmt.Errorf("volume containers bucket not found in DB: %w", define.ErrDBBadConfig)
+ }
+ return bkt, nil
+}
+
func (s *BoltState) getContainerConfigFromDB(id []byte, config *ContainerConfig, ctrsBkt *bolt.Bucket) error {
ctrBkt := ctrsBkt.Bucket(id)
if ctrBkt == nil {
@@ -528,6 +538,9 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
}
}
+ // Need this for UsesVolumeDriver() so set it now.
+ volume.runtime = s.runtime
+
// Retrieve volume driver
if volume.UsesVolumeDriver() {
plugin, err := s.runtime.getVolumePlugin(volume.config)
@@ -550,7 +563,6 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
}
volume.lock = lock
- volume.runtime = s.runtime
volume.valid = true
return nil
diff --git a/libpod/container_internal_common.go b/libpod/container_internal_common.go
index 874e9affe..29107d4b6 100644
--- a/libpod/container_internal_common.go
+++ b/libpod/container_internal_common.go
@@ -109,7 +109,11 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// If the flag to mount all devices is set for a privileged container, add
// all the devices from the host's machine into the container
if c.config.MountAllDevices {
- if err := util.AddPrivilegedDevices(&g); err != nil {
+ systemdMode := false
+ if c.config.Systemd != nil {
+ systemdMode = *c.config.Systemd
+ }
+ if err := util.AddPrivilegedDevices(&g, systemdMode); err != nil {
return nil, err
}
}
diff --git a/libpod/define/config.go b/libpod/define/config.go
index 1fad5cc9a..0427206ed 100644
--- a/libpod/define/config.go
+++ b/libpod/define/config.go
@@ -40,6 +40,10 @@ type InfoData struct {
// itself.
const VolumeDriverLocal = "local"
+// VolumeDriverImage is the "image" volume driver. It is managed by Libpod and
+// uses volumes backed by an image.
+const VolumeDriverImage = "image"
+
const (
OCIManifestDir = "oci-dir"
OCIArchive = "oci-archive"
diff --git a/libpod/define/volume_inspect.go b/libpod/define/volume_inspect.go
index 76120647c..4d6f12080 100644
--- a/libpod/define/volume_inspect.go
+++ b/libpod/define/volume_inspect.go
@@ -58,6 +58,9 @@ type InspectVolumeData struct {
NeedsChown bool `json:"NeedsChown,omitempty"`
// Timeout is the specified driver timeout if given
Timeout uint `json:"Timeout,omitempty"`
+ // StorageID is the ID of the container backing the volume in c/storage.
+ // Only used with Image Volumes.
+ StorageID string `json:"StorageID,omitempty"`
}
type VolumeReload struct {
diff --git a/libpod/events.go b/libpod/events.go
index 2f9799114..31a857a7d 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -34,6 +34,7 @@ func (c *Container) newContainerEvent(status events.Status) {
e.Details = events.Details{
ID: e.ID,
+ PodID: c.PodID(),
Attributes: c.Labels(),
}
@@ -59,6 +60,7 @@ func (c *Container) newContainerExitedEvent(exitCode int32) {
e.Name = c.Name()
e.Image = c.config.RootfsImageName
e.Type = events.Container
+ e.PodID = c.PodID()
e.ContainerExitCode = int(exitCode)
e.Details = events.Details{
diff --git a/libpod/events/config.go b/libpod/events/config.go
index 4ea45a00e..28bc87a34 100644
--- a/libpod/events/config.go
+++ b/libpod/events/config.go
@@ -50,6 +50,8 @@ type Event struct {
type Details struct {
// ID is the event ID
ID string
+ // PodID is the ID of the pod associated with the container.
+ PodID string `json:",omitempty"`
// Attributes can be used to describe specifics about the event
// in the case of a container event, labels for example
Attributes map[string]string
diff --git a/libpod/events/events.go b/libpod/events/events.go
index 764481e51..2105a3b89 100644
--- a/libpod/events/events.go
+++ b/libpod/events/events.go
@@ -76,7 +76,13 @@ func (e *Event) ToHumanReadable(truncate bool) string {
}
switch e.Type {
case Container, Pod:
- humanFormat = fmt.Sprintf("%s %s %s %s (image=%s, name=%s, health_status=%s", e.Time, e.Type, e.Status, id, e.Image, e.Name, e.HealthStatus)
+ humanFormat = fmt.Sprintf("%s %s %s %s (image=%s, name=%s", e.Time, e.Type, e.Status, id, e.Image, e.Name)
+ if e.PodID != "" {
+ humanFormat += fmt.Sprintf(", pod_id=%s", e.PodID)
+ }
+ if e.HealthStatus != "" {
+ humanFormat += fmt.Sprintf(", health_status=%s", e.HealthStatus)
+ }
// check if the container has labels and add it to the output
if len(e.Attributes) > 0 {
for k, v := range e.Attributes {
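
For illustration, a standalone sketch of the line layout produced by the updated ToHumanReadable: pod_id and health_status are appended only when set. All field values below are made up.

package main

import "fmt"

func main() {
	// Mirrors the conditional formatting above with hypothetical values.
	line := fmt.Sprintf("%s %s %s %s (image=%s, name=%s",
		"2022-07-28 10:00:00 +0000 UTC", "container", "start",
		"1a2b3c4d5e6f", "quay.io/libpod/alpine:latest", "web")
	podID := "9f8e7d6c5b4a"
	healthStatus := ""
	if podID != "" {
		line += fmt.Sprintf(", pod_id=%s", podID)
	}
	if healthStatus != "" {
		line += fmt.Sprintf(", health_status=%s", healthStatus)
	}
	fmt.Println(line + ")")
	// Prints roughly:
	// 2022-07-28 10:00:00 +0000 UTC container start 1a2b3c4d5e6f (image=quay.io/libpod/alpine:latest, name=web, pod_id=9f8e7d6c5b4a)
}
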
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 4986502a2..e303a205d 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -50,6 +50,9 @@ func (e EventJournalD) Write(ee Event) error {
if ee.ContainerExitCode != 0 {
m["PODMAN_EXIT_CODE"] = strconv.Itoa(ee.ContainerExitCode)
}
+ if ee.PodID != "" {
+ m["PODMAN_POD_ID"] = ee.PodID
+ }
// If we have container labels, we need to convert them to a string so they
// can be recorded with the event
if len(ee.Details.Attributes) > 0 {
@@ -161,6 +164,7 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) {
case Container, Pod:
newEvent.ID = entry.Fields["PODMAN_ID"]
newEvent.Image = entry.Fields["PODMAN_IMAGE"]
+ newEvent.PodID = entry.Fields["PODMAN_POD_ID"]
if code, ok := entry.Fields["PODMAN_EXIT_CODE"]; ok {
intCode, err := strconv.Atoi(code)
if err != nil {
@@ -179,7 +183,7 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) {
// if we have labels, add them to the event
if len(labels) > 0 {
- newEvent.Details = Details{Attributes: labels}
+ newEvent.Attributes = labels
}
}
newEvent.HealthStatus = entry.Fields["PODMAN_HEALTH_STATUS"]
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 83c9f53e2..f250d759c 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -1091,6 +1091,9 @@ func (r *Runtime) getVolumePlugin(volConfig *VolumeConfig) (*plugin.VolumePlugin
pluginPath, ok := r.config.Engine.VolumePlugins[name]
if !ok {
+ if name == define.VolumeDriverImage {
+ return nil, nil
+ }
return nil, fmt.Errorf("no volume plugin with name %s available: %w", name, define.ErrMissingPlugin)
}
diff --git a/libpod/runtime_cstorage.go b/libpod/runtime_cstorage.go
index 372434b49..5917b7931 100644
--- a/libpod/runtime_cstorage.go
+++ b/libpod/runtime_cstorage.go
@@ -86,6 +86,17 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
return fmt.Errorf("refusing to remove %q as it exists in libpod as container %s: %w", idOrName, ctr.ID, define.ErrCtrExists)
}
+ // Error out if this is an image-backed volume
+ allVols, err := r.state.AllVolumes()
+ if err != nil {
+ return err
+ }
+ for _, vol := range allVols {
+ if vol.config.Driver == define.VolumeDriverImage && vol.config.StorageID == ctr.ID {
+ return fmt.Errorf("refusing to remove %q as it exists in libpod as an image-backed volume %s: %w", idOrName, vol.Name(), define.ErrCtrExists)
+ }
+ }
+
if !force {
timesMounted, err := r.store.Mounted(ctr.ID)
if err != nil {
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 7b3cbadfa..bb30078cb 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -512,7 +512,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
volOptions = append(volOptions, parsedOptions...)
}
}
- newVol, err := r.newVolume(false, volOptions...)
+ newVol, err := r.newVolume(ctx, false, volOptions...)
if err != nil {
return nil, fmt.Errorf("creating named volume %q: %w", vol.Name, err)
}
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index 87b77c3eb..d8e88ca50 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -51,6 +51,23 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage
}
}
}
+
+ // Need to handle volumes with the image driver
+ vols, err := r.state.AllVolumes()
+ if err != nil {
+ return err
+ }
+ for _, vol := range vols {
+ if vol.config.Driver != define.VolumeDriverImage || vol.config.StorageImageID != imageID {
+ continue
+ }
+ // Do a force removal of the volume, and all containers
+ // using it.
+ if err := r.RemoveVolume(ctx, vol, true, nil); err != nil {
+ return fmt.Errorf("removing image %s: volume %s backed by image could not be removed: %w", imageID, vol.Name(), err)
+ }
+ }
+
// Note that `libimage` will take care of removing any leftover
// containers from the storage.
return nil
@@ -74,6 +91,10 @@ func (r *Runtime) IsExternalContainerCallback(_ context.Context) libimage.IsExte
if errors.Is(err, define.ErrNoSuchCtr) {
return true, nil
}
+ isVol, err := r.state.ContainerIDIsVolume(idOrName)
+ if err == nil && !isVol {
+ return true, nil
+ }
return false, nil
}
}
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 08fdbf977..c59417979 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -15,6 +15,7 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
volplugin "github.com/containers/podman/v4/libpod/plugin"
+ "github.com/containers/storage"
"github.com/containers/storage/drivers/quota"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/stringid"
@@ -22,18 +23,20 @@ import (
"github.com/sirupsen/logrus"
)
+const volumeSuffix = "+volume"
+
// NewVolume creates a new empty volume
func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
if !r.valid {
return nil, define.ErrRuntimeStopped
}
- return r.newVolume(false, options...)
+ return r.newVolume(ctx, false, options...)
}
// newVolume creates a new empty volume with the given options.
// The createPluginVolume can be set to true to make it not create the volume in the volume plugin,
// this is required for the UpdateVolumePlugins() function. If you are not sure, set this to false.
-func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOption) (_ *Volume, deferredErr error) {
+func (r *Runtime) newVolume(ctx context.Context, noCreatePluginVolume bool, options ...VolumeCreateOption) (_ *Volume, deferredErr error) {
volume := newVolume(r)
for _, option := range options {
if err := option(volume); err != nil {
@@ -83,6 +86,50 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
return nil, fmt.Errorf("invalid mount option %s for driver 'local': %w", key, define.ErrInvalidArg)
}
}
+ } else if volume.config.Driver == define.VolumeDriverImage && !volume.UsesVolumeDriver() {
+ logrus.Debugf("Creating image-based volume")
+ var imgString string
+ // Validate options
+ for key, val := range volume.config.Options {
+ switch strings.ToLower(key) {
+ case "image":
+ imgString = val
+ default:
+ return nil, fmt.Errorf("invalid mount option %s for driver 'image': %w", key, define.ErrInvalidArg)
+ }
+ }
+
+ if imgString == "" {
+ return nil, fmt.Errorf("must provide an image name when creating a volume with the image driver: %w", define.ErrInvalidArg)
+ }
+
+ // Look up the image
+ image, _, err := r.libimageRuntime.LookupImage(imgString, nil)
+ if err != nil {
+ return nil, fmt.Errorf("looking up image %s to create volume failed: %w", imgString, err)
+ }
+
+ // Generate a c/storage name and ID for the volume.
+ // Use characters Podman does not allow for the name, to ensure
+ // no collision with containers.
+ volume.config.StorageID = stringid.GenerateRandomID()
+ volume.config.StorageName = volume.config.Name + volumeSuffix
+ volume.config.StorageImageID = image.ID()
+
+ // Create a backing container in c/storage.
+ storageConfig := storage.ContainerOptions{
+ LabelOpts: []string{"filetype:container_file_t:s0"},
+ }
+ if _, err := r.storageService.CreateContainerStorage(ctx, r.imageContext, imgString, image.ID(), volume.config.StorageName, volume.config.StorageID, storageConfig); err != nil {
+ return nil, fmt.Errorf("creating backing storage for image driver: %w", err)
+ }
+ defer func() {
+ if deferredErr != nil {
+ if err := r.storageService.DeleteContainer(volume.config.StorageID); err != nil {
+ logrus.Errorf("Error removing volume %s backing storage: %v", volume.config.Name, err)
+ }
+ }
+ }()
}
// Now we get conditional: we either need to make the volume in the
@@ -196,7 +243,7 @@ func (r *Runtime) UpdateVolumePlugins(ctx context.Context) *define.VolumeReload
}
for _, vol := range vols {
allPluginVolumes[vol.Name] = struct{}{}
- if _, err := r.newVolume(true, WithVolumeName(vol.Name), WithVolumeDriver(driverName)); err != nil {
+ if _, err := r.newVolume(ctx, true, WithVolumeName(vol.Name), WithVolumeDriver(driverName)); err != nil {
// If the volume exists this is not an error, just ignore it and log. It is very likely
// that the volume from the plugin was already in our db.
if !errors.Is(err, define.ErrVolumeExists) {
@@ -375,6 +422,14 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
return fmt.Errorf("volume %s could not be removed from plugin %s: %w", v.Name(), v.Driver(), err)
}
}
+ } else if v.config.Driver == define.VolumeDriverImage {
+ if err := v.runtime.storageService.DeleteContainer(v.config.StorageID); err != nil {
+ // Storage container is already gone, no problem.
+ if !(errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown)) {
+ return fmt.Errorf("removing volume %s storage: %w", v.Name(), err)
+ }
+ logrus.Infof("Storage for volume %s already removed", v.Name())
+ }
}
// Remove the volume from the state
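
A hedged sketch of exercising the new image-driver path through the runtime API; the WithVolume* helpers are assumed to be libpod's existing VolumeCreateOption constructors, and the volume and image names are examples only:

package example

import (
	"context"

	"github.com/containers/podman/v4/libpod"
	"github.com/containers/podman/v4/libpod/define"
)

// createImageVolume asks libpod for a volume whose backing store is a c/storage
// container created from the given image, i.e. the code path added above.
func createImageVolume(ctx context.Context, r *libpod.Runtime) (*libpod.Volume, error) {
	return r.NewVolume(ctx,
		libpod.WithVolumeName("fedoraVol"),
		libpod.WithVolumeDriver(define.VolumeDriverImage),
		// "image" is the only option the image driver accepts, and it is mandatory.
		libpod.WithVolumeOptions(map[string]string{"image": "fedora:latest"}),
	)
}
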
diff --git a/libpod/state.go b/libpod/state.go
index 4fbd3c302..9d9604563 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -144,6 +144,14 @@ type State interface {
// As with RemoveExecSession, container state will not be modified.
RemoveContainerExecSessions(ctr *Container) error
+ // ContainerIDIsVolume checks if the given container ID is in use by a
+ // volume.
+ // Some volumes are backed by a c/storage container. These do not have a
+ // corresponding Container struct in Libpod, but rather a Volume.
+ // This determines if a given ID from c/storage is used as a backend by
+ // a Podman volume.
+ ContainerIDIsVolume(id string) (bool, error)
+
// PLEASE READ FULL DESCRIPTION BEFORE USING.
// Rewrite a container's configuration.
// This function breaks libpod's normal prohibition on a read-only
diff --git a/libpod/volume.go b/libpod/volume.go
index a054e4032..2d4ea4280 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -57,6 +57,15 @@ type VolumeConfig struct {
DisableQuota bool `json:"disableQuota,omitempty"`
// Timeout allows users to override the default driver timeout of 5 seconds
Timeout *uint `json:"timeout,omitempty"`
+ // StorageName is the name of the volume in c/storage. Only used for
+ // image volumes.
+ StorageName string `json:"storageName,omitempty"`
+ // StorageID is the ID of the volume in c/storage. Only used for image
+ // volumes.
+ StorageID string `json:"storageID,omitempty"`
+ // StorageImageID is the ID of the image the volume was based off of.
+ // Only used for image volumes.
+ StorageImageID string `json:"storageImageID,omitempty"`
}
// VolumeState holds the volume's mutable state.
@@ -149,7 +158,7 @@ func (v *Volume) MountCount() (uint, error) {
// Internal-only helper for volume mountpoint
func (v *Volume) mountPoint() string {
- if v.UsesVolumeDriver() {
+ if v.UsesVolumeDriver() || v.config.Driver == define.VolumeDriverImage {
return v.state.MountPoint
}
@@ -250,6 +259,12 @@ func (v *Volume) IsDangling() (bool, error) {
// drivers are pluggable backends for volumes that will manage the storage and
// mounting.
func (v *Volume) UsesVolumeDriver() bool {
+ if v.config.Driver == define.VolumeDriverImage {
+ if _, ok := v.runtime.config.Engine.VolumePlugins[v.config.Driver]; ok {
+ return true
+ }
+ return false
+ }
return !(v.config.Driver == define.VolumeDriverLocal || v.config.Driver == "")
}
diff --git a/libpod/volume_inspect.go b/libpod/volume_inspect.go
index 73441576b..31fbd5eff 100644
--- a/libpod/volume_inspect.go
+++ b/libpod/volume_inspect.go
@@ -64,6 +64,7 @@ func (v *Volume) Inspect() (*define.InspectVolumeData, error) {
data.MountCount = v.state.MountCount
data.NeedsCopyUp = v.state.NeedsCopyUp
data.NeedsChown = v.state.NeedsChown
+ data.StorageID = v.config.StorageID
if v.config.Timeout != nil {
data.Timeout = *v.config.Timeout
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 43c3f9b0b..14b852f8e 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -39,6 +39,11 @@ func (v *Volume) needsMount() bool {
return true
}
+ // Image driver always needs mount
+ if v.config.Driver == define.VolumeDriverImage {
+ return true
+ }
+
// Commit 28138dafcc added the UID and GID options to this map
// However we should only mount when options other than uid and gid are set.
// see https://github.com/containers/podman/issues/10620
diff --git a/libpod/volume_internal_linux.go b/libpod/volume_internal_linux.go
index cfd60554d..440bceec3 100644
--- a/libpod/volume_internal_linux.go
+++ b/libpod/volume_internal_linux.go
@@ -66,6 +66,15 @@ func (v *Volume) mount() error {
v.state.MountCount++
v.state.MountPoint = mountPoint
return v.save()
+ } else if v.config.Driver == define.VolumeDriverImage {
+ mountPoint, err := v.runtime.storageService.MountContainerImage(v.config.StorageID)
+ if err != nil {
+ return fmt.Errorf("mounting volume %s image failed: %w", v.Name(), err)
+ }
+
+ v.state.MountCount++
+ v.state.MountPoint = mountPoint
+ return v.save()
}
volDevice := v.config.Options["device"]
@@ -161,6 +170,13 @@ func (v *Volume) unmount(force bool) error {
v.state.MountPoint = ""
return v.save()
+ } else if v.config.Driver == define.VolumeDriverImage {
+ if _, err := v.runtime.storageService.UnmountContainerImage(v.config.StorageID, force); err != nil {
+ return fmt.Errorf("unmounting volume %s image: %w", v.Name(), err)
+ }
+
+ v.state.MountPoint = ""
+ return v.save()
}
// Unmount the volume
diff --git a/pkg/domain/entities/events.go b/pkg/domain/entities/events.go
index de218b285..34a6fe048 100644
--- a/pkg/domain/entities/events.go
+++ b/pkg/domain/entities/events.go
@@ -14,7 +14,7 @@ type Event struct {
// TODO: it would be nice to have full control over the types at some
// point and fork such Docker types.
dockerEvents.Message
- HealthStatus string
+ HealthStatus string `json:",omitempty"`
}
// ConvertToLibpodEvent converts an entities event to a libpod one.
@@ -34,6 +34,7 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event {
image := e.Actor.Attributes["image"]
name := e.Actor.Attributes["name"]
details := e.Actor.Attributes
+ podID := e.Actor.Attributes["podId"]
delete(details, "image")
delete(details, "name")
delete(details, "containerExitCode")
@@ -47,6 +48,7 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event {
Type: t,
HealthStatus: e.HealthStatus,
Details: libpodEvents.Details{
+ PodID: podID,
Attributes: details,
},
}
@@ -61,6 +63,7 @@ func ConvertToEntitiesEvent(e libpodEvents.Event) *Event {
attributes["image"] = e.Image
attributes["name"] = e.Name
attributes["containerExitCode"] = strconv.Itoa(e.ContainerExitCode)
+ attributes["podId"] = e.PodID
message := dockerEvents.Message{
// Compatibility with clients that still look for deprecated API elements
Status: e.Status.String(),
diff --git a/pkg/domain/infra/abi/terminal/sigproxy_commn.go b/pkg/domain/infra/abi/terminal/sigproxy_commn.go
index 3a0132ef3..d42685508 100644
--- a/pkg/domain/infra/abi/terminal/sigproxy_commn.go
+++ b/pkg/domain/infra/abi/terminal/sigproxy_commn.go
@@ -15,33 +15,25 @@ import (
"github.com/sirupsen/logrus"
)
-// Make sure the signal buffer is sufficiently big.
-// runc is using the same value.
-const signalBufferSize = 2048
-
// ProxySignals ...
func ProxySignals(ctr *libpod.Container) {
// Stop catching the shutdown signals (SIGINT, SIGTERM) - they're going
// to the container now.
shutdown.Stop() //nolint: errcheck
- sigBuffer := make(chan os.Signal, signalBufferSize)
+ sigBuffer := make(chan os.Signal, signal.SignalBufferSize)
signal.CatchAll(sigBuffer)
logrus.Debugf("Enabling signal proxying")
go func() {
for s := range sigBuffer {
- // Ignore SIGCHLD and SIGPIPE - these are mostly likely
- // intended for the podman command itself.
- // SIGURG was added because of golang 1.14 and its preemptive changes
- // causing more signals to "show up".
- // https://github.com/containers/podman/issues/5483
- if s == syscall.SIGCHLD || s == syscall.SIGPIPE || s == syscall.SIGURG {
+ syscallSignal := s.(syscall.Signal)
+ if signal.IsSignalIgnoredBySigProxy(syscallSignal) {
continue
}
- if err := ctr.Kill(uint(s.(syscall.Signal))); err != nil {
+ if err := ctr.Kill(uint(syscallSignal)); err != nil {
if errors.Is(err, define.ErrCtrStateInvalid) {
logrus.Infof("Ceasing signal forwarding to container %s as it has stopped", ctr.ID())
} else {
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index c82c9ba33..0b573686f 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -828,6 +828,13 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
}
// Attach
+ if opts.SigProxy {
+ remoteProxySignals(con.ID, func(signal string) error {
+ killOpts := entities.KillOptions{All: false, Latest: false, Signal: signal}
+ _, err := ic.ContainerKill(ctx, []string{con.ID}, killOpts)
+ return err
+ })
+ }
if err := startAndAttach(ic, con.ID, &opts.DetachKeys, opts.InputStream, opts.OutputStream, opts.ErrorStream); err != nil {
if err == define.ErrDetach {
return &report, nil
diff --git a/pkg/domain/infra/tunnel/runtime.go b/pkg/domain/infra/tunnel/runtime.go
index 6542ea5b7..75bd4ef5e 100644
--- a/pkg/domain/infra/tunnel/runtime.go
+++ b/pkg/domain/infra/tunnel/runtime.go
@@ -2,6 +2,12 @@ package tunnel
import (
"context"
+ "os"
+ "syscall"
+
+ "github.com/containers/podman/v4/libpod/define"
+ "github.com/containers/podman/v4/pkg/signal"
+ "github.com/sirupsen/logrus"
)
// Image-related runtime using an ssh-tunnel to utilize Podman service
@@ -18,3 +24,28 @@ type ContainerEngine struct {
type SystemEngine struct {
ClientCtx context.Context
}
+
+func remoteProxySignals(ctrID string, killFunc func(string) error) {
+ sigBuffer := make(chan os.Signal, signal.SignalBufferSize)
+ signal.CatchAll(sigBuffer)
+
+ logrus.Debugf("Enabling signal proxying")
+
+ go func() {
+ for s := range sigBuffer {
+ syscallSignal := s.(syscall.Signal)
+ if signal.IsSignalIgnoredBySigProxy(syscallSignal) {
+ continue
+ }
+ signalName, err := signal.ParseSysSignalToName(syscallSignal)
+ if err != nil {
+ logrus.Infof("Ceasing signal %v forwarding to container %s as it has stopped: %s", s, ctrID, err)
+ }
+ if err := killFunc(signalName); err != nil {
+ if err.Error() == define.ErrCtrStateInvalid.Error() {
+ logrus.Debugf("Ceasing signal %q forwarding to container %s as it has stopped", signalName, ctrID)
+ }
+ }
+ }
+ }()
+}
diff --git a/pkg/signal/signal_common.go b/pkg/signal/signal_common.go
index fc1ecc04d..a81d0461b 100644
--- a/pkg/signal/signal_common.go
+++ b/pkg/signal/signal_common.go
@@ -9,6 +9,10 @@ import (
"syscall"
)
+// Make sure the signal buffer is sufficiently big.
+// runc is using the same value.
+const SignalBufferSize = 2048
+
// ParseSignal translates a string to a valid syscall signal.
// It returns an error if the signal map doesn't include the given signal.
func ParseSignal(rawSignal string) (syscall.Signal, error) {
@@ -56,3 +60,14 @@ func StopCatch(sigc chan os.Signal) {
signal.Stop(sigc)
close(sigc)
}
+
+// ParseSysSignalToName translates syscall.Signal to its name in the operating system.
+// For example, syscall.Signal(9) will return "KILL" on Linux system.
+func ParseSysSignalToName(s syscall.Signal) (string, error) {
+ for k, v := range SignalMap {
+ if v == s {
+ return k, nil
+ }
+ }
+ return "", fmt.Errorf("unknown syscall signal: %s", s)
+}
diff --git a/pkg/signal/signal_common_test.go b/pkg/signal/signal_common_test.go
index c4ae6b389..bd9b230f7 100644
--- a/pkg/signal/signal_common_test.go
+++ b/pkg/signal/signal_common_test.go
@@ -118,3 +118,52 @@ func TestParseSignalNameOrNumber(t *testing.T) {
})
}
}
+
+func TestParseSysSignalToName(t *testing.T) {
+ type args struct {
+ signal syscall.Signal
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ wantErr bool
+ }{
+ {
+ name: "Kill should work",
+ args: args{
+ signal: syscall.SIGKILL,
+ },
+ want: "KILL",
+ wantErr: false,
+ },
+ {
+ name: "Non-defined signal number should not work",
+ args: args{
+ signal: 923,
+ },
+ want: "",
+ wantErr: true,
+ },
+ {
+ name: "garbage should fail",
+ args: args{
+ signal: -1,
+ },
+ want: "",
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := ParseSysSignalToName(tt.args.signal)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("ParseSysSignalToName() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != tt.want {
+ t.Errorf("ParseSysSignalToName() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go
index 5103b6033..81e4ed758 100644
--- a/pkg/signal/signal_linux.go
+++ b/pkg/signal/signal_linux.go
@@ -89,3 +89,11 @@ var SignalMap = map[string]syscall.Signal{
"RTMAX-1": sigrtmax - 1,
"RTMAX": sigrtmax,
}
+
+// IsSignalIgnoredBySigProxy determines whether sig-proxy should ignore syscall signal
+func IsSignalIgnoredBySigProxy(s syscall.Signal) bool {
+ // Ignore SIGCHLD and SIGPIPE - these are most likely intended for the podman command itself.
+ // SIGURG was added because of golang 1.14 and its preemptive changes causing more signals to "show up".
+ // https://github.com/containers/podman/issues/5483
+ return s == syscall.SIGCHLD || s == syscall.SIGPIPE || s == syscall.SIGURG
+}
diff --git a/pkg/signal/signal_linux_mipsx.go b/pkg/signal/signal_linux_mipsx.go
index cdf9ad4c5..c97eeb23d 100644
--- a/pkg/signal/signal_linux_mipsx.go
+++ b/pkg/signal/signal_linux_mipsx.go
@@ -90,3 +90,11 @@ var SignalMap = map[string]syscall.Signal{
"RTMAX-1": sigrtmax - 1,
"RTMAX": sigrtmax,
}
+
+// IsSignalIgnoredBySigProxy determines whether sig-proxy should ignore syscall signal
+func IsSignalIgnoredBySigProxy(s syscall.Signal) bool {
+ // Ignore SIGCHLD and SIGPIPE - these are most likely intended for the podman command itself.
+ // SIGURG was added because of golang 1.14 and its preemptive changes causing more signals to "show up".
+ // https://github.com/containers/podman/issues/5483
+ return s == syscall.SIGCHLD || s == syscall.SIGPIPE || s == syscall.SIGURG
+}
diff --git a/pkg/signal/signal_unix.go b/pkg/signal/signal_unix.go
index 7919e3670..01d99d7bc 100644
--- a/pkg/signal/signal_unix.go
+++ b/pkg/signal/signal_unix.go
@@ -87,3 +87,11 @@ var SignalMap = map[string]syscall.Signal{
"RTMAX-1": sigrtmax - 1,
"RTMAX": sigrtmax,
}
+
+// IsSignalIgnoredBySigProxy determines whether sig-proxy should ignore syscall signal
+func IsSignalIgnoredBySigProxy(s syscall.Signal) bool {
+ // Ignore SIGCHLD and SIGPIPE - these are most likely intended for the podman command itself.
+ // SIGURG was added because of golang 1.14 and its preemptive changes causing more signals to "show up".
+ // https://github.com/containers/podman/issues/5483
+ return s == syscall.SIGCHLD || s == syscall.SIGPIPE || s == syscall.SIGURG
+}
diff --git a/pkg/signal/signal_unsupported.go b/pkg/signal/signal_unsupported.go
index 19ae93a61..590aaf978 100644
--- a/pkg/signal/signal_unsupported.go
+++ b/pkg/signal/signal_unsupported.go
@@ -87,3 +87,9 @@ var SignalMap = map[string]syscall.Signal{
"RTMAX-1": sigrtmax - 1,
"RTMAX": sigrtmax,
}
+
+// IsSignalIgnoredBySigProxy determines whether sig-proxy should ignore a syscall signal.
+// On unsupported OSes, no signals are ignored.
+func IsSignalIgnoredBySigProxy(s syscall.Signal) bool {
+ return false
+}
diff --git a/pkg/util/utils_freebsd.go b/pkg/util/utils_freebsd.go
index 9b0d7c8c7..ba91308af 100644
--- a/pkg/util/utils_freebsd.go
+++ b/pkg/util/utils_freebsd.go
@@ -13,6 +13,6 @@ func GetContainerPidInformationDescriptors() ([]string, error) {
return []string{}, errors.New("this function is not supported on freebsd")
}
-func AddPrivilegedDevices(g *generate.Generator) error {
+func AddPrivilegedDevices(g *generate.Generator, systemdMode bool) error {
return nil
}
diff --git a/pkg/util/utils_linux.go b/pkg/util/utils_linux.go
index 7b2d98666..07927db1c 100644
--- a/pkg/util/utils_linux.go
+++ b/pkg/util/utils_linux.go
@@ -70,7 +70,7 @@ func FindDeviceNodes() (map[string]string, error) {
return nodes, nil
}
-func AddPrivilegedDevices(g *generate.Generator) error {
+func AddPrivilegedDevices(g *generate.Generator, systemdMode bool) error {
hostDevices, err := getDevices("/dev")
if err != nil {
return err
@@ -104,6 +104,9 @@ func AddPrivilegedDevices(g *generate.Generator) error {
}
} else {
for _, d := range hostDevices {
+ if systemdMode && strings.HasPrefix(d.Path, "/dev/tty") {
+ continue
+ }
g.AddDevice(d)
}
// Add resources device - need to clear the existing one first.
diff --git a/podman.spec.rpkg b/podman.spec.rpkg
index f970e5f25..eeaf49f63 100644
--- a/podman.spec.rpkg
+++ b/podman.spec.rpkg
@@ -6,7 +6,7 @@
%global with_debug 1
# _user_tmpfiles.d currently undefined on rhel
-%if 0%{?rhel}
+%if 0%{?fedora} <= 35 || 0%{?rhel}
%global _user_tmpfilesdir %{_datadir}/user-tmpfiles.d
%endif
diff --git a/test/apiv2/27-containersEvents.at b/test/apiv2/27-containersEvents.at
index e0a66e0ac..a5b5b24a3 100644
--- a/test/apiv2/27-containersEvents.at
+++ b/test/apiv2/27-containersEvents.at
@@ -20,7 +20,7 @@ t GET "libpod/events?stream=false&since=$START" 200 \
t GET "libpod/events?stream=false&since=$START" 200 \
'select(.status | contains("start")).Action=start' \
- 'select(.status | contains("start")).HealthStatus='\
+ 'select(.status | contains("start")).HealthStatus=null'\
# compat api, uses status=die (#12643)
t GET "events?stream=false&since=$START" 200 \
diff --git a/test/e2e/config/containers.conf b/test/e2e/config/containers.conf
index 94bb316b1..3cf20268c 100644
--- a/test/e2e/config/containers.conf
+++ b/test/e2e/config/containers.conf
@@ -76,3 +76,4 @@ testvol6 = "/run/docker/plugins/testvol6.sock"
testvol7 = "/run/docker/plugins/testvol7.sock"
testvol8 = "/run/docker/plugins/testvol8.sock"
testvol9 = "/run/docker/plugins/testvol9.sock"
+image = "/run/docker/plugins/image.sock"
diff --git a/test/e2e/events_test.go b/test/e2e/events_test.go
index bba42c3f4..c76919581 100644
--- a/test/e2e/events_test.go
+++ b/test/e2e/events_test.go
@@ -212,6 +212,16 @@ var _ = Describe("Podman events", func() {
Expect(result).Should(Exit(0))
Expect(result.OutputToStringArray()).To(HaveLen(1))
Expect(result.OutputToString()).To(ContainSubstring("create"))
+
+ ctrName := "testCtr"
+ run := podmanTest.Podman([]string{"create", "--pod", id, "--name", ctrName, ALPINE, "top"})
+ run.WaitWithDefaultTimeout()
+ Expect(run).Should(Exit(0))
+
+ result2 := podmanTest.Podman([]string{"events", "--stream=false", "--filter", fmt.Sprintf("container=%s", ctrName), "--since", "30s"})
+ result2.WaitWithDefaultTimeout()
+ Expect(result2).Should(Exit(0))
+ Expect(result2.OutputToString()).To(ContainSubstring(fmt.Sprintf("pod_id=%s", id)))
})
It("podman events health_status generated", func() {
@@ -229,7 +239,7 @@ var _ = Describe("Podman events", func() {
time.Sleep(1 * time.Second)
}
- result := podmanTest.Podman([]string{"events", "--stream=false", "--filter", "event=health_status"})
+ result := podmanTest.Podman([]string{"events", "--stream=false", "--filter", "event=health_status", "--since", "1m"})
result.WaitWithDefaultTimeout()
Expect(result).Should(Exit(0))
Expect(len(result.OutputToStringArray())).To(BeNumerically(">=", 1), "Number of health_status events")
diff --git a/test/e2e/volume_create_test.go b/test/e2e/volume_create_test.go
index 499283cab..5dfa4d0fc 100644
--- a/test/e2e/volume_create_test.go
+++ b/test/e2e/volume_create_test.go
@@ -162,4 +162,58 @@ var _ = Describe("Podman volume create", func() {
Expect(inspectOpts).Should(Exit(0))
Expect(inspectOpts.OutputToString()).To(Equal(optionStrFormatExpect))
})
+
+ It("image-backed volume basic functionality", func() {
+ podmanTest.AddImageToRWStore(fedoraMinimal)
+ volName := "testvol"
+ volCreate := podmanTest.Podman([]string{"volume", "create", "--driver", "image", "--opt", fmt.Sprintf("image=%s", fedoraMinimal), volName})
+ volCreate.WaitWithDefaultTimeout()
+ Expect(volCreate).Should(Exit(0))
+
+ runCmd := podmanTest.Podman([]string{"run", "-v", fmt.Sprintf("%s:/test", volName), ALPINE, "cat", "/test/etc/redhat-release"})
+ runCmd.WaitWithDefaultTimeout()
+ Expect(runCmd).Should(Exit(0))
+ Expect(runCmd.OutputToString()).To(ContainSubstring("Fedora"))
+
+ rmCmd := podmanTest.Podman([]string{"rmi", "--force", fedoraMinimal})
+ rmCmd.WaitWithDefaultTimeout()
+ Expect(rmCmd).Should(Exit(0))
+
+ psCmd := podmanTest.Podman([]string{"ps", "-aq"})
+ psCmd.WaitWithDefaultTimeout()
+ Expect(psCmd).Should(Exit(0))
+ Expect(psCmd.OutputToString()).To(BeEmpty())
+
+ volumesCmd := podmanTest.Podman([]string{"volume", "ls", "-q"})
+ volumesCmd.WaitWithDefaultTimeout()
+ Expect(volumesCmd).Should(Exit(0))
+ Expect(volumesCmd.OutputToString()).To(Not(ContainSubstring(volName)))
+ })
+
+ It("image-backed volume force removal", func() {
+ podmanTest.AddImageToRWStore(fedoraMinimal)
+ volName := "testvol"
+ volCreate := podmanTest.Podman([]string{"volume", "create", "--driver", "image", "--opt", fmt.Sprintf("image=%s", fedoraMinimal), volName})
+ volCreate.WaitWithDefaultTimeout()
+ Expect(volCreate).Should(Exit(0))
+
+ runCmd := podmanTest.Podman([]string{"run", "-v", fmt.Sprintf("%s:/test", volName), ALPINE, "cat", "/test/etc/redhat-release"})
+ runCmd.WaitWithDefaultTimeout()
+ Expect(runCmd).Should(Exit(0))
+ Expect(runCmd.OutputToString()).To(ContainSubstring("Fedora"))
+
+ rmCmd := podmanTest.Podman([]string{"volume", "rm", "--force", volName})
+ rmCmd.WaitWithDefaultTimeout()
+ Expect(rmCmd).Should(Exit(0))
+
+ psCmd := podmanTest.Podman([]string{"ps", "-aq"})
+ psCmd.WaitWithDefaultTimeout()
+ Expect(psCmd).Should(Exit(0))
+ Expect(psCmd.OutputToString()).To(BeEmpty())
+
+ volumesCmd := podmanTest.Podman([]string{"volume", "ls", "-q"})
+ volumesCmd.WaitWithDefaultTimeout()
+ Expect(volumesCmd).Should(Exit(0))
+ Expect(volumesCmd.OutputToString()).To(Not(ContainSubstring(volName)))
+ })
})
diff --git a/test/e2e/volume_plugin_test.go b/test/e2e/volume_plugin_test.go
index 33cdcce5b..00498431e 100644
--- a/test/e2e/volume_plugin_test.go
+++ b/test/e2e/volume_plugin_test.go
@@ -60,7 +60,8 @@ var _ = Describe("Podman volume plugins", func() {
Expect(err).ToNot(HaveOccurred())
// Keep this distinct within tests to avoid multiple tests using the same plugin.
- pluginName := "testvol1"
+	// This one verifies that a volume plugin named "image" is used, not the built-in "image" driver.
+ pluginName := "image"
plugin := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "-v", "/run/docker/plugins:/run/docker/plugins", "-v", fmt.Sprintf("%v:%v", pluginStatePath, pluginStatePath), "-d", volumeTest, "--sock-name", pluginName, "--path", pluginStatePath})
plugin.WaitWithDefaultTimeout()
Expect(plugin).Should(Exit(0))
@@ -77,6 +78,12 @@ var _ = Describe("Podman volume plugins", func() {
Expect(arrOutput).To(HaveLen(1))
Expect(arrOutput[0]).To(ContainSubstring(volName))
+ // Verify this is not an image volume.
+ inspect := podmanTest.Podman([]string{"volume", "inspect", volName, "--format", "{{.StorageID}}"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect).Should(Exit(0))
+ Expect(inspect.OutputToString()).To(BeEmpty())
+
remove := podmanTest.Podman([]string{"volume", "rm", volName})
remove.WaitWithDefaultTimeout()
Expect(remove).Should(Exit(0))
diff --git a/test/system/030-run.bats b/test/system/030-run.bats
index 2abf749a1..65a1150a3 100644
--- a/test/system/030-run.bats
+++ b/test/system/030-run.bats
@@ -901,4 +901,22 @@ $IMAGE--c_ok" \
run_podman rm $ctr_name
}
+@test "podman run --privileged as root with systemd will not mount /dev/tty" {
+ skip_if_rootless "this test only makes sense as root"
+
+ ctr_name="container-$(random_string 5)"
+ run_podman run --rm -d --privileged --systemd=always --name "$ctr_name" "$IMAGE" /home/podman/pause
+
+ TTYs=$(ls /dev/tty*|sed '/^\/dev\/tty$/d')
+
+ if [[ $TTYs = "" ]]; then
+ die "Did not find any /dev/ttyN devices on local host"
+ else
+ run_podman exec "$ctr_name" ls /dev/
+ assert "$(grep tty <<<$output)" = "tty" "There must be no /dev/ttyN devices in the container"
+ fi
+
+ run_podman stop "$ctr_name"
+}
+
# vim: filetype=sh
diff --git a/test/system/032-sig-proxy.bats b/test/system/032-sig-proxy.bats
new file mode 100644
index 000000000..686df0e1b
--- /dev/null
+++ b/test/system/032-sig-proxy.bats
@@ -0,0 +1,43 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "podman sigkill" {
+ $PODMAN run -i --name foo $IMAGE sh -c 'trap "echo BYE;exit 0" INT;echo READY;while :;do sleep 0.1;done' &
+ local kidpid=$!
+
+ # Wait for container to appear
+ local timeout=5
+ while :;do
+ sleep 0.5
+ run_podman '?' container exists foo
+ if [[ $status -eq 0 ]]; then
+ break
+ fi
+ timeout=$((timeout - 1))
+ if [[ $timeout -eq 0 ]]; then
+ die "Timed out waiting for container to start"
+ fi
+ done
+
+ wait_for_ready foo
+
+ # Signal, and wait for container to exit
+ kill -INT $kidpid
+ local timeout=5
+ while :;do
+ sleep 0.5
+ run_podman logs foo
+ if [[ "$output" =~ BYE ]]; then
+ break
+ fi
+ timeout=$((timeout - 1))
+ if [[ $timeout -eq 0 ]]; then
+ die "Timed out waiting for BYE from container"
+ fi
+ done
+
+ run_podman rm -f -t0 foo
+}
+
+# vim: filetype=sh