-rw-r--r--  .cirrus.yml                                            5
-rw-r--r--  cmd/podman/cliconfig/config.go                         1
-rw-r--r--  cmd/podman/sign.go                                    19
-rw-r--r--  completions/bash/podman                                1
-rw-r--r--  contrib/cirrus/99-do-not-use-google-subnets.conflist  21
-rw-r--r--  contrib/cirrus/lib.sh                                 16
-rwxr-xr-x  contrib/cirrus/setup_environment.sh                   11
-rwxr-xr-x  contrib/imgprune/entrypoint.sh                        63
-rw-r--r--  docs/podman-image-sign.1.md                           15
-rw-r--r--  libpod/boltdb_state.go                                46
-rw-r--r--  libpod/boltdb_state_internal.go                        7
-rw-r--r--  libpod/in_memory_state.go                             20
-rw-r--r--  libpod/runtime.go                                      4
-rw-r--r--  libpod/runtime_ctr.go                                 15
-rw-r--r--  libpod/runtime_renumber.go                            17
-rw-r--r--  libpod/runtime_volume.go                               4
-rw-r--r--  libpod/runtime_volume_linux.go                        37
-rw-r--r--  libpod/state.go                                       10
-rw-r--r--  libpod/volume.go                                      36
-rw-r--r--  test/e2e/run_volume_test.go                            8
-rw-r--r--  test/e2e/systemd_test.go                              48
21 files changed, 351 insertions(+), 53 deletions(-)
diff --git a/.cirrus.yml b/.cirrus.yml
index 3c50332e2..8d915fbfe 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -274,6 +274,7 @@ meta_task:
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
GCPJSON: ENCRYPTED[950d9c64ad78f7b1f0c7e499b42dc058d2b23aa67e38b315e68f557f2aba0bf83068d4734f7b1e1bdd22deabe99629df]
+ # needed for output-masking purposes
GCPNAME: ENCRYPTED[b05d469a0dba8cb479cb00cc7c1f6747c91d17622fba260a986b976aa6c817d4077eacffd4613d6d5f23afc4084fab1d]
GCPPROJECT: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f]
@@ -298,9 +299,11 @@ image_prune_task:
memory: 1
env:
- <<: *meta_env_vars
+ # order is significant; Cirrus does not always override alias values as intended
GCPJSON: ENCRYPTED[4c11d8e09c904c30fc70eecb95c73dec0ddf19976f9b981a0f80f3f6599e8f990bcef93c253ac0277f200850d98528e7]
GCPNAME: ENCRYPTED[7f54557ba6e5a437f11283a53e71baec9ca546f48a9835538cc54d297f79968eb1337d4596a1025b14f9d1c5723fbd29]
+ GCPPROJECT: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f]
+ <<: *meta_env_vars
timeout_in: 10m
diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go
index 9bc47333d..98e7aed4b 100644
--- a/cmd/podman/cliconfig/config.go
+++ b/cmd/podman/cliconfig/config.go
@@ -509,6 +509,7 @@ type SignValues struct {
PodmanCommand
Directory string
SignBy string
+ CertDir string
}
type StartValues struct {
diff --git a/cmd/podman/sign.go b/cmd/podman/sign.go
index de289047a..63ba9b904 100644
--- a/cmd/podman/sign.go
+++ b/cmd/podman/sign.go
@@ -46,7 +46,7 @@ func init() {
flags := signCommand.Flags()
flags.StringVarP(&signCommand.Directory, "directory", "d", "", "Define an alternate directory to store signatures")
flags.StringVar(&signCommand.SignBy, "sign-by", "", "Name of the signing key")
-
+ flags.StringVar(&signCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
}
// SignatureStoreDir defines default directory to store signatures
@@ -76,6 +76,13 @@ func signCmd(c *cliconfig.SignValues) error {
}
}
+ sc := runtime.SystemContext()
+ sc.DockerCertPath = c.CertDir
+
+ dockerRegistryOptions := image.DockerRegistryOptions{
+ DockerCertPath: c.CertDir,
+ }
+
mech, err := signature.NewGPGSigningMechanism()
if err != nil {
return errors.Wrap(err, "error initializing GPG")
@@ -85,7 +92,7 @@ func signCmd(c *cliconfig.SignValues) error {
return errors.Wrap(err, "signing is not supported")
}
- systemRegistriesDirPath := trust.RegistriesDirPath(runtime.SystemContext())
+ systemRegistriesDirPath := trust.RegistriesDirPath(sc)
registryConfigs, err := trust.LoadAndMergeConfig(systemRegistriesDirPath)
if err != nil {
return errors.Wrapf(err, "error reading registry configuration")
@@ -96,10 +103,15 @@ func signCmd(c *cliconfig.SignValues) error {
if err != nil {
return errors.Wrapf(err, "error parsing image name")
}
- rawSource, err := srcRef.NewImageSource(getContext(), runtime.SystemContext())
+ rawSource, err := srcRef.NewImageSource(getContext(), sc)
if err != nil {
return errors.Wrapf(err, "error getting image source")
}
+ defer func() {
+ if err := rawSource.Close(); err != nil {
+ logrus.Errorf("unable to close image source: %v", err)
+ }
+ }()
manifest, _, err := rawSource.GetManifest(getContext(), nil)
if err != nil {
return errors.Wrapf(err, "error getting manifest")
@@ -114,7 +125,7 @@ func signCmd(c *cliconfig.SignValues) error {
if err != nil {
return err
}
- newImage, err := runtime.ImageRuntime().New(getContext(), signimage, rtc.SignaturePolicyPath, "", os.Stderr, nil, image.SigningOptions{SignBy: signby}, nil, util.PullImageMissing)
+ newImage, err := runtime.ImageRuntime().New(getContext(), signimage, rtc.SignaturePolicyPath, "", os.Stderr, &dockerRegistryOptions, image.SigningOptions{SignBy: signby}, nil, util.PullImageMissing)
if err != nil {
return errors.Wrapf(err, "error pulling image %s", signimage)
}
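
The --cert-dir value is threaded through two structures: the shared types.SystemContext, which trust.RegistriesDirPath and NewImageSource consult, and image.DockerRegistryOptions, which carries the certificate path into the pull performed by ImageRuntime().New. A minimal sketch of the SystemContext half, assuming the containers/image API as vendored at the time (fetchManifest and its arguments are illustrative, not podman code):

    package main

    import (
        "context"
        "fmt"

        "github.com/containers/image/transports/alltransports"
        "github.com/containers/image/types"
    )

    // fetchManifest shows how a --cert-dir value reaches the registry client:
    // DockerCertPath overrides the default /etc/containers/certs.d lookup for
    // *.crt, *.cert and *.key files.
    func fetchManifest(ctx context.Context, imageName, certDir string) ([]byte, error) {
        sys := &types.SystemContext{DockerCertPath: certDir}

        ref, err := alltransports.ParseImageName("docker://" + imageName)
        if err != nil {
            return nil, err
        }
        src, err := ref.NewImageSource(ctx, sys)
        if err != nil {
            return nil, err
        }
        defer src.Close() // release the registry connection once the manifest is read

        manifest, _, err := src.GetManifest(ctx, nil)
        return manifest, err
    }

    func main() {
        m, err := fetchManifest(context.Background(), "docker.io/library/busybox:latest", "/tmp/certs")
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Printf("fetched %d-byte manifest\n", len(m))
    }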
diff --git a/completions/bash/podman b/completions/bash/podman
index 7280f4040..e6ffb135f 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -2669,6 +2669,7 @@ _podman_container_runlabel() {
_podman_image_sign() {
local options_with_args="
+ --cert-dir
-d
--directory
--sign-by
diff --git a/contrib/cirrus/99-do-not-use-google-subnets.conflist b/contrib/cirrus/99-do-not-use-google-subnets.conflist
new file mode 100644
index 000000000..e9ab638ed
--- /dev/null
+++ b/contrib/cirrus/99-do-not-use-google-subnets.conflist
@@ -0,0 +1,21 @@
+{
+ "cniVersion": "0.4.0",
+ "name": "do-not-use-google-subnets",
+ "plugins": [
+ {
+ "type": "bridge",
+ "name": "do-not-use-google-subnets",
+ "bridge": "do-not-use-google-subnets",
+ "ipam": {
+ "type": "host-local",
+ "ranges": [
+ [
+ {
+ "subnet": "10.128.0.0/9"
+ }
+ ]
+ ]
+ }
+ }
+ ]
+}
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index 555f3e717..f66e63140 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -323,13 +323,15 @@ EOF
install_test_configs(){
echo "Installing cni config, policy and registry config"
- req_env_var GOSRC
- sudo install -D -m 755 $GOSRC/cni/87-podman-bridge.conflist \
- /etc/cni/net.d/87-podman-bridge.conflist
- sudo install -D -m 755 $GOSRC/test/policy.json \
- /etc/containers/policy.json
- sudo install -D -m 755 $GOSRC/test/registries.conf \
- /etc/containers/registries.conf
+ req_env_var GOSRC SCRIPT_BASE
+ cd $GOSRC
+ install -v -D -m 644 ./cni/87-podman-bridge.conflist /etc/cni/net.d/
+ # This config must always sort last in the list of networks (podman picks the
+ # first one as the default). It prevents allocation of the network address space
+ # used by default in Google Cloud: https://cloud.google.com/vpc/docs/vpc#ip-ranges
+ install -v -D -m 644 $SCRIPT_BASE/99-do-not-use-google-subnets.conflist /etc/cni/net.d/
+ install -v -D -m 644 ./test/policy.json /etc/containers/
+ install -v -D -m 644 ./test/registries.conf /etc/containers/
}
# Remove all files (except conmon, for now) provided by the distro version of podman.
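
The "99-" prefix matters because CNI config files are evaluated in lexical order and podman treats the first config as its default network, so the Google-subnet blocker only has to sort last. A small Go sketch of that selection rule (defaultNetworkConfig is a hypothetical helper, not podman's actual loader):

    package main

    import (
        "fmt"
        "path/filepath"
        "sort"
    )

    // defaultNetworkConfig returns the config podman would treat as the
    // default network: the lexically first *.conflist in the directory.
    func defaultNetworkConfig(netDir string) (string, error) {
        files, err := filepath.Glob(filepath.Join(netDir, "*.conflist"))
        if err != nil {
            return "", err
        }
        if len(files) == 0 {
            return "", fmt.Errorf("no CNI configs in %s", netDir)
        }
        sort.Strings(files) // "87-podman-bridge" sorts before "99-do-not-use-google-subnets"
        return files[0], nil
    }

    func main() {
        cfg, err := defaultNetworkConfig("/etc/cni/net.d")
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println("default network config:", cfg)
    }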
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index 323a05489..7c7659169 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -44,11 +44,15 @@ case "${OS_REL_VER}" in
;;
fedora-30) ;& # continue to next item
fedora-29)
+ # All SELinux distros need this for systemd-in-a-container
+ setsebool container_manage_cgroup true
if [[ "$ADD_SECOND_PARTITION" == "true" ]]; then
bash "$SCRIPT_BASE/add_second_partition.sh"; fi
;;
centos-7) # Current VM is an image-builder-image no local podman/testing
- echo "No further setup required for VM image building"
+ echo "No further setup required for VM image building"
+ # All SELinux distros need this for systemd-in-a-container
+ setsebool container_manage_cgroup true
exit 0
;;
*) bad_os_id_ver ;;
@@ -57,8 +61,7 @@ esac
# Reload to incorporate any changes from above
source "$SCRIPT_BASE/lib.sh"
-install_test_configs
-
+# Must execute before possible setup_rootless()
make install.tools
case "$SPECIALMODE" in
@@ -92,3 +95,5 @@ case "$SPECIALMODE" in
*)
die 111 "Unsupported \$SPECIALMODE: $SPECIALMODE"
esac
+
+install_test_configs
diff --git a/contrib/imgprune/entrypoint.sh b/contrib/imgprune/entrypoint.sh
index a4b77523b..829e9938e 100755
--- a/contrib/imgprune/entrypoint.sh
+++ b/contrib/imgprune/entrypoint.sh
@@ -6,27 +6,49 @@ source /usr/local/bin/lib_entrypoint.sh
req_env_var GCPJSON GCPNAME GCPPROJECT IMGNAMES
+BASE_IMAGES=""
+# When executing under Cirrus-CI, we have access to the current source
+if [[ "$CI" == "true" ]] && [[ -r "$CIRRUS_WORKING_DIR/$SCRIPT_BASE" ]]
+then
+ # Avoid importing anything that might conflict
+ eval "$(egrep -sh '^export .+BASE_IMAGE=' < $CIRRUS_WORKING_DIR/$SCRIPT_BASE/lib.sh)"
+ BASE_IMAGES="$UBUNTU_BASE_IMAGE $PRIOR_UBUNTU_BASE_IMAGE $FEDORA_BASE_IMAGE $PRIOR_FEDORA_BASE_IMAGE"
+else
+ # metadata labeling may break at some point in the future
+ echo "Warning: Running outside of Cirrus-CI; very minor risk of base-image deletion."
+fi
+
gcloud_init
# For safety's sake + limit nr background processes
-PRUNE_LIMIT=10
+PRUNE_LIMIT=5
THEFUTURE=$(date --date='+1 hour' +%s)
-TOO_OLD='90 days ago'
+TOO_OLD='30 days ago'
THRESHOLD=$(date --date="$TOO_OLD" +%s)
# Format Ref: https://cloud.google.com/sdk/gcloud/reference/topic/formats
FORMAT='value[quote](name,selfLink,creationTimestamp,labels)'
PROJRE="/v1/projects/$GCPPROJECT/global/"
-BASE_IMAGE_RE='cloud-base'
-RECENTLY=$(date --date='30 days ago' --iso-8601=date)
-EXCLUDE="$IMGNAMES $IMAGE_BUILDER_CACHE_IMAGE_NAME" # whitespace separated values
+RECENTLY=$(date --date='3 days ago' --iso-8601=date)
# Filter Ref: https://cloud.google.com/sdk/gcloud/reference/topic/filters
-FILTER="selfLink~$PROJRE AND creationTimestamp<$RECENTLY AND NOT name=($EXCLUDE)"
+FILTER="selfLink~$PROJRE AND creationTimestamp<$RECENTLY AND NOT name=($IMGNAMES $BASE_IMAGES)"
TODELETE=$(mktemp -p '' todelete.XXXXXX)
+IMGCOUNT=$(mktemp -p '' imgcount.XXXXXX)
+
+# Search-loop runs in a sub-process, must store count in file
+echo "0" > "$IMGCOUNT"
+count_image() {
+ local count
+ count=$(<"$IMGCOUNT")
+ let 'count+=1'
+ echo "$count" > "$IMGCOUNT"
+}
-echo "Searching images for pruning candidates older than $TOO_OLD ($THRESHOLD):"
+echo "Using filter: $FILTER"
+echo "Searching images for pruning candidates older than $TOO_OLD ($(date --date="$TOO_OLD" --iso-8601=date)):"
$GCLOUD compute images list --format="$FORMAT" --filter="$FILTER" | \
while read name selfLink creationTimestamp labels
do
+ count_image
created_ymd=$(date --date=$creationTimestamp --iso-8601=date)
last_used=$(egrep --only-matching --max-count=1 'last-used=[[:digit:]]+' <<< $labels || true)
markmsgpfx="Marking $name (created $created_ymd) for deletion"
@@ -52,16 +74,29 @@ $GCLOUD compute images list --format="$FORMAT" --filter="$FILTER" | \
echo "$name" >> $TODELETE
continue
fi
-
- echo "NOT $markmsgpfx: last used on $last_used_ymd)"
done
-echo "Pruning up to $PRUNE_LIMIT images that were marked for deletion:"
-for image_name in $(tail -$PRUNE_LIMIT $TODELETE | sort --random-sort)
+COUNT=$(<"$IMGCOUNT")
+echo "########################################################################"
+echo "Deleting up to $PRUNE_LIMIT images marked ($(wc -l < $TODELETE)) of all searched ($COUNT):"
+
+# Require a minimum number of images to exist
+NEED="$[$PRUNE_LIMIT*2]"
+if [[ "$COUNT" -lt "$NEED" ]]
+then
+ die 0 Safety-net Insufficient images \($COUNT\) to process deletions \($NEED\)
+ exit 0
+fi
+
+for image_name in $(sort --random-sort $TODELETE | tail -$PRUNE_LIMIT)
do
- # This can take quite some time (minutes), run in parallel disconnected from terminal
- echo "TODO: Would have: $GCLOUD compute images delete $image_name &"
- sleep "$[1+RANDOM/1000]s" & # Simlate background operation
+ if echo "$IMGNAMES $BASE_IMAGES" | grep -q "$image_name"
+ then
+ # double-verify in-use images were filtered out in search loop above
+ die 8 FATAL ATTEMPT TO DELETE IN-USE IMAGE \'$image_name\' - THIS SHOULD NEVER HAPPEN
+ fi
+ echo "Deleting $image_name in parallel..."
+ $GCLOUD compute images delete $image_name &
done
wait || true # Nothing to delete: No background jobs
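
The rewritten prune loop adds two guard rails: deletion only proceeds when the searched population is at least twice PRUNE_LIMIT, and every randomly sampled candidate is re-checked against the in-use names before gcloud runs. The same selection logic as a Go sketch (the names and keep-list are illustrative):

    package main

    import (
        "fmt"
        "math/rand"
    )

    // pickForDeletion mirrors the script's safety net: refuse to act on a
    // small population, sample at most limit candidates at random, and
    // verify that nothing in keep is ever selected.
    func pickForDeletion(candidates []string, keep map[string]bool, total, limit int) ([]string, error) {
        if total < 2*limit {
            return nil, fmt.Errorf("safety net: only %d images searched, need %d", total, 2*limit)
        }
        rand.Shuffle(len(candidates), func(i, j int) {
            candidates[i], candidates[j] = candidates[j], candidates[i]
        })
        if len(candidates) > limit {
            candidates = candidates[:limit]
        }
        for _, name := range candidates {
            if keep[name] {
                // double-verify in-use images were filtered out earlier
                return nil, fmt.Errorf("fatal: attempt to delete in-use image %q", name)
            }
        }
        return candidates, nil
    }

    func main() {
        keep := map[string]bool{"fedora-base": true}
        picked, err := pickForDeletion([]string{"old-1", "old-2", "old-3"}, keep, 12, 2)
        fmt.Println(picked, err)
    }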
diff --git a/docs/podman-image-sign.1.md b/docs/podman-image-sign.1.md
index 61df3b3bd..ca438b438 100644
--- a/docs/podman-image-sign.1.md
+++ b/docs/podman-image-sign.1.md
@@ -12,14 +12,23 @@ been pulled from a registry. The signature will be written to a directory
derived from the registry configuration files in /etc/containers/registries.d. By default, the signature will be written into /var/lib/containers/sigstore directory.
## OPTIONS
+
**--help**, **-h**
- Print usage statement.
+
+Print usage statement.
+
+**--cert-dir**=*path*
+
+Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
+Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands)
**--directory**, **-d**=*dir*
- Store the signatures in the specified directory. Default: /var/lib/containers/sigstore
+
+Store the signatures in the specified directory. Default: /var/lib/containers/sigstore
**--sign-by**=*identity*
- Override the default identity of the signature.
+
+Override the default identity of the signature.
## EXAMPLES
Sign the busybox image with the identity of foo@bar.com with a user's keyring and save the signature in /tmp/signatures/.
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 176781f07..1de8d80c9 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -870,7 +870,7 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
newCfgJSON, err := json.Marshal(newCfg)
if err != nil {
- return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", pod.ID())
+ return errors.Wrapf(err, "error marshalling new configuration JSON for pod %s", pod.ID())
}
db, err := s.getDBCon()
@@ -900,6 +900,50 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
return err
}
+// RewriteVolumeConfig rewrites a volume's configuration.
+// WARNING: This function is DANGEROUS. Do not use without reading the full
+// comment on this function in state.go.
+func (s *BoltState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ if !volume.valid {
+ return define.ErrVolumeRemoved
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for volume %q", volume.Name())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volDB := volBkt.Bucket([]byte(volume.Name()))
+ if volDB == nil {
+ volume.valid = false
+ return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %q found in DB", volume.Name())
+ }
+
+ if err := volDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating volume %q config JSON", volume.Name())
+ }
+
+ return nil
+ })
+ return err
+}
+
// Pod retrieves a pod given its full ID
func (s *BoltState) Pod(id string) (*Pod, error) {
if id == "" {
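
RewriteVolumeConfig boils down to a single Put of the marshalled config, keyed under the volume's name bucket, inside one Update transaction. A standalone sketch of that write path and the matching read against bbolt (the go.etcd.io/bbolt import path, temp database, and bucket layout are assumptions for illustration; libpod vendors its own copy):

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        bolt "go.etcd.io/bbolt" // assumed import; libpod vendors its own bolt
    )

    func main() {
        db, err := bolt.Open("/tmp/volumes-demo.db", 0600, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        volName, configKey := []byte("testvol1"), []byte("config")
        cfgJSON := []byte(`{"name":"testvol1","lockID":7}`)

        // Write path, mirroring RewriteVolumeConfig: one Put in an Update tx.
        err = db.Update(func(tx *bolt.Tx) error {
            volBkt, err := tx.CreateBucketIfNotExists([]byte("volumes"))
            if err != nil {
                return err
            }
            volDB, err := volBkt.CreateBucketIfNotExists(volName)
            if err != nil {
                return err
            }
            return volDB.Put(configKey, cfgJSON)
        })
        if err != nil {
            log.Fatal(err)
        }

        // Read path: a View tx plus unmarshal, as getVolumeFromDB does.
        err = db.View(func(tx *bolt.Tx) error {
            raw := tx.Bucket([]byte("volumes")).Bucket(volName).Get(configKey)
            var cfg map[string]interface{}
            if err := json.Unmarshal(raw, &cfg); err != nil {
                return err
            }
            fmt.Println("persisted lockID:", cfg["lockID"])
            return nil
        })
        if err != nil {
            log.Fatal(err)
        }
    }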
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 408ef7224..6e4179835 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -449,6 +449,13 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
}
+ // Get the lock
+ lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving lock for volume %q", string(name))
+ }
+ volume.lock = lock
+
volume.runtime = s.runtime
volume.valid = true
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 7c4abd25d..a9b735327 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -425,6 +425,26 @@ func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
return nil
}
+// RewriteVolumeConfig rewrites a volume's configuration.
+// This function is DANGEROUS, even with in-memory state.
+// Please read the full comment in state.go before using it.
+func (s *InMemoryState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error {
+ if !volume.valid {
+ return define.ErrVolumeRemoved
+ }
+
+ // If the volume does not exist, return error
+ stateVol, ok := s.volumes[volume.Name()]
+ if !ok {
+ volume.valid = false
+ return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %q not found in state", volume.Name())
+ }
+
+ stateVol.config = newCfg
+
+ return nil
+}
+
// Volume retrieves a volume from its full name
func (s *InMemoryState) Volume(name string) (*Volume, error) {
if name == "" {
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 4d6a80d0b..28774773e 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -76,10 +76,6 @@ var (
// place of the configuration file pointed to by ConfigPath.
OverrideConfigPath = etcDir + "/containers/libpod.conf"
- // DefaultInfraImage to use for infra container
-
- // DefaultInfraCommand to be run in an infra container
-
// DefaultSHMLockPath is the default path for SHM locks
DefaultSHMLockPath = "/libpod_lock"
// DefaultRootlessSHMLockPath is the default path for rootless SHM locks
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 92b2faefb..acd317d20 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -253,10 +253,13 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
// Go through named volumes and add them.
// If they don't exist they will be created using basic options.
+ // Maintain an array of them - we need to lock them later.
+ ctrNamedVolumes := make([]*Volume, 0, len(ctr.config.NamedVolumes))
for _, vol := range ctr.config.NamedVolumes {
// Check if it exists already
- _, err := r.state.Volume(vol.Name)
+ dbVol, err := r.state.Volume(vol.Name)
if err == nil {
+ ctrNamedVolumes = append(ctrNamedVolumes, dbVol)
// The volume exists, we're good
continue
} else if errors.Cause(err) != config2.ErrNoSuchVolume {
@@ -275,6 +278,8 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
if err := ctr.copyWithTarFromImage(vol.Dest, newVol.MountPoint()); err != nil && !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "Failed to copy content into new volume mount %q", vol.Name)
}
+
+ ctrNamedVolumes = append(ctrNamedVolumes, newVol)
}
if ctr.config.LogPath == "" && ctr.config.LogDriver != JournaldLogging {
@@ -291,6 +296,14 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
ctr.config.Mounts = append(ctr.config.Mounts, ctr.config.ShmDir)
}
+ // Lock all named volumes we are adding ourselves to, to ensure we can't
+ // use a volume that is being removed.
+ for _, namedVol := range ctrNamedVolumes {
+ toLock := namedVol
+ toLock.lock.Lock()
+ defer toLock.lock.Unlock()
+ }
+
// Add the container to the state
// TODO: May be worth looking into recovering from name/ID collisions here
if ctr.config.Pod != "" {
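
The toLock copy inside the loop is the defensive idiom for deferring per-iteration cleanup. A deferred method call binds its receiver when the defer statement executes, so each iteration's lock really is the one unlocked; a deferred closure over the loop variable (before Go 1.22 changed loop scoping) would capture only the last element. A sketch with plain mutexes standing in for the volume locks:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        locks := []*sync.Mutex{{}, {}, {}}

        func() {
            for _, l := range locks {
                toLock := l // defensive copy, as in the patch
                toLock.Lock()
                // The receiver of a deferred method call is evaluated
                // here, so every lock gets its own Unlock.
                defer toLock.Unlock()

                // A deferred closure would capture the variable instead:
                //   defer func() { l.Unlock() }() // buggy before Go 1.22
            }
            fmt.Println("all volume locks held; safe to add the container to state")
        }() // deferred unlocks run here, LIFO
    }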
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
index 735ffba34..9de2556b2 100644
--- a/libpod/runtime_renumber.go
+++ b/libpod/runtime_renumber.go
@@ -53,6 +53,23 @@ func (r *Runtime) renumberLocks() error {
return err
}
}
+ allVols, err := r.state.AllVolumes()
+ if err != nil {
+ return err
+ }
+ for _, vol := range allVols {
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return errors.Wrapf(err, "error allocating lock for volume %s", vol.Name())
+ }
+
+ vol.config.LockID = lock.ID()
+
+ // Write the new lock ID
+ if err := r.state.RewriteVolumeConfig(vol, vol.config); err != nil {
+ return err
+ }
+ }
r.newSystemEvent(events.Renumber)
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index d05db936b..512e778a1 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -36,6 +36,10 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error
return nil
}
}
+
+ v.lock.Lock()
+ defer v.lock.Unlock()
+
return r.removeVolume(ctx, v, force)
}
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 84703787d..70296248c 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -28,7 +28,7 @@ func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption)
}
// newVolume creates a new empty volume
-func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
+func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (_ *Volume, Err error) {
volume, err := newVolume(r)
if err != nil {
return nil, errors.Wrapf(err, "error creating volume")
@@ -68,6 +68,21 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
volume.config.MountPoint = fullVolPath
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error allocating lock for new volume")
+ }
+ volume.lock = lock
+ volume.config.LockID = volume.lock.ID()
+
+ defer func() {
+ if Err != nil {
+ if err := volume.lock.Free(); err != nil {
+ logrus.Errorf("Error freeing volume lock after failed creation: %v", err)
+ }
+ }
+ }()
+
volume.valid = true
// Add the volume to state
@@ -110,6 +125,8 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
return errors.Wrapf(err, "error removing container %s that depends on volume %s", dep, v.Name())
}
+ logrus.Debugf("Removing container %s (depends on volume %q)", ctr.ID(), v.Name())
+
// TODO: do we want to set force here when removing
// containers?
// I'm inclined to say no, in case someone accidentally
@@ -128,12 +145,24 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
return errors.Wrapf(err, "error removing volume %s", v.Name())
}
- // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
+ var removalErr error
+
+ // Free the volume's lock
+ if err := v.lock.Free(); err != nil {
+ removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name())
+ }
+
+ // Delete the mountpoint path of the volume, that is delete the volume
+ // from /var/lib/containers/storage/volumes
if err := v.teardownStorage(); err != nil {
- return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+ if removalErr == nil {
+ removalErr = errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+ } else {
+ logrus.Errorf("error cleaning up volume storage for volume %q: %v", v.Name(), err)
+ }
}
defer v.newVolumeEvent(events.Remove)
logrus.Debugf("Removed volume %s", v.Name())
- return nil
+ return removalErr
}
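
newVolume now uses a named return value so a deferred closure can see whether creation failed and free the just-allocated lock, while removeVolume keeps the first error and logs any later ones instead of returning early. A sketch of the named-return cleanup idiom, with allocate and free as hypothetical stand-ins for the lock manager:

    package main

    import (
        "errors"
        "fmt"
        "log"
    )

    // allocate and free stand in for lock-manager calls (hypothetical).
    func allocate() (int, error) { return 42, nil }
    func free(id int) error      { return nil }

    // newResource frees the lock if any later setup step fails; the
    // deferred closure inspects the named return value err.
    func newResource(failSetup bool) (_ string, err error) {
        id, err := allocate()
        if err != nil {
            return "", err
        }
        defer func() {
            if err != nil {
                if ferr := free(id); ferr != nil {
                    log.Printf("error freeing lock after failed creation: %v", ferr)
                }
            }
        }()

        if failSetup {
            return "", errors.New("setup failed") // triggers the deferred free
        }
        return fmt.Sprintf("resource-%d", id), nil
    }

    func main() {
        _, err := newResource(true)
        fmt.Println("creation error:", err)
    }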
diff --git a/libpod/state.go b/libpod/state.go
index d0ad1a1f8..5d704e69a 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -115,12 +115,20 @@ type State interface {
// answer is this: use this only very sparingly, and only if you really
// know what you're doing.
RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error
- // PLEASE READ THE ABOVE DESCRIPTION BEFORE USING.
+ // PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING.
// This function is identical to RewriteContainerConfig, save for the
// fact that it is used with pods instead.
// It is subject to the same conditions as RewriteContainerConfig.
// Please do not use this unless you know what you're doing.
RewritePodConfig(pod *Pod, newCfg *PodConfig) error
+ // PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING.
+ // This function is identical to RewriteContainerConfig, save for the
+ // fact that it is used with volumes instead.
+ // It is subject to the same conditions as RewriteContainerConfig.
+ // The exception is that volumes do not have IDs, so only the volume
+ // name must be left unaltered.
+ // Please do not use this unless you know what you're doing.
+ RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error
// Accepts full ID of pod.
// If the pod given is not in the set namespace, an error will be
diff --git a/libpod/volume.go b/libpod/volume.go
index 74126b49b..abfa7b3f4 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -2,6 +2,8 @@ package libpod
import (
"time"
+
+ "github.com/containers/libpod/libpod/lock"
)
// Volume is the type used to create named volumes
@@ -11,21 +13,35 @@ type Volume struct {
valid bool
runtime *Runtime
+ lock lock.Locker
}
// VolumeConfig holds the volume's config information
type VolumeConfig struct {
- // Name of the volume
+ // Name of the volume.
Name string `json:"name"`
-
- Labels map[string]string `json:"labels"`
- Driver string `json:"driver"`
- MountPoint string `json:"mountPoint"`
- CreatedTime time.Time `json:"createdAt,omitempty"`
- Options map[string]string `json:"options"`
- IsCtrSpecific bool `json:"ctrSpecific"`
- UID int `json:"uid"`
- GID int `json:"gid"`
+ // ID of the volume's lock.
+ LockID uint32 `json:"lockID"`
+ // Labels for the volume.
+ Labels map[string]string `json:"labels"`
+ // The volume driver. Empty string or local does not activate a volume
+ // driver, all other volumes will.
+ Driver string `json:"driver"`
+ // The location the volume is mounted at.
+ MountPoint string `json:"mountPoint"`
+ // Time the volume was created.
+ CreatedTime time.Time `json:"createdAt,omitempty"`
+ // Options to pass to the volume driver. For the local driver, this is
+ // a list of mount options. For other drivers, they are passed to the
+ // volume driver handling the volume.
+ Options map[string]string `json:"options"`
+ // Whether this volume was created for a specific container and will be
+ // removed with it.
+ IsCtrSpecific bool `json:"ctrSpecific"`
+ // UID the volume will be created as.
+ UID int `json:"uid"`
+ // GID the volume will be created as.
+ GID int `json:"gid"`
}
// Name retrieves the volume's name
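
With LockID in place, the JSON persisted for each volume gains a lockID field alongside the existing keys. A sketch of the serialized shape, using a trimmed local copy of the struct (field names and tags follow the patch, but this is not the libpod type itself):

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    // VolumeConfig here is a local illustration of the patched struct.
    type VolumeConfig struct {
        Name          string            `json:"name"`
        LockID        uint32            `json:"lockID"`
        Labels        map[string]string `json:"labels"`
        Driver        string            `json:"driver"`
        MountPoint    string            `json:"mountPoint"`
        CreatedTime   time.Time         `json:"createdAt,omitempty"`
        Options       map[string]string `json:"options"`
        IsCtrSpecific bool              `json:"ctrSpecific"`
        UID           int               `json:"uid"`
        GID           int               `json:"gid"`
    }

    func main() {
        cfg := VolumeConfig{
            Name:        "testvol1",
            LockID:      7,
            Driver:      "local",
            MountPoint:  "/var/lib/containers/storage/volumes/testvol1/_data",
            CreatedTime: time.Now(),
        }
        out, _ := json.MarshalIndent(cfg, "", "  ")
        fmt.Println(string(out)) // this JSON is what the state stores per volume
    }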
diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go
index 1e0b84310..abb93a149 100644
--- a/test/e2e/run_volume_test.go
+++ b/test/e2e/run_volume_test.go
@@ -154,4 +154,12 @@ var _ = Describe("Podman run with volumes", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Not(Equal(0)))
})
+
+ It("podman run with volume flag and multiple named volumes", func() {
+ session := podmanTest.Podman([]string{"run", "--rm", "-v", "testvol1:/testvol1", "-v", "testvol2:/testvol2", ALPINE, "grep", "/testvol", "/proc/self/mountinfo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("/testvol1"))
+ Expect(session.OutputToString()).To(ContainSubstring("/testvol2"))
+ })
})
diff --git a/test/e2e/systemd_test.go b/test/e2e/systemd_test.go
index 91604867d..02778d493 100644
--- a/test/e2e/systemd_test.go
+++ b/test/e2e/systemd_test.go
@@ -5,7 +5,10 @@ package integration
import (
"io/ioutil"
"os"
+ "strings"
+ "time"
+ "github.com/containers/libpod/pkg/cgroups"
. "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -77,4 +80,49 @@ WantedBy=multi-user.target
status := SystemExec("bash", []string{"-c", "systemctl status redis"})
Expect(status.OutputToString()).To(ContainSubstring("active (running)"))
})
+
+ It("podman run container with systemd PID1", func() {
+ cgroupsv2, err := cgroups.IsCgroup2UnifiedMode()
+ Expect(err).To(BeNil())
+ if cgroupsv2 {
+ Skip("systemd test does not work in cgroups V2 mode yet")
+ }
+
+ systemdImage := "fedora"
+ pull := podmanTest.Podman([]string{"pull", systemdImage})
+ pull.WaitWithDefaultTimeout()
+ Expect(pull.ExitCode()).To(Equal(0))
+
+ ctrName := "testSystemd"
+ run := podmanTest.Podman([]string{"run", "--name", ctrName, "-t", "-i", "-d", systemdImage, "init"})
+ run.WaitWithDefaultTimeout()
+ Expect(run.ExitCode()).To(Equal(0))
+ ctrID := run.OutputToString()
+
+ logs := podmanTest.Podman([]string{"logs", ctrName})
+ logs.WaitWithDefaultTimeout()
+ Expect(logs.ExitCode()).To(Equal(0))
+
+ // Give container 10 seconds to start
+ started := false
+ for i := 0; i < 10; i++ {
+ runningCtrs := podmanTest.Podman([]string{"ps", "-q", "--no-trunc"})
+ runningCtrs.WaitWithDefaultTimeout()
+ Expect(runningCtrs.ExitCode()).To(Equal(0))
+
+ if strings.Contains(runningCtrs.OutputToString(), ctrID) {
+ started = true
+ break
+ }
+
+ time.Sleep(1 * time.Second)
+ }
+
+ Expect(started).To(BeTrue())
+
+ systemctl := podmanTest.Podman([]string{"exec", "-t", "-i", ctrName, "systemctl", "status", "--no-pager"})
+ systemctl.WaitWithDefaultTimeout()
+ Expect(systemctl.ExitCode()).To(Equal(0))
+ Expect(strings.Contains(systemctl.OutputToString(), "State:")).To(BeTrue())
+ })
})
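
The ten-iteration loop in the new test is an ad-hoc poll: check `podman ps` once per second until the container ID shows up or ten seconds pass. The same wait-for-condition shape can be factored into a helper; a sketch (waitFor is hypothetical, not part of the test utilities):

    package main

    import (
        "fmt"
        "time"
    )

    // waitFor polls cond once per interval until it returns true or the
    // timeout elapses, and reports whether the condition was met.
    func waitFor(timeout, interval time.Duration, cond func() bool) bool {
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            if cond() {
                return true
            }
            time.Sleep(interval)
        }
        return false
    }

    func main() {
        start := time.Now()
        started := waitFor(10*time.Second, time.Second, func() bool {
            // stand-in for checking ctrID in `podman ps -q --no-trunc` output
            return time.Since(start) > 2*time.Second
        })
        fmt.Println("container started:", started)
    }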