-rw-r--r--   .cirrus.yml                           |  20
-rwxr-xr-x   contrib/cirrus/setup_environment.sh   |   6
-rwxr-xr-x   contrib/imgprune/entrypoint.sh        |  63
-rw-r--r--   libpod/boltdb_state.go                |  46
-rw-r--r--   libpod/boltdb_state_internal.go       |   7
-rw-r--r--   libpod/image/image.go                 |   6
-rw-r--r--   libpod/in_memory_state.go             |  20
-rw-r--r--   libpod/runtime.go                     |   4
-rw-r--r--   libpod/runtime_ctr.go                 |  15
-rw-r--r--   libpod/runtime_renumber.go            |  17
-rw-r--r--   libpod/runtime_volume.go              |   4
-rw-r--r--   libpod/runtime_volume_linux.go        |  37
-rw-r--r--   libpod/state.go                       |  10
-rw-r--r--   libpod/volume.go                      |  36
-rw-r--r--   test/e2e/run_volume_test.go           |   8
-rw-r--r--   test/e2e/systemd_test.go              |  48
-rw-r--r--   test/system/065-cp.bats               | 229
17 files changed, 533 insertions(+), 43 deletions(-)
diff --git a/.cirrus.yml b/.cirrus.yml
index f034a5b37..f49b1d312 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -270,6 +270,7 @@ meta_task:
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
GCPJSON: ENCRYPTED[950d9c64ad78f7b1f0c7e499b42dc058d2b23aa67e38b315e68f557f2aba0bf83068d4734f7b1e1bdd22deabe99629df]
+ # needed for output-masking purposes
GCPNAME: ENCRYPTED[b05d469a0dba8cb479cb00cc7c1f6747c91d17622fba260a986b976aa6c817d4077eacffd4613d6d5f23afc4084fab1d]
GCPPROJECT: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f]
@@ -294,9 +295,11 @@ image_prune_task:
memory: 1
env:
- <<: *meta_env_vars
+ # Order is significant: Cirrus does not always override alias values as intended
GCPJSON: ENCRYPTED[4c11d8e09c904c30fc70eecb95c73dec0ddf19976f9b981a0f80f3f6599e8f990bcef93c253ac0277f200850d98528e7]
GCPNAME: ENCRYPTED[7f54557ba6e5a437f11283a53e71baec9ca546f48a9835538cc54d297f79968eb1337d4596a1025b14f9d1c5723fbd29]
+ GCPPROJECT: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f]
+ <<: *meta_env_vars
timeout_in: 10m
@@ -525,6 +528,19 @@ special_testing_endpoint_task:
always:
<<: *standardlogs
+
+test_building_snap_task:
+
+ depends_on:
+ - "gating"
+
+ container:
+ image: yakshaveinc/snapcraft:core18
+ snapcraft_script:
+ - 'apt-get -y update'
+ - 'cd contrib/snapcraft && snapcraft'
+
+
# Test building of new cache-images for future PR testing, in this PR.
test_build_cache_images_task:
@@ -633,6 +649,7 @@ success_task:
- "special_testing_cross"
- "special_testing_endpoint"
- "test_build_cache_images"
+ - "test_building_snap"
- "verify_test_built_images"
env:
@@ -674,6 +691,7 @@ release_task:
- "special_testing_cross"
- "special_testing_endpoint"
- "test_build_cache_images"
+ - "test_building_snap"
- "verify_test_built_images"
- "success"
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index e0236a54d..463647d2f 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -44,11 +44,15 @@ case "${OS_REL_VER}" in
;;
fedora-30) ;& # continue to next item
fedora-29)
+ # All SELinux distros need this for systemd-in-a-container
+ setsebool container_manage_cgroup true
if [[ "$ADD_SECOND_PARTITION" == "true" ]]; then
bash "$SCRIPT_BASE/add_second_partition.sh"; fi
;;
centos-7) # Current VM is an image-builder-image no local podman/testing
- echo "No further setup required for VM image building"
+ echo "No further setup required for VM image building"
+ # All SELinux distros need this for systemd-in-a-container
+ setsebool container_manage_cgroup true
exit 0
;;
*) bad_os_id_ver ;;
diff --git a/contrib/imgprune/entrypoint.sh b/contrib/imgprune/entrypoint.sh
index a4b77523b..829e9938e 100755
--- a/contrib/imgprune/entrypoint.sh
+++ b/contrib/imgprune/entrypoint.sh
@@ -6,27 +6,49 @@ source /usr/local/bin/lib_entrypoint.sh
req_env_var GCPJSON GCPNAME GCPPROJECT IMGNAMES
+BASE_IMAGES=""
+# When executing under Cirrus-CI, the current source is available
+if [[ "$CI" == "true" ]] && [[ -r "$CIRRUS_WORKING_DIR/$SCRIPT_BASE" ]]
+then
+ # Avoid importing anything that might conflict
+ eval "$(egrep -sh '^export .+BASE_IMAGE=' < $CIRRUS_WORKING_DIR/$SCRIPT_BASE/lib.sh)"
+ BASE_IMAGES="$UBUNTU_BASE_IMAGE $PRIOR_UBUNTU_BASE_IMAGE $FEDORA_BASE_IMAGE $PRIOR_FEDORA_BASE_IMAGE"
+else
+ # Metadata labeling could break at some point in the future
+ echo "Warning: Running outside of Cirrus-CI; minor risk of base-image deletion."
+fi
+
gcloud_init
# For safety's sake + limit nr background processes
-PRUNE_LIMIT=10
+PRUNE_LIMIT=5
THEFUTURE=$(date --date='+1 hour' +%s)
-TOO_OLD='90 days ago'
+TOO_OLD='30 days ago'
THRESHOLD=$(date --date="$TOO_OLD" +%s)
# Format Ref: https://cloud.google.com/sdk/gcloud/reference/topic/formats
FORMAT='value[quote](name,selfLink,creationTimestamp,labels)'
PROJRE="/v1/projects/$GCPPROJECT/global/"
-BASE_IMAGE_RE='cloud-base'
-RECENTLY=$(date --date='30 days ago' --iso-8601=date)
-EXCLUDE="$IMGNAMES $IMAGE_BUILDER_CACHE_IMAGE_NAME" # whitespace separated values
+RECENTLY=$(date --date='3 days ago' --iso-8601=date)
# Filter Ref: https://cloud.google.com/sdk/gcloud/reference/topic/filters
-FILTER="selfLink~$PROJRE AND creationTimestamp<$RECENTLY AND NOT name=($EXCLUDE)"
+FILTER="selfLink~$PROJRE AND creationTimestamp<$RECENTLY AND NOT name=($IMGNAMES $BASE_IMAGES)"
TODELETE=$(mktemp -p '' todelete.XXXXXX)
+IMGCOUNT=$(mktemp -p '' imgcount.XXXXXX)
+
+# The search loop below runs in a subshell (the read side of a pipe), so the count must be stored in a file
+echo "0" > "$IMGCOUNT"
+count_image() {
+ local count
+ count=$(<"$IMGCOUNT")
+ let 'count+=1'
+ echo "$count" > "$IMGCOUNT"
+}
-echo "Searching images for pruning candidates older than $TOO_OLD ($THRESHOLD):"
+echo "Using filter: $FILTER"
+echo "Searching images for pruning candidates older than $TOO_OLD ($(date --date="$TOO_OLD" --iso-8601=date)):"
$GCLOUD compute images list --format="$FORMAT" --filter="$FILTER" | \
while read name selfLink creationTimestamp labels
do
+ count_image
created_ymd=$(date --date=$creationTimestamp --iso-8601=date)
last_used=$(egrep --only-matching --max-count=1 'last-used=[[:digit:]]+' <<< $labels || true)
markmsgpfx="Marking $name (created $created_ymd) for deletion"
@@ -52,16 +74,29 @@ $GCLOUD compute images list --format="$FORMAT" --filter="$FILTER" | \
echo "$name" >> $TODELETE
continue
fi
-
- echo "NOT $markmsgpfx: last used on $last_used_ymd)"
done
-echo "Pruning up to $PRUNE_LIMIT images that were marked for deletion:"
-for image_name in $(tail -$PRUNE_LIMIT $TODELETE | sort --random-sort)
+COUNT=$(<"$IMGCOUNT")
+echo "########################################################################"
+echo "Deleting up to $PRUNE_LIMIT images marked ($(wc -l < $TODELETE)) of all searched ($COUNT):"
+
+# Require a minimum number of images to exist
+NEED="$[$PRUNE_LIMIT*2]"
+if [[ "$COUNT" -lt "$NEED" ]]
+then
+ die 0 Safety-net Insufficient images \($COUNT\) to process deletions \($NEED\)
+ exit 0
+fi
+
+for image_name in $(sort --random-sort $TODELETE | tail -$PRUNE_LIMIT)
do
- # This can take quite some time (minutes), run in parallel disconnected from terminal
- echo "TODO: Would have: $GCLOUD compute images delete $image_name &"
- sleep "$[1+RANDOM/1000]s" & # Simlate background operation
+ if echo "$IMGNAMES $BASE_IMAGES" | grep -q "$image_name"
+ then
+ # double-verify in-use images were filtered out in search loop above
+ die 8 FATAL ATTEMPT TO DELETE IN-USE IMAGE \'$image_name\' - THIS SHOULD NEVER HAPPEN
+ fi
+ echo "Deleting $image_name in parallel..."
+ $GCLOUD compute images delete $image_name &
done
wait || true # Nothing to delete: No background jobs
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 176781f07..1de8d80c9 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -870,7 +870,7 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
newCfgJSON, err := json.Marshal(newCfg)
if err != nil {
- return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", pod.ID())
+ return errors.Wrapf(err, "error marshalling new configuration JSON for pod %s", pod.ID())
}
db, err := s.getDBCon()
@@ -900,6 +900,50 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
return err
}
+// RewriteVolumeConfig rewrites a volume's configuration.
+// WARNING: This function is DANGEROUS. Do not use without reading the full
+// comment on this function in state.go.
+func (s *BoltState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ if !volume.valid {
+ return define.ErrVolumeRemoved
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for volume %q", volume.Name())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volDB := volBkt.Bucket([]byte(volume.Name()))
+ if volDB == nil {
+ volume.valid = false
+ return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %q found in DB", volume.Name())
+ }
+
+ if err := volDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating volume %q config JSON", volume.Name())
+ }
+
+ return nil
+ })
+ return err
+}
+
// Pod retrieves a pod given its full ID
func (s *BoltState) Pod(id string) (*Pod, error) {
if id == "" {
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 408ef7224..6e4179835 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -449,6 +449,13 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
}
+ // Get the lock
+ lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving lock for volume %q", string(name))
+ }
+ volume.lock = lock
+
volume.runtime = s.runtime
volume.valid = true
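
Note: the lock manager's interface is not part of this diff. Inferred purely from the calls made here (RetrieveLock, AllocateLock, ID, Lock, Unlock, Free), the assumed contract looks roughly like the sketch below; the upstream definition in libpod/lock may carry more methods:

    // Locker is the per-object lock handed out by the manager.
    // Only the methods exercised in this diff are listed.
    type Locker interface {
    	ID() uint32  // stable ID persisted in the object's config
    	Lock()       // acquire the (process-shared) lock
    	Unlock()     // release it
    	Free() error // return the lock to the manager's pool
    }

    // Manager allocates fresh locks and re-attaches existing ones by ID,
    // as getVolumeFromDB does when rebuilding a Volume from the DB.
    type Manager interface {
    	AllocateLock() (Locker, error)
    	RetrieveLock(id uint32) (Locker, error)
    }
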
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 1ff271a4d..0be6eeeb9 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -12,7 +12,6 @@ import (
"syscall"
"time"
- types2 "github.com/containernetworking/cni/pkg/types"
cp "github.com/containers/image/copy"
"github.com/containers/image/directory"
dockerarchive "github.com/containers/image/docker/archive"
@@ -384,11 +383,6 @@ func (i *Image) Remove(ctx context.Context, force bool) error {
return nil
}
-// Decompose an Image
-func (i *Image) Decompose() error {
- return types2.NotImplementedError
-}
-
// TODO: Rework this method to not require an assembly of the fq name with transport
/*
// GetManifest tries to GET an images manifest, returns nil on success and err on failure
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 7c4abd25d..a9b735327 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -425,6 +425,26 @@ func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
return nil
}
+// RewriteVolumeConfig rewrites a volume's configuration.
+// This function is DANGEROUS, even with in-memory state.
+// Please read the full comment in state.go before using it.
+func (s *InMemoryState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error {
+ if !volume.valid {
+ return define.ErrVolumeRemoved
+ }
+
+ // If the volume does not exist, return error
+ stateVol, ok := s.volumes[volume.Name()]
+ if !ok {
+ volume.valid = false
+ return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %q not found in state", volume.Name())
+ }
+
+ stateVol.config = newCfg
+
+ return nil
+}
+
// Volume retrieves a volume from its full name
func (s *InMemoryState) Volume(name string) (*Volume, error) {
if name == "" {
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 4d6a80d0b..28774773e 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -76,10 +76,6 @@ var (
// place of the configuration file pointed to by ConfigPath.
OverrideConfigPath = etcDir + "/containers/libpod.conf"
- // DefaultInfraImage to use for infra container
-
- // DefaultInfraCommand to be run in an infra container
-
// DefaultSHMLockPath is the default path for SHM locks
DefaultSHMLockPath = "/libpod_lock"
// DefaultRootlessSHMLockPath is the default path for rootless SHM locks
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 92b2faefb..acd317d20 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -253,10 +253,13 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
// Go through named volumes and add them.
// If they don't exist they will be created using basic options.
+ // Maintain a slice of them - we need to lock them later.
+ ctrNamedVolumes := make([]*Volume, 0, len(ctr.config.NamedVolumes))
for _, vol := range ctr.config.NamedVolumes {
// Check if it exists already
- _, err := r.state.Volume(vol.Name)
+ dbVol, err := r.state.Volume(vol.Name)
if err == nil {
+ ctrNamedVolumes = append(ctrNamedVolumes, dbVol)
// The volume exists, we're good
continue
} else if errors.Cause(err) != config2.ErrNoSuchVolume {
@@ -275,6 +278,8 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
if err := ctr.copyWithTarFromImage(vol.Dest, newVol.MountPoint()); err != nil && !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "Failed to copy content into new volume mount %q", vol.Name)
}
+
+ ctrNamedVolumes = append(ctrNamedVolumes, newVol)
}
if ctr.config.LogPath == "" && ctr.config.LogDriver != JournaldLogging {
@@ -291,6 +296,14 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
ctr.config.Mounts = append(ctr.config.Mounts, ctr.config.ShmDir)
}
+ // Lock all named volumes we are adding ourselves to, to ensure we can't
+ // use a volume being removed.
+ for _, namedVol := range ctrNamedVolumes {
+ toLock := namedVol
+ toLock.lock.Lock()
+ defer toLock.lock.Unlock()
+ }
+
// Add the container to the state
// TODO: May be worth looking into recovering from name/ID collisions here
if ctr.config.Pod != "" {
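
Note on the locking loop above: a deferred call evaluates its receiver and arguments when the defer statement runs, so deferring namedVol.lock.Unlock() directly would already pin the current iteration's lock. The toLock copy is defensive against the shared loop variable that, in Go versions of this era (pre-1.22), does bite closures. A self-contained illustration of the difference:

    package main

    import "fmt"

    func main() {
    	nums := []int{1, 2, 3}

    	// Closures capture the loop variable itself; before Go 1.22
    	// every closure sees the final value and this prints 3 3 3.
    	var fns []func()
    	for _, n := range nums {
    		fns = append(fns, func() { fmt.Println(n) })
    	}
    	for _, f := range fns {
    		f()
    	}

    	// A per-iteration copy (like toLock above) pins the value,
    	// so this prints 1 2 3 on any Go version.
    	fns = fns[:0]
    	for _, n := range nums {
    		n := n
    		fns = append(fns, func() { fmt.Println(n) })
    	}
    	for _, f := range fns {
    		f()
    	}
    }
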
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
index 735ffba34..9de2556b2 100644
--- a/libpod/runtime_renumber.go
+++ b/libpod/runtime_renumber.go
@@ -53,6 +53,23 @@ func (r *Runtime) renumberLocks() error {
return err
}
}
+ allVols, err := r.state.AllVolumes()
+ if err != nil {
+ return err
+ }
+ for _, vol := range allVols {
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return errors.Wrapf(err, "error allocating lock for volume %s", vol.Name())
+ }
+
+ vol.config.LockID = lock.ID()
+
+ // Write the new lock ID
+ if err := r.state.RewriteVolumeConfig(vol, vol.config); err != nil {
+ return err
+ }
+ }
r.newSystemEvent(events.Renumber)
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index d05db936b..512e778a1 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -36,6 +36,10 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error
return nil
}
}
+
+ v.lock.Lock()
+ defer v.lock.Unlock()
+
return r.removeVolume(ctx, v, force)
}
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 84703787d..70296248c 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -28,7 +28,7 @@ func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption)
}
// newVolume creates a new empty volume
-func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
+func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (_ *Volume, Err error) {
volume, err := newVolume(r)
if err != nil {
return nil, errors.Wrapf(err, "error creating volume")
@@ -68,6 +68,21 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
volume.config.MountPoint = fullVolPath
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error allocating lock for new volume")
+ }
+ volume.lock = lock
+ volume.config.LockID = volume.lock.ID()
+
+ defer func() {
+ if Err != nil {
+ if err := volume.lock.Free(); err != nil {
+ logrus.Errorf("Error freeing volume lock after failed creation: %v", err)
+ }
+ }
+ }()
+
volume.valid = true
// Add the volume to state
@@ -110,6 +125,8 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
return errors.Wrapf(err, "error removing container %s that depends on volume %s", dep, v.Name())
}
+ logrus.Debugf("Removing container %s (depends on volume %q)", ctr.ID(), v.Name())
+
// TODO: do we want to set force here when removing
// containers?
// I'm inclined to say no, in case someone accidentally
@@ -128,12 +145,24 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
return errors.Wrapf(err, "error removing volume %s", v.Name())
}
- // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
+ var removalErr error
+
+ // Free the volume's lock
+ if err := v.lock.Free(); err != nil {
+ removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name())
+ }
+
+ // Delete the mountpoint path of the volume, that is delete the volume
+ // from /var/lib/containers/storage/volumes
if err := v.teardownStorage(); err != nil {
- return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+ if removalErr == nil {
+ removalErr = errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+ } else {
+ logrus.Errorf("error cleaning up volume storage for volume %q: %v", v.Name(), err)
+ }
}
defer v.newVolumeEvent(events.Remove)
logrus.Debugf("Removed volume %s", v.Name())
- return nil
+ return removalErr
}
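
Note: the signature change to newVolume above, (_ *Volume, Err error), enables a standard Go cleanup idiom: a named error return is visible to deferred functions at return time, so the constructor can free the just-allocated lock on any later failure. A minimal, self-contained sketch of the pattern; all names here are illustrative:

    package main

    import (
    	"errors"
    	"fmt"
    )

    type lock struct{}

    func allocateLock() (*lock, error) { return &lock{}, nil }
    func (l *lock) Free() error        { fmt.Println("lock freed"); return nil }

    func finishSetup() error { return errors.New("simulated failure after lock allocation") }

    // newVolumeSketch mirrors newVolume: the named return Err is what the
    // deferred cleanup inspects to decide whether to undo the allocation.
    func newVolumeSketch() (_ string, Err error) {
    	l, err := allocateLock()
    	if err != nil {
    		return "", err
    	}
    	defer func() {
    		if Err != nil {
    			if ferr := l.Free(); ferr != nil {
    				fmt.Printf("error freeing lock after failed creation: %v\n", ferr)
    			}
    		}
    	}()

    	if err := finishSetup(); err != nil {
    		return "", err // any failure from here on triggers the deferred Free
    	}
    	return "newvolume", nil
    }

    func main() {
    	if _, err := newVolumeSketch(); err != nil {
    		fmt.Println("creation failed:", err)
    	}
    }
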
diff --git a/libpod/state.go b/libpod/state.go
index d0ad1a1f8..5d704e69a 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -115,12 +115,20 @@ type State interface {
// answer is this: use this only very sparingly, and only if you really
// know what you're doing.
RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error
- // PLEASE READ THE ABOVE DESCRIPTION BEFORE USING.
+ // PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING.
// This function is identical to RewriteContainerConfig, save for the
// fact that it is used with pods instead.
// It is subject to the same conditions as RewriteContainerConfig.
// Please do not use this unless you know what you're doing.
RewritePodConfig(pod *Pod, newCfg *PodConfig) error
+ // PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING.
+ // This function is identical to RewriteContainerConfig, save for the
+ // fact that it is used with volumes instead.
+ // It is subject to the same conditions as RewriteContainerConfig.
+ // The exception is that volumes do not have IDs, so only the volume's
+ // name cannot be altered.
+ // Please do not use this unless you know what you're doing.
+ RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error
// Accepts full ID of pod.
// If the pod given is not in the set namespace, an error will be
diff --git a/libpod/volume.go b/libpod/volume.go
index 74126b49b..abfa7b3f4 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -2,6 +2,8 @@ package libpod
import (
"time"
+
+ "github.com/containers/libpod/libpod/lock"
)
// Volume is the type used to create named volumes
@@ -11,21 +13,35 @@ type Volume struct {
valid bool
runtime *Runtime
+ lock lock.Locker
}
// VolumeConfig holds the volume's config information
type VolumeConfig struct {
- // Name of the volume
+ // Name of the volume.
Name string `json:"name"`
-
- Labels map[string]string `json:"labels"`
- Driver string `json:"driver"`
- MountPoint string `json:"mountPoint"`
- CreatedTime time.Time `json:"createdAt,omitempty"`
- Options map[string]string `json:"options"`
- IsCtrSpecific bool `json:"ctrSpecific"`
- UID int `json:"uid"`
- GID int `json:"gid"`
+ // ID of the volume's lock.
+ LockID uint32 `json:"lockID"`
+ // Labels for the volume.
+ Labels map[string]string `json:"labels"`
+ // The volume driver. An empty string or "local" does not activate a
+ // volume driver; any other value does.
+ Driver string `json:"driver"`
+ // The location the volume is mounted at.
+ MountPoint string `json:"mountPoint"`
+ // Time the volume was created.
+ CreatedTime time.Time `json:"createdAt,omitempty"`
+ // Options to pass to the volume driver. For the local driver, this is
+ // a list of mount options. For other drivers, they are passed to the
+ // volume driver handling the volume.
+ Options map[string]string `json:"options"`
+ // Whether this volume was created for a specific container and will be
+ // removed with it.
+ IsCtrSpecific bool `json:"ctrSpecific"`
+ // UID the volume will be created as.
+ UID int `json:"uid"`
+ // GID the volume will be created as.
+ GID int `json:"gid"`
}
// Name retrieves the volume's name
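
Note: RewriteVolumeConfig round-trips this struct through encoding/json, so the new lockID field lands in the stored config blob next to the existing keys. A quick sketch of the serialized form, using a trimmed copy of the struct rather than the full upstream type:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"time"
    )

    // Trimmed copy of VolumeConfig, for illustration only.
    type volumeConfig struct {
    	Name        string    `json:"name"`
    	LockID      uint32    `json:"lockID"`
    	MountPoint  string    `json:"mountPoint"`
    	CreatedTime time.Time `json:"createdAt,omitempty"`
    }

    func main() {
    	cfg := volumeConfig{
    		Name:        "testvol1",
    		LockID:      42,
    		MountPoint:  "/var/lib/containers/storage/volumes/testvol1/_data",
    		CreatedTime: time.Now(),
    	}
    	b, err := json.MarshalIndent(cfg, "", "  ")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(b))
    	// Caveat: omitempty never omits a zero time.Time, because a
    	// struct value is not "empty" to encoding/json.
    }
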
diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go
index 1e0b84310..abb93a149 100644
--- a/test/e2e/run_volume_test.go
+++ b/test/e2e/run_volume_test.go
@@ -154,4 +154,12 @@ var _ = Describe("Podman run with volumes", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Not(Equal(0)))
})
+
+ It("podman run with volume flag and multiple named volumes", func() {
+ session := podmanTest.Podman([]string{"run", "--rm", "-v", "testvol1:/testvol1", "-v", "testvol2:/testvol2", ALPINE, "grep", "/testvol", "/proc/self/mountinfo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("/testvol1"))
+ Expect(session.OutputToString()).To(ContainSubstring("/testvol2"))
+ })
})
diff --git a/test/e2e/systemd_test.go b/test/e2e/systemd_test.go
index 91604867d..02778d493 100644
--- a/test/e2e/systemd_test.go
+++ b/test/e2e/systemd_test.go
@@ -5,7 +5,10 @@ package integration
import (
"io/ioutil"
"os"
+ "strings"
+ "time"
+ "github.com/containers/libpod/pkg/cgroups"
. "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -77,4 +80,49 @@ WantedBy=multi-user.target
status := SystemExec("bash", []string{"-c", "systemctl status redis"})
Expect(status.OutputToString()).To(ContainSubstring("active (running)"))
})
+
+ It("podman run container with systemd PID1", func() {
+ cgroupsv2, err := cgroups.IsCgroup2UnifiedMode()
+ Expect(err).To(BeNil())
+ if cgroupsv2 {
+ Skip("systemd test does not work in cgroups V2 mode yet")
+ }
+
+ systemdImage := "fedora"
+ pull := podmanTest.Podman([]string{"pull", systemdImage})
+ pull.WaitWithDefaultTimeout()
+ Expect(pull.ExitCode()).To(Equal(0))
+
+ ctrName := "testSystemd"
+ run := podmanTest.Podman([]string{"run", "--name", ctrName, "-t", "-i", "-d", systemdImage, "init"})
+ run.WaitWithDefaultTimeout()
+ Expect(run.ExitCode()).To(Equal(0))
+ ctrID := run.OutputToString()
+
+ logs := podmanTest.Podman([]string{"logs", ctrName})
+ logs.WaitWithDefaultTimeout()
+ Expect(logs.ExitCode()).To(Equal(0))
+
+ // Give container 10 seconds to start
+ started := false
+ for i := 0; i < 10; i++ {
+ runningCtrs := podmanTest.Podman([]string{"ps", "-q", "--no-trunc"})
+ runningCtrs.WaitWithDefaultTimeout()
+ Expect(runningCtrs.ExitCode()).To(Equal(0))
+
+ if strings.Contains(runningCtrs.OutputToString(), ctrID) {
+ started = true
+ break
+ }
+
+ time.Sleep(1 * time.Second)
+ }
+
+ Expect(started).To(BeTrue())
+
+ systemctl := podmanTest.Podman([]string{"exec", "-t", "-i", ctrName, "systemctl", "status", "--no-pager"})
+ systemctl.WaitWithDefaultTimeout()
+ Expect(systemctl.ExitCode()).To(Equal(0))
+ Expect(strings.Contains(systemctl.OutputToString(), "State:")).To(BeTrue())
+ })
})
diff --git a/test/system/065-cp.bats b/test/system/065-cp.bats
new file mode 100644
index 000000000..204065bdb
--- /dev/null
+++ b/test/system/065-cp.bats
@@ -0,0 +1,229 @@
+#!/usr/bin/env bats -*- bats -*-
+#
+# Tests for 'podman cp'
+#
+# ASSUMPTION FOR ALL THESE TESTS: /tmp in the container starts off empty
+#
+
+load helpers
+
+# Create two random-name random-content files in /tmp in the container
+# podman-cp them into the host using '/tmp/*', i.e. asking podman to
+# perform wildcard expansion in the container. We should get both
+# files copied into the host.
+@test "podman cp * - wildcard copy multiple files from container to host" {
+ skip_if_remote "podman-remote does not yet handle cp"
+
+ srcdir=$PODMAN_TMPDIR/cp-test-in
+ dstdir=$PODMAN_TMPDIR/cp-test-out
+ mkdir -p $srcdir $dstdir
+
+ rand_filename1=$(random_string 20)
+ rand_content1=$(random_string 50)
+ rand_filename2=$(random_string 20)
+ rand_content2=$(random_string 50)
+
+ run_podman run --name cpcontainer $IMAGE sh -c \
+ "echo $rand_content1 >/tmp/$rand_filename1;
+ echo $rand_content2 >/tmp/$rand_filename2"
+
+ run_podman cp 'cpcontainer:/tmp/*' $dstdir
+
+ test -e $dstdir/$rand_filename1 || die "file 1 not copied from container"
+ test -e $dstdir/$rand_filename2 || die "file 2 not copied from container"
+
+ is "$(<$dstdir/$rand_filename1)" "$rand_content1" "content of file 1"
+ is "$(<$dstdir/$rand_filename2)" "$rand_content2" "content of file 2"
+
+ run_podman rm cpcontainer
+}
+
+
+# Create a file on the host; make a symlink in the container pointing
+# into host-only space. Try to podman-cp that symlink. It should fail.
+@test "podman cp - will not recognize symlink pointing into host space" {
+ skip_if_remote "podman-remote does not yet handle cp"
+ skip "BROKEN: PLEASE ENABLE ONCE #3829 GETS FIXED"
+
+ srcdir=$PODMAN_TMPDIR/cp-test-in
+ dstdir=$PODMAN_TMPDIR/cp-test-out
+ mkdir -p $srcdir $dstdir
+ echo "this file is on the host" >$srcdir/hostfile
+
+ run_podman run --name cpcontainer $IMAGE \
+ sh -c "ln -s $srcdir/hostfile /tmp/badlink"
+ # This should fail because, from the container's perspective, the symlink
+ # points to a nonexistent file
+ run_podman 125 cp 'cpcontainer:/tmp/*' $dstdir/
+
+ # FIXME: this might not be exactly the right error message
+ is "$output" ".*error evaluating symlinks.*lstat.*no such file or dir" \
+ "Expected error from copying invalid symlink"
+
+ # make sure there are no files in dstdir
+ is "$(/bin/ls -1 $dstdir)" "" "incorrectly copied symlink from host"
+
+ run_podman rm cpcontainer
+}
+
+
+# Issue #3829 - like the above, but with a level of indirection in the
+# wildcard expansion: create a file on the host; create a symlink in
+# the container named 'file1' pointing to this file; then another symlink
+# in the container pointing to 'file*' (file star). Try to podman-cp
+# this invalid double symlink. It must fail.
+@test "podman cp - will not expand globs in host space (#3829)" {
+ skip_if_remote "podman-remote does not yet handle cp"
+ skip "BROKEN: PLEASE ENABLE ONCE #3829 GETS FIXED"
+
+ srcdir=$PODMAN_TMPDIR/cp-test-in
+ dstdir=$PODMAN_TMPDIR/cp-test-out
+ mkdir -p $srcdir $dstdir
+ echo "This file is on the host" > $srcdir/hostfile
+
+ run_podman run --name cpcontainer $IMAGE \
+ sh -c "ln -s $srcdir/hostfile file1;ln -s file\* copyme"
+ run_podman 125 cp cpcontainer:copyme $dstdir
+
+ is "$output" ".*error evaluating symlinks.*lstat.*no such file or dir" \
+ "Expected error from copying invalid symlink"
+
+ # make sure there are no files in dstdir
+ is "$(/bin/ls -1 $dstdir)" "" "incorrectly copied symlink from host"
+
+ run_podman rm cpcontainer
+}
+
+
+# Another symlink into host space, this one named '*' (star). cp should fail.
+@test "podman cp - will not expand wildcard" {
+ skip_if_remote "podman-remote does not yet handle cp"
+
+ srcdir=$PODMAN_TMPDIR/cp-test-in
+ dstdir=$PODMAN_TMPDIR/cp-test-out
+ mkdir -p $srcdir $dstdir
+ echo "This file lives on the host" > $srcdir/hostfile
+
+ run_podman run --name cpcontainer $IMAGE \
+ sh -c "ln -s $srcdir/hostfile /tmp/\*"
+ run_podman 125 cp 'cpcontainer:/tmp/*' $dstdir
+
+ is "$output" ".*error evaluating symlinks.*lstat.*no such file or dir" \
+ "Expected error from copying invalid symlink"
+
+ # dstdir must be empty
+ is "$(/bin/ls -1 $dstdir)" "" "incorrectly copied symlink from host"
+
+ run_podman rm cpcontainer
+}
+
+###############################################################################
+# cp INTO container
+
+# THIS IS EXTREMELY WEIRD. Podman expands symlinks in weird ways.
+@test "podman cp into container: weird symlink expansion" {
+ skip_if_remote "podman-remote does not yet handle cp"
+
+ srcdir=$PODMAN_TMPDIR/cp-test-in
+ dstdir=$PODMAN_TMPDIR/cp-test-out
+ mkdir -p $srcdir $dstdir
+
+ rand_filename1=$(random_string 20)
+ rand_content1=$(random_string 50)
+ echo $rand_content1 > $srcdir/$rand_filename1
+
+ rand_filename2=$(random_string 20)
+ rand_content2=$(random_string 50)
+ echo $rand_content2 > $srcdir/$rand_filename2
+
+ rand_filename3=$(random_string 20)
+ rand_content3=$(random_string 50)
+ echo $rand_content3 > $srcdir/$rand_filename3
+
+ # Create tmp subdirectories in container, most with an invalid 'x' symlink
+ # Keep container running so we can exec into it.
+ run_podman run -d --name cpcontainer $IMAGE \
+ sh -c "mkdir /tmp/d1;ln -s /tmp/nonesuch1 /tmp/d1/x;
+ mkdir /tmp/d2;ln -s /tmp/nonesuch2 /tmp/d2/x;
+ mkdir /tmp/d3;
+ trap 'exit 0' 15;while :;do sleep 0.5;done"
+
+ # Copy file from host into container, into a file named 'x'
+ # Note that the second has a trailing slash; this will trigger mkdir
+ run_podman cp $srcdir/$rand_filename1 cpcontainer:/tmp/d1/x
+ is "$output" "" "output from podman cp 1"
+
+ run_podman cp $srcdir/$rand_filename2 cpcontainer:/tmp/d2/x/
+ is "$output" "" "output from podman cp 3"
+
+ run_podman cp $srcdir/$rand_filename3 cpcontainer:/tmp/d3/x
+ is "$output" "" "output from podman cp 3"
+
+ # Read back.
+ # In the first case, podman actually creates the file nonesuch1 (i.e.
+ # podman expands 'x -> nonesuch1' and, instead of overwriting x,
+ # creates an actual file).
+ run_podman exec cpcontainer cat /tmp/nonesuch1
+ is "$output" "$rand_content1" "cp creates destination file"
+
+ # In the second case, podman creates a directory nonesuch2, then
+ # creates a file with the same name as the input file. THIS IS WEIRD!
+ run_podman exec cpcontainer cat /tmp/nonesuch2/$rand_filename2
+ is "$output" "$rand_content2" "cp creates destination dir and file"
+
+ # In the third case, podman (correctly imo) creates a file named 'x'
+ run_podman exec cpcontainer cat /tmp/d3/x
+ is "$output" "$rand_content3" "cp creates file named x"
+
+ run_podman rm -f cpcontainer
+
+
+}
+
+
+# rhbz1741718 : file copied into container:/var/lib/foo appears as /foo
+# (docker only, never seems to have affected podman. Make sure it never does).
+@test "podman cp into a subdirectory matching GraphRoot" {
+ skip_if_remote "podman-remote does not yet handle cp"
+
+ # Create tempfile with random name and content
+ srcdir=$PODMAN_TMPDIR/cp-test-in
+ mkdir -p $srcdir
+ rand_filename=$(random_string 20)
+ rand_content=$(random_string 50)
+ echo $rand_content > $srcdir/$rand_filename
+ chmod 644 $srcdir/$rand_filename
+
+ # Determine path to podman storage (eg /var/lib/c/s, or $HOME/.local/...)
+ run_podman info --format '{{.store.GraphRoot}}'
+ graphroot=$output
+
+ # Create that directory in the container, and sleep (to keep container
+ # running, so we can exec into it). The trap/while is so podman-rm will
+ # run quickly instead of taking 10 seconds.
+ run_podman run -d --name cpcontainer $IMAGE sh -c \
+ "mkdir -p $graphroot; trap 'exit 0' 15;while :;do sleep 0.5;done"
+
+ # Copy from host into container.
+ run_podman cp $srcdir/$rand_filename cpcontainer:$graphroot/$rand_filename
+
+ # ls, and confirm it's there.
+ run_podman exec cpcontainer ls -l $graphroot/$rand_filename
+ is "$output" "-rw-r--r-- .* 1 .* root .* 51 .* $graphroot/$rand_filename" \
+ "File is copied into container in the correct (full) path"
+
+ # Confirm it has the expected content (this is unlikely to ever fail)
+ run_podman exec cpcontainer cat $graphroot/$rand_filename
+ is "$output" "$rand_content" "Contents of file copied into container"
+
+ run_podman rm -f cpcontainer
+}
+
+
+function teardown() {
+ # In case any test fails, clean up the container we left behind
+ run_podman rm -f cpcontainer
+ basic_teardown
+}
+
+# vim: filetype=sh