author     OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>  2022-02-23 16:43:02 -0500
committer  GitHub <noreply@github.com>  2022-02-23 16:43:02 -0500
commit     49d511b6ee88ceff70cc6786c467bed39da35a61 (patch)
tree       3e0556416de59ee73fadde87f93a450b83199c86
parent     172b745d0e796327cc23c19e61973365bb1488ef (diff)
parent     cfcc0d6398b2bbddace991ba36d92c631f2b5f45 (diff)
Merge pull request #13329 from mheon/bump_401
Bump to v4.0.1
-rw-r--r--  Makefile                                     |  6
-rw-r--r--  RELEASE_NOTES.md                             |  7
-rw-r--r--  cmd/podman/main.go                           |  8
-rw-r--r--  cmd/podman/play/kube.go                      |  6
-rw-r--r--  contrib/modules-load.d/podman-iptables.conf  |  5
-rw-r--r--  docs/source/markdown/podman-play-kube.1.md   |  5
-rw-r--r--  libpod/container_inspect.go                  | 11
-rw-r--r--  libpod/networking_linux.go                   |  2
-rw-r--r--  libpod/oci_conmon_linux.go                   |  4
-rw-r--r--  libpod/runtime.go                            | 11
-rw-r--r--  libpod/runtime_cstorage.go                   | 12
-rw-r--r--  libpod/runtime_ctr.go                        | 40
-rw-r--r--  libpod/runtime_img.go                        |  3
-rw-r--r--  libpod/runtime_pod.go                        | 18
-rw-r--r--  libpod/runtime_pod_linux.go                  |  9
-rw-r--r--  libpod/runtime_volume.go                     | 18
-rw-r--r--  libpod/runtime_volume_linux.go               |  3
-rw-r--r--  pkg/domain/entities/play.go                  |  2
-rw-r--r--  pkg/domain/infra/abi/play.go                 |  2
-rw-r--r--  pkg/specgen/generate/kube/kube.go            | 16
-rw-r--r--  pkg/specgen/generate/kube/kube_test.go       | 42
-rw-r--r--  pkg/util/utils.go                            |  2
-rw-r--r--  pkg/util/utils_supported.go                  | 50
-rw-r--r--  podman.spec.rpkg                             |  6
-rw-r--r--  test/e2e/play_build_test.go                  | 47
-rw-r--r--  test/e2e/volume_plugin_test.go               |  8
-rw-r--r--  test/system/001-basic.bats                   | 20
-rw-r--r--  test/system/070-build.bats                   | 21
-rw-r--r--  test/system/300-cli-parsing.bats             | 14
-rw-r--r--  test/system/800-config.bats                  | 80
-rw-r--r--  test/system/helpers.bash                     |  4
-rw-r--r--  version/version.go                           |  2

32 files changed, 313 insertions(+), 171 deletions(-)
diff --git a/Makefile b/Makefile
index 0a5389ce9..cb230d8e9 100644
--- a/Makefile
+++ b/Makefile
@@ -44,6 +44,7 @@ MANDIR ?= ${PREFIX}/share/man
SHAREDIR_CONTAINERS ?= ${PREFIX}/share/containers
ETCDIR ?= ${PREFIX}/etc
TMPFILESDIR ?= ${PREFIX}/lib/tmpfiles.d
+MODULESLOADDIR ?= ${PREFIX}/lib/modules-load.d
SYSTEMDDIR ?= ${PREFIX}/lib/systemd/system
USERSYSTEMDDIR ?= ${PREFIX}/lib/systemd/user
REMOTETAGS ?= remote exclude_graphdriver_btrfs btrfs_noversion exclude_graphdriver_devicemapper containers_image_openpgp
@@ -779,6 +780,11 @@ install.bin:
install ${SELINUXOPT} -m 755 -d ${DESTDIR}${TMPFILESDIR}
install ${SELINUXOPT} -m 644 contrib/tmpfile/podman.conf ${DESTDIR}${TMPFILESDIR}/podman.conf
+.PHONY: install.modules-load
+install.modules-load: # This should only be used by distros which might use iptables-legacy; it is not needed on RHEL
+ install ${SELINUXOPT} -m 755 -d ${DESTDIR}${MODULESLOADDIR}
+ install ${SELINUXOPT} -m 644 contrib/modules-load.d/podman-iptables.conf ${DESTDIR}${MODULESLOADDIR}/podman-iptables.conf
+
.PHONY: install.man
install.man:
install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(MANDIR)/man1
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 4c07b033a..bb062c483 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,5 +1,12 @@
# Release Notes
+## 4.0.1
+### Bugfixes
+- Fixed a bug where the `podman play kube` command did not honor the `mountPropagation` field in Pod YAML ([#13322](https://github.com/containers/podman/issues/13322)).
+- Fixed a bug where the `--build=false` option to `podman play kube` was not honored ([#13285](https://github.com/containers/podman/issues/13285)).
+- Fixed a bug where a container using volumes from another container (via `--volumes-from`) could, under certain circumstances, exit with errors saying it could not delete some volumes if the other container had not exited before it ([#12808](https://github.com/containers/podman/issues/12808)).
+- Fixed a bug where the `CONTAINERS_CONF` environment variable was not propagated to Conmon, which could result in Podman cleanup processes being run with incorrect configurations.
+
## 4.0.0
### Features
- Podman has seen an extensive rewrite of its network stack to add support for Netavark, a new tool for configuring container networks, in addition to the existing CNI stack. Netavark will be default on new installations when it is available.
diff --git a/cmd/podman/main.go b/cmd/podman/main.go
index 9850f5d27..4f8131653 100644
--- a/cmd/podman/main.go
+++ b/cmd/podman/main.go
@@ -72,6 +72,8 @@ func parseCommands() *cobra.Command {
}
parent.AddCommand(c.Command)
+ c.Command.SetFlagErrorFunc(flagErrorFuncfunc)
+
// - templates need to be set here, as PersistentPreRunE() is
// not called when --help is used.
// - rootCmd uses cobra default template not ours
@@ -84,5 +86,11 @@ func parseCommands() *cobra.Command {
os.Exit(1)
}
+ rootCmd.SetFlagErrorFunc(flagErrorFuncfunc)
return rootCmd
}
+
+func flagErrorFuncfunc(c *cobra.Command, e error) error {
+ e = fmt.Errorf("%w\nSee '%s --help'", e, c.CommandPath())
+ return e
+}
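
The hunk above registers `flagErrorFuncfunc` on the root command and on every subcommand so that flag-parsing errors carry a `See '<command> --help'` hint; the system tests in 001-basic.bats and 300-cli-parsing.bats below check the resulting output. A minimal standalone sketch of the same cobra pattern, assuming a hypothetical `demo` command that is not part of Podman:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{
		Use:           "demo", // hypothetical command, for illustration only
		SilenceUsage:  true,   // let the caller print the error, as Podman does
		SilenceErrors: true,
		RunE:          func(cmd *cobra.Command, args []string) error { return nil },
	}

	// Wrap every flag-parsing error with a pointer to the failing command's --help.
	rootCmd.SetFlagErrorFunc(func(c *cobra.Command, err error) error {
		return fmt.Errorf("%w\nSee '%s --help'", err, c.CommandPath())
	})

	rootCmd.SetArgs([]string{"--invalid"}) // trigger an unknown-flag error
	if err := rootCmd.Execute(); err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(125)
	}
}
```
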
diff --git a/cmd/podman/play/kube.go b/cmd/podman/play/kube.go
index ccf6ea861..1a430f2dc 100644
--- a/cmd/podman/play/kube.go
+++ b/cmd/podman/play/kube.go
@@ -27,6 +27,7 @@ type playKubeOptionsWrapper struct {
TLSVerifyCLI bool
CredentialsCLI string
StartCLI bool
+ BuildCLI bool
}
var (
@@ -117,7 +118,7 @@ func init() {
_ = kubeCmd.RegisterFlagCompletionFunc(configmapFlagName, completion.AutocompleteDefault)
buildFlagName := "build"
- flags.BoolVar(&kubeOptions.Build, buildFlagName, false, "Build all images in a YAML (given Containerfiles exist)")
+ flags.BoolVar(&kubeOptions.BuildCLI, buildFlagName, false, "Build all images in a YAML (given Containerfiles exist)")
}
if !registry.IsRemote() {
@@ -138,6 +139,9 @@ func kube(cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("start") {
kubeOptions.Start = types.NewOptionalBool(kubeOptions.StartCLI)
}
+ if cmd.Flags().Changed("build") {
+ kubeOptions.Build = types.NewOptionalBool(kubeOptions.BuildCLI)
+ }
if kubeOptions.Authfile != "" {
if _, err := os.Stat(kubeOptions.Authfile); err != nil {
return err
diff --git a/contrib/modules-load.d/podman-iptables.conf b/contrib/modules-load.d/podman-iptables.conf
new file mode 100644
index 000000000..001ef8af8
--- /dev/null
+++ b/contrib/modules-load.d/podman-iptables.conf
@@ -0,0 +1,5 @@
+# On Fedora 36, ip_tables is no longer auto-loaded and rootless users have no permission to load it.
+# When we have actual nftables support in the future we might want to revisit this.
+# If you use iptables-nft this is not needed.
+ip_tables
+ip6_tables
diff --git a/docs/source/markdown/podman-play-kube.1.md b/docs/source/markdown/podman-play-kube.1.md
index 6d02af80d..f85ea9046 100644
--- a/docs/source/markdown/podman-play-kube.1.md
+++ b/docs/source/markdown/podman-play-kube.1.md
@@ -67,7 +67,8 @@ like:
```
The build will consider `foobar` to be the context directory for the build. If there is an image in local storage
-called `foobar`, the image will not be built unless the `--build` flag is used.
+called `foobar`, the image will not be built unless the `--build` flag is used. Use `--build=false` to completely
+disable builds.
`Kubernetes ConfigMap`
@@ -115,7 +116,7 @@ environment variable. `export REGISTRY_AUTH_FILE=path`
#### **--build**
-Build images even if they are found in the local storage.
+Build images even if they are found in the local storage. Use `--build=false` to completely disable builds.
#### **--cert-dir**=*path*
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 1344fc659..0bbfe3b70 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -51,6 +51,17 @@ func (c *Container) Inspect(size bool) (*define.InspectContainerData, error) {
return c.inspectLocked(size)
}
+func (c *Container) volumesFrom() ([]string, error) {
+ ctrSpec, err := c.specFromState()
+ if err != nil {
+ return nil, err
+ }
+ if ctrs, ok := ctrSpec.Annotations[define.InspectAnnotationVolumesFrom]; ok {
+ return strings.Split(ctrs, ","), nil
+ }
+ return nil, nil
+}
+
func (c *Container) getContainerInspectData(size bool, driverData *define.DriverData) (*define.InspectContainerData, error) {
config := c.config
runtimeInfo := c.state
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 19d5c7f76..29b9941fe 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -320,7 +320,7 @@ func (r *RootlessNetNS) Cleanup(runtime *Runtime) error {
// only if the netns is empty we know that we do not need cleanup
return c.state.NetNS != nil
}
- ctrs, err := runtime.GetContainersWithoutLock(activeNetns)
+ ctrs, err := runtime.GetContainers(activeNetns)
if err != nil {
return err
}
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 268a301fb..a328f7621 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -1318,6 +1318,10 @@ func (r *ConmonOCIRuntime) configureConmonEnv(ctr *Container, runtimeDir string)
env = append(env, e)
}
}
+ conf, ok := os.LookupEnv("CONTAINERS_CONF")
+ if ok {
+ env = append(env, fmt.Sprintf("CONTAINERS_CONF=%s", conf))
+ }
env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
env = append(env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED")))
env = append(env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID")))
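
The two added lines above make conmon inherit an explicitly set `CONTAINERS_CONF`, so the cleanup process conmon later spawns reads the same configuration; this is what the new 800-config.bats test below verifies. A minimal sketch of the general pattern, not Podman's actual conmon invocation (`child-cleanup` is a hypothetical helper binary):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// childEnv builds a reduced environment for a helper process, forwarding
// CONTAINERS_CONF only when it was set for the parent process.
func childEnv(runtimeDir string) []string {
	env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
	if conf, ok := os.LookupEnv("CONTAINERS_CONF"); ok {
		env = append(env, fmt.Sprintf("CONTAINERS_CONF=%s", conf))
	}
	return env
}

func main() {
	cmd := exec.Command("child-cleanup") // hypothetical stand-in for conmon
	cmd.Env = childEnv("/run/user/1000")
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```
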
diff --git a/libpod/runtime.go b/libpod/runtime.go
index dcf8c83f1..d19997709 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -11,7 +11,6 @@ import (
"regexp"
"strconv"
"strings"
- "sync"
"syscall"
"time"
@@ -109,7 +108,6 @@ type Runtime struct {
// and remains true until the runtime is shut down (rendering its
// storage unusable). When valid is false, the runtime cannot be used.
valid bool
- lock sync.RWMutex
// mechanism to read and write even logs
eventer events.Eventer
@@ -713,9 +711,6 @@ func (r *Runtime) TmpDir() (string, error) {
// Note that the returned value is not a copy and must hence
// only be used in a reading fashion.
func (r *Runtime) GetConfigNoCopy() (*config.Config, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -810,9 +805,6 @@ func (r *Runtime) DeferredShutdown(force bool) {
// cleaning up; if force is false, an error will be returned if there are
// still containers running or mounted
func (r *Runtime) Shutdown(force bool) error {
- r.lock.Lock()
- defer r.lock.Unlock()
-
if !r.valid {
return define.ErrRuntimeStopped
}
@@ -1016,9 +1008,6 @@ func (r *Runtime) RunRoot() string {
// If the given ID does not correspond to any existing Pod or Container,
// ErrNoSuchCtr is returned.
func (r *Runtime) GetName(id string) (string, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return "", define.ErrRuntimeStopped
}
diff --git a/libpod/runtime_cstorage.go b/libpod/runtime_cstorage.go
index 026cab3c5..1c528e1b8 100644
--- a/libpod/runtime_cstorage.go
+++ b/libpod/runtime_cstorage.go
@@ -21,9 +21,6 @@ type StorageContainer struct {
// ListStorageContainers lists all containers visible to c/storage.
func (r *Runtime) ListStorageContainers() ([]*StorageContainer, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
finalCtrs := []*StorageContainer{}
ctrs, err := r.store.Containers()
@@ -61,15 +58,6 @@ func (r *Runtime) StorageContainer(idOrName string) (*storage.Container, error)
// Accepts ID or full name of container.
// If force is set, the container will be unmounted first to ensure removal.
func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
- r.lock.Lock()
- defer r.lock.Unlock()
-
- return r.removeStorageContainer(idOrName, force)
-}
-
-// Internal function to remove the container storage without
-// locking the runtime.
-func (r *Runtime) removeStorageContainer(idOrName string, force bool) error {
targetID, err := r.store.Lookup(idOrName)
if err != nil {
if errors.Cause(err) == storage.ErrLayerUnknown {
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 44364100e..fc1a688fb 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -42,8 +42,6 @@ type ContainerFilter func(*Container) bool
// NewContainer creates a new container from a given OCI config.
func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, spec *specgen.SpecGenerator, infra bool, options ...CtrCreateOption) (*Container, error) {
- r.lock.Lock()
- defer r.lock.Unlock()
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -81,8 +79,6 @@ func (r *Runtime) PrepareVolumeOnCreateContainer(ctx context.Context, ctr *Conta
// RestoreContainer re-creates a container from an imported checkpoint
func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
- r.lock.Lock()
- defer r.lock.Unlock()
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -545,8 +541,6 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// be removed also if and only if the container is the sole user
// Otherwise, RemoveContainer will return an error if the container is running
func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool, removeVolume bool, timeout *uint) error {
- r.lock.Lock()
- defer r.lock.Unlock()
return r.removeContainer(ctx, c, force, removeVolume, false, timeout)
}
@@ -768,6 +762,14 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
continue
}
if err := runtime.removeVolume(ctx, volume, false, timeout); err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
+ if errors.Cause(err) == define.ErrVolumeBeingUsed {
+ // Ignore error, since podman will report original error
+ volumesFrom, _ := c.volumesFrom()
+ if len(volumesFrom) > 0 {
+ logrus.Debugf("Cleanup volume not possible since volume is in use (%s)", v)
+ continue
+ }
+ }
logrus.Errorf("Cleanup volume (%s): %v", v, err)
}
}
@@ -784,8 +786,6 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// If removeVolume is specified, named volumes used by the container will
// be removed also if and only if the container is the sole user.
func (r *Runtime) EvictContainer(ctx context.Context, idOrName string, removeVolume bool) (string, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
return r.evictContainer(ctx, idOrName, removeVolume)
}
@@ -894,7 +894,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
}
// Remove container from c/storage
- if err := r.removeStorageContainer(id, true); err != nil {
+ if err := r.RemoveStorageContainer(id, true); err != nil {
if cleanupErr == nil {
cleanupErr = err
}
@@ -972,9 +972,6 @@ func (r *Runtime) RemoveDepend(ctx context.Context, rmCtr *Container, force bool
// GetContainer retrieves a container by its ID
func (r *Runtime) GetContainer(id string) (*Container, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -984,9 +981,6 @@ func (r *Runtime) GetContainer(id string) (*Container, error) {
// HasContainer checks if a container with the given ID is present
func (r *Runtime) HasContainer(id string) (bool, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return false, define.ErrRuntimeStopped
}
@@ -997,9 +991,6 @@ func (r *Runtime) HasContainer(id string) (bool, error) {
// LookupContainer looks up a container by its name or a partial ID
// If a partial ID is not unique, an error will be returned
func (r *Runtime) LookupContainer(idOrName string) (*Container, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -1009,9 +1000,6 @@ func (r *Runtime) LookupContainer(idOrName string) (*Container, error) {
// LookupContainerId looks up a container id by its name or a partial ID
// If a partial ID is not unique, an error will be returned
func (r *Runtime) LookupContainerID(idOrName string) (string, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return "", define.ErrRuntimeStopped
}
@@ -1023,13 +1011,6 @@ func (r *Runtime) LookupContainerID(idOrName string) (string, error) {
// the output. Multiple filters are handled by ANDing their output, so only
// containers matching all filters are returned
func (r *Runtime) GetContainers(filters ...ContainerFilter) ([]*Container, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
- return r.GetContainersWithoutLock(filters...)
-}
-
-// GetContainersWithoutLock is same as GetContainers but without lock
-func (r *Runtime) GetContainersWithoutLock(filters ...ContainerFilter) ([]*Container, error) {
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -1107,9 +1088,6 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
// GetExecSessionContainer gets the container that a given exec session ID is
// attached to.
func (r *Runtime) GetExecSessionContainer(id string) (*Container, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index e3b439dd1..54eadf6b8 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -25,9 +25,6 @@ import (
// we can use the libpod-internal removal logic.
func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage.RemoveContainerFunc {
return func(imageID string) error {
- r.lock.Lock()
- defer r.lock.Unlock()
-
if !r.valid {
return define.ErrRuntimeStopped
}
diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go
index 11891630a..dca0ffc8a 100644
--- a/libpod/runtime_pod.go
+++ b/libpod/runtime_pod.go
@@ -27,9 +27,6 @@ type PodFilter func(*Pod) bool
// being removed
// Otherwise, the pod will not be removed if any containers are running
func (r *Runtime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool, timeout *uint) error {
- r.lock.Lock()
- defer r.lock.Unlock()
-
if !r.valid {
return define.ErrRuntimeStopped
}
@@ -50,9 +47,6 @@ func (r *Runtime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// GetPod retrieves a pod by its ID
func (r *Runtime) GetPod(id string) (*Pod, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -62,9 +56,6 @@ func (r *Runtime) GetPod(id string) (*Pod, error) {
// HasPod checks to see if a pod with the given ID exists
func (r *Runtime) HasPod(id string) (bool, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return false, define.ErrRuntimeStopped
}
@@ -75,9 +66,6 @@ func (r *Runtime) HasPod(id string) (bool, error) {
// LookupPod retrieves a pod by its name or a partial ID
// If a partial ID is not unique, an error will be returned
func (r *Runtime) LookupPod(idOrName string) (*Pod, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -111,9 +99,6 @@ func (r *Runtime) Pods(filters ...PodFilter) ([]*Pod, error) {
// GetAllPods retrieves all pods
func (r *Runtime) GetAllPods() ([]*Pod, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -148,9 +133,6 @@ func (r *Runtime) GetRunningPods() ([]*Pod, error) {
pods []string
runningPods []*Pod
)
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 7bc675af7..155ad5c2d 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -22,9 +22,6 @@ import (
// NewPod makes a new, empty pod
func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, options ...PodCreateOption) (_ *Pod, deferredErr error) {
- r.lock.Lock()
- defer r.lock.Unlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -151,9 +148,6 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
// AddInfra adds the created infra container to the pod state
func (r *Runtime) AddInfra(ctx context.Context, pod *Pod, infraCtr *Container) (*Pod, error) {
- r.lock.Lock()
- defer r.lock.Unlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -167,9 +161,6 @@ func (r *Runtime) AddInfra(ctx context.Context, pod *Pod, infraCtr *Container) (
// SavePod is a helper function to save the pod state from outside of libpod
func (r *Runtime) SavePod(pod *Pod) error {
- r.lock.Lock()
- defer r.lock.Unlock()
-
if !r.valid {
return define.ErrRuntimeStopped
}
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index a3be0ff5b..21bf8aefc 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -22,9 +22,6 @@ type VolumeFilter func(*Volume) bool
// RemoveVolume removes a volumes
func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool, timeout *uint) error {
- r.lock.Lock()
- defer r.lock.Unlock()
-
if !r.valid {
return define.ErrRuntimeStopped
}
@@ -41,9 +38,6 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool, timeo
// GetVolume retrieves a volume given its full name.
func (r *Runtime) GetVolume(name string) (*Volume, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -58,9 +52,6 @@ func (r *Runtime) GetVolume(name string) (*Volume, error) {
// LookupVolume retrieves a volume by unambiguous partial name.
func (r *Runtime) LookupVolume(name string) (*Volume, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -75,9 +66,6 @@ func (r *Runtime) LookupVolume(name string) (*Volume, error) {
// HasVolume checks to see if a volume with the given name exists
func (r *Runtime) HasVolume(name string) (bool, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return false, define.ErrRuntimeStopped
}
@@ -90,9 +78,6 @@ func (r *Runtime) HasVolume(name string) (bool, error) {
// output. If multiple filters are used, a volume will be returned if
// any of the filters are matched
func (r *Runtime) Volumes(filters ...VolumeFilter) ([]*Volume, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
@@ -123,9 +108,6 @@ func (r *Runtime) Volumes(filters ...VolumeFilter) ([]*Volume, error) {
// GetAllVolumes retrieves all the volumes
func (r *Runtime) GetAllVolumes() ([]*Volume, error) {
- r.lock.RLock()
- defer r.lock.RUnlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 5fd68fffb..c4fe3db90 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -21,9 +21,6 @@ import (
// NewVolume creates a new empty volume
func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
- r.lock.Lock()
- defer r.lock.Unlock()
-
if !r.valid {
return nil, define.ErrRuntimeStopped
}
diff --git a/pkg/domain/entities/play.go b/pkg/domain/entities/play.go
index 39234caf8..43fa3a712 100644
--- a/pkg/domain/entities/play.go
+++ b/pkg/domain/entities/play.go
@@ -11,7 +11,7 @@ type PlayKubeOptions struct {
// Authfile - path to an authentication file.
Authfile string
// Indicator to build all images with Containerfile or Dockerfile
- Build bool
+ Build types.OptionalBool
// CertDir - to a directory containing TLS certifications and keys.
CertDir string
// Down indicates whether to bring contents of a yaml file "down"
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 86a60e92d..308a1d0ee 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -476,7 +476,7 @@ func (ic *ContainerEngine) getImageAndLabelInfo(ctx context.Context, cwd string,
if err != nil {
return nil, nil, err
}
- if (len(buildFile) > 0 && !existsLocally) || (len(buildFile) > 0 && options.Build) {
+ if (len(buildFile) > 0) && ((!existsLocally && options.Build != types.OptionalBoolFalse) || (options.Build == types.OptionalBoolTrue)) {
buildOpts := new(buildahDefine.BuildOptions)
commonOpts := new(buildahDefine.CommonBuildOptions)
buildOpts.ConfigureNetwork = buildahDefine.NetworkDefault
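
The rewritten condition above is what makes `--build=false` effective: `Build` is now a three-state option (see the `types.OptionalBool` change in pkg/domain/entities/play.go), so a missing image is still built when the flag is unset, never built when the user passed `--build=false`, and rebuilt when `--build=true` is given even if the image exists locally. A self-contained sketch of that decision logic, using a stand-in tri-state type rather than the real `types.OptionalBool`:

```go
package main

import "fmt"

// optionalBool is a stand-in for the three-state option used by play kube:
// unset (flag not given), explicitly true, or explicitly false.
type optionalBool int

const (
	optionalBoolUndefined optionalBool = iota
	optionalBoolTrue
	optionalBoolFalse
)

// shouldBuild mirrors the condition in pkg/domain/infra/abi/play.go: build when
// a Containerfile exists and either the image is missing locally (and the user
// did not pass --build=false) or --build=true was given.
func shouldBuild(haveBuildFile, existsLocally bool, build optionalBool) bool {
	return haveBuildFile &&
		((!existsLocally && build != optionalBoolFalse) || build == optionalBoolTrue)
}

func main() {
	fmt.Println(shouldBuild(true, false, optionalBoolUndefined)) // true: image missing, flag unset
	fmt.Println(shouldBuild(true, true, optionalBoolUndefined))  // false: image already in storage
	fmt.Println(shouldBuild(true, true, optionalBoolTrue))       // true: --build forces a rebuild
	fmt.Println(shouldBuild(true, false, optionalBoolFalse))     // false: --build=false disables builds
}
```
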
diff --git a/pkg/specgen/generate/kube/kube.go b/pkg/specgen/generate/kube/kube.go
index 2fd149b49..9872a7f40 100644
--- a/pkg/specgen/generate/kube/kube.go
+++ b/pkg/specgen/generate/kube/kube.go
@@ -319,7 +319,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
continue
}
- dest, options, err := parseMountPath(volume.MountPath, volume.ReadOnly)
+ dest, options, err := parseMountPath(volume.MountPath, volume.ReadOnly, volume.MountPropagation)
if err != nil {
return nil, err
}
@@ -385,7 +385,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
return s, nil
}
-func parseMountPath(mountPath string, readOnly bool) (string, []string, error) {
+func parseMountPath(mountPath string, readOnly bool, propagationMode *v1.MountPropagationMode) (string, []string, error) {
options := []string{}
splitVol := strings.Split(mountPath, ":")
if len(splitVol) > 2 {
@@ -405,6 +405,18 @@ func parseMountPath(mountPath string, readOnly bool) (string, []string, error) {
if err != nil {
return "", opts, errors.Wrapf(err, "parsing MountOptions")
}
+ if propagationMode != nil {
+ switch *propagationMode {
+ case v1.MountPropagationNone:
+ opts = append(opts, "private")
+ case v1.MountPropagationHostToContainer:
+ opts = append(opts, "rslave")
+ case v1.MountPropagationBidirectional:
+ opts = append(opts, "rshared")
+ default:
+ return "", opts, errors.Errorf("unknown propagation mode %q", *propagationMode)
+ }
+ }
return dest, opts, nil
}
diff --git a/pkg/specgen/generate/kube/kube_test.go b/pkg/specgen/generate/kube/kube_test.go
new file mode 100644
index 000000000..62793ebb6
--- /dev/null
+++ b/pkg/specgen/generate/kube/kube_test.go
@@ -0,0 +1,42 @@
+package kube
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ v1 "k8s.io/api/core/v1"
+ //"github.com/stretchr/testify/require"
+)
+
+func testPropagation(t *testing.T, propagation v1.MountPropagationMode, expected string) {
+ dest, options, err := parseMountPath("/to", false, &propagation)
+ assert.NoError(t, err)
+ assert.Equal(t, dest, "/to")
+ assert.Contains(t, options, expected)
+}
+
+func TestParseMountPathPropagation(t *testing.T) {
+ testPropagation(t, v1.MountPropagationNone, "private")
+ testPropagation(t, v1.MountPropagationHostToContainer, "rslave")
+ testPropagation(t, v1.MountPropagationBidirectional, "rshared")
+
+ prop := v1.MountPropagationMode("SpaceWave")
+ _, _, err := parseMountPath("/to", false, &prop)
+ assert.Error(t, err)
+
+ _, options, err := parseMountPath("/to", false, nil)
+ assert.NoError(t, err)
+ assert.NotContains(t, options, "private")
+ assert.NotContains(t, options, "rslave")
+ assert.NotContains(t, options, "rshared")
+}
+
+func TestParseMountPathRO(t *testing.T) {
+ _, options, err := parseMountPath("/to", true, nil)
+ assert.NoError(t, err)
+ assert.Contains(t, options, "ro")
+
+ _, options, err = parseMountPath("/to", false, nil)
+ assert.NoError(t, err)
+ assert.NotContains(t, options, "ro")
+}
diff --git a/pkg/util/utils.go b/pkg/util/utils.go
index 925ff9830..bdd1e1383 100644
--- a/pkg/util/utils.go
+++ b/pkg/util/utils.go
@@ -463,8 +463,6 @@ func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []strin
var (
rootlessConfigHomeDirOnce sync.Once
rootlessConfigHomeDir string
- rootlessRuntimeDirOnce sync.Once
- rootlessRuntimeDir string
)
type tomlOptionsConfig struct {
diff --git a/pkg/util/utils_supported.go b/pkg/util/utils_supported.go
index 848b35a45..e9d6bfa31 100644
--- a/pkg/util/utils_supported.go
+++ b/pkg/util/utils_supported.go
@@ -6,67 +6,21 @@ package util
// should work to take darwin from this
import (
- "fmt"
"os"
"path/filepath"
"syscall"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
// GetRuntimeDir returns the runtime directory
func GetRuntimeDir() (string, error) {
- var rootlessRuntimeDirError error
-
if !rootless.IsRootless() {
return "", nil
}
-
- rootlessRuntimeDirOnce.Do(func() {
- runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
- uid := fmt.Sprintf("%d", rootless.GetRootlessUID())
- if runtimeDir == "" {
- tmpDir := filepath.Join("/run", "user", uid)
- if err := os.MkdirAll(tmpDir, 0700); err != nil {
- logrus.Debug(err)
- }
- st, err := os.Stat(tmpDir)
- if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && (st.Mode().Perm()&0700 == 0700) {
- runtimeDir = tmpDir
- }
- }
- if runtimeDir == "" {
- tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%s", uid))
- if err := os.MkdirAll(tmpDir, 0700); err != nil {
- logrus.Debug(err)
- }
- st, err := os.Stat(tmpDir)
- if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && (st.Mode().Perm()&0700 == 0700) {
- runtimeDir = tmpDir
- }
- }
- if runtimeDir == "" {
- home := os.Getenv("HOME")
- if home == "" {
- rootlessRuntimeDirError = fmt.Errorf("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
- return
- }
- resolvedHome, err := filepath.EvalSymlinks(home)
- if err != nil {
- rootlessRuntimeDirError = errors.Wrapf(err, "cannot resolve %s", home)
- return
- }
- runtimeDir = filepath.Join(resolvedHome, "rundir")
- }
- rootlessRuntimeDir = runtimeDir
- })
-
- if rootlessRuntimeDirError != nil {
- return "", rootlessRuntimeDirError
- }
- return rootlessRuntimeDir, nil
+ return cutil.GetRuntimeDir()
}
// GetRootlessConfigHomeDir returns the config home directory when running as non root
diff --git a/podman.spec.rpkg b/podman.spec.rpkg
index 4068b3a81..a6f66ce98 100644
--- a/podman.spec.rpkg
+++ b/podman.spec.rpkg
@@ -203,6 +203,9 @@ PODMAN_VERSION=%{version} %{__make} DESTDIR=%{buildroot} PREFIX=%{_prefix} ETCDI
install.docker \
install.docker-docs \
install.remote \
+%if 0%{?fedora} >= 36
+ install.modules-load
+%endif
install -d -p %{buildroot}/%{_datadir}/%{name}/test/system
cp -pav test/system %{buildroot}/%{_datadir}/%{name}/test/
@@ -239,6 +242,9 @@ done
%{_userunitdir}/%{name}.socket
%{_userunitdir}/%{name}-restart.service
%{_usr}/lib/tmpfiles.d/%{name}.conf
+%if 0%{?fedora} >= 36
+ %{_usr}/lib/modules-load.d/%{name}-iptables.conf
+%endif
%files docker
%{_bindir}/docker
diff --git a/test/e2e/play_build_test.go b/test/e2e/play_build_test.go
index 70e042b4d..849ba7162 100644
--- a/test/e2e/play_build_test.go
+++ b/test/e2e/play_build_test.go
@@ -212,6 +212,53 @@ LABEL marge=mom
Expect(inspectData[0].Config.Labels).To(HaveKeyWithValue("marge", "mom"))
})
+ It("Do not build image at all if --build=false", func() {
+ // Setup
+ yamlDir := filepath.Join(tempdir, RandomString(12))
+ err := os.Mkdir(yamlDir, 0755)
+ Expect(err).To(BeNil(), "mkdir "+yamlDir)
+ err = writeYaml(testYAML, filepath.Join(yamlDir, "top.yaml"))
+ Expect(err).To(BeNil())
+
+ // build an image called foobar but make sure it doesn't have
+ // the same label as the yaml buildfile, so we can check that
+ // the image is NOT rebuilt.
+ err = writeYaml(prebuiltImage, filepath.Join(yamlDir, "Containerfile"))
+ Expect(err).To(BeNil())
+
+ app1Dir := filepath.Join(yamlDir, "foobar")
+ err = os.Mkdir(app1Dir, 0755)
+ Expect(err).To(BeNil())
+ err = writeYaml(playBuildFile, filepath.Join(app1Dir, "Containerfile"))
+ Expect(err).To(BeNil())
+ // Write a file to be copied
+ err = writeYaml(copyFile, filepath.Join(app1Dir, "copyfile"))
+ Expect(err).To(BeNil())
+
+ // Switch to temp dir and restore it afterwards
+ cwd, err := os.Getwd()
+ Expect(err).To(BeNil())
+ Expect(os.Chdir(yamlDir)).To(BeNil())
+ defer func() { (Expect(os.Chdir(cwd)).To(BeNil())) }()
+
+ // Build the image into the local store
+ build := podmanTest.Podman([]string{"build", "-t", "foobar", "-f", "Containerfile"})
+ build.WaitWithDefaultTimeout()
+ Expect(build).Should(Exit(0))
+
+ session := podmanTest.Podman([]string{"play", "kube", "--build=false", "top.yaml"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ inspect := podmanTest.Podman([]string{"container", "inspect", "top_pod-foobar"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect).Should(Exit(0))
+ inspectData := inspect.InspectContainerToJSON()
+ Expect(len(inspectData)).To(BeNumerically(">", 0))
+ Expect(inspectData[0].Config.Labels).To(Not(HaveKey("homer")))
+ Expect(inspectData[0].Config.Labels).To(HaveKeyWithValue("marge", "mom"))
+ })
+
It("--build should override image in store", func() {
// Setup
yamlDir := filepath.Join(tempdir, RandomString(12))
diff --git a/test/e2e/volume_plugin_test.go b/test/e2e/volume_plugin_test.go
index 959a44bb8..fd205805d 100644
--- a/test/e2e/volume_plugin_test.go
+++ b/test/e2e/volume_plugin_test.go
@@ -166,11 +166,13 @@ var _ = Describe("Podman volume plugins", func() {
create.WaitWithDefaultTimeout()
Expect(create).Should(Exit(0))
- ctr1 := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "-v", fmt.Sprintf("%v:/test", volName), ALPINE, "sh", "-c", "touch /test/testfile && echo helloworld > /test/testfile"})
+ ctr1Name := "ctr1"
+ ctr1 := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "--name", ctr1Name, "-v", fmt.Sprintf("%v:/test", volName), ALPINE, "sh", "-c", "touch /test/testfile && echo helloworld > /test/testfile"})
ctr1.WaitWithDefaultTimeout()
Expect(ctr1).Should(Exit(0))
- ctr2 := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "-v", fmt.Sprintf("%v:/test", volName), ALPINE, "cat", "/test/testfile"})
+ ctr2Name := "ctr2"
+ ctr2 := podmanTest.Podman([]string{"run", "--security-opt", "label=disable", "--name", ctr2Name, "-v", fmt.Sprintf("%v:/test", volName), ALPINE, "cat", "/test/testfile"})
ctr2.WaitWithDefaultTimeout()
Expect(ctr2).Should(Exit(0))
Expect(ctr2.OutputToString()).To(ContainSubstring("helloworld"))
@@ -178,7 +180,7 @@ var _ = Describe("Podman volume plugins", func() {
// HACK: `volume rm -f` is timing out trying to remove containers using the volume.
// Solution: remove them manually...
// TODO: fix this when I get back
- rmAll := podmanTest.Podman([]string{"rm", "-af"})
+ rmAll := podmanTest.Podman([]string{"rm", "-f", ctr2Name, ctr1Name})
rmAll.WaitWithDefaultTimeout()
Expect(rmAll).Should(Exit(0))
})
diff --git a/test/system/001-basic.bats b/test/system/001-basic.bats
index 9b0a71285..748377e4b 100644
--- a/test/system/001-basic.bats
+++ b/test/system/001-basic.bats
@@ -33,6 +33,23 @@ function setup() {
fi
}
+@test "podman info" {
+ # These will be displayed on the test output stream, offering an
+ # at-a-glance overview of important system configuration details
+ local -a want=(
+ 'Arch:{{.Host.Arch}}'
+ 'OS:{{.Host.Distribution.Distribution}}{{.Host.Distribution.Version}}'
+ 'Runtime:{{.Host.OCIRuntime.Name}}'
+ 'Rootless:{{.Host.Security.Rootless}}'
+ 'Events:{{.Host.EventLogger}}'
+ 'Logdriver:{{.Host.LogDriver}}'
+ 'Cgroups:{{.Host.CgroupsVersion}}+{{.Host.CgroupManager}}'
+ 'Net:{{.Host.NetworkBackend}}'
+ )
+ run_podman info --format "$(IFS='/' echo ${want[@]})"
+ echo "# $output" >&3
+}
+
@test "podman --context emits reasonable output" {
# All we care about here is that the command passes
@@ -88,7 +105,8 @@ function setup() {
# ...but no matter what, --remote is never allowed after subcommand
PODMAN="${podman_non_remote} ${podman_args[@]}" run_podman 125 version --remote
- is "$output" "Error: unknown flag: --remote" "podman version --remote"
+ is "$output" "Error: unknown flag: --remote
+See 'podman version --help'" "podman version --remote"
}
@test "podman-remote: defaults" {
diff --git a/test/system/070-build.bats b/test/system/070-build.bats
index a95acd986..c963d8325 100644
--- a/test/system/070-build.bats
+++ b/test/system/070-build.bats
@@ -1016,6 +1016,27 @@ EOF
run_podman build -t build_test $tmpdir/link
}
+@test "podman build --volumes-from conflict" {
+ rand_content=$(random_string 50)
+
+ tmpdir=$PODMAN_TMPDIR/build-test
+ mkdir -p $tmpdir
+ dockerfile=$tmpdir/Dockerfile
+ cat >$dockerfile <<EOF
+FROM $IMAGE
+VOLUME /vol
+EOF
+
+ run_podman build -t build_test $tmpdir
+ is "$output" ".*COMMIT" "COMMIT seen in log"
+
+ run_podman run -d --name test_ctr build_test top
+ run_podman run --rm --volumes-from test_ctr $IMAGE echo $rand_content
+ is "$output" "$rand_content" "No error should be thrown about volume in use"
+
+ run_podman rmi -f build_test
+}
+
function teardown() {
# A timeout or other error in 'build' can leave behind stale images
# that podman can't even see and which will cascade into subsequent
diff --git a/test/system/300-cli-parsing.bats b/test/system/300-cli-parsing.bats
index 92c073102..ec493d3d8 100644
--- a/test/system/300-cli-parsing.bats
+++ b/test/system/300-cli-parsing.bats
@@ -12,4 +12,18 @@ load helpers
run_podman run --rm --label 'true="false"' $IMAGE true
}
+@test "podman flag error" {
+ local name="podman"
+ if is_remote; then
+ name="podman-remote"
+ fi
+ run_podman 125 run -h
+ is "$output" "Error: flag needs an argument: 'h' in -h
+See '$name run --help'" "expected error output"
+
+ run_podman 125 bad --invalid
+ is "$output" "Error: unknown flag: --invalid
+See '$name --help'" "expected error output"
+}
+
# vim: filetype=sh
diff --git a/test/system/800-config.bats b/test/system/800-config.bats
new file mode 100644
index 000000000..f5b4e9570
--- /dev/null
+++ b/test/system/800-config.bats
@@ -0,0 +1,80 @@
+#!/usr/bin/env bats -*- bats -*-
+#
+# Test specific configuration options and overrides
+#
+
+load helpers
+
+@test "podman CONTAINERS_CONF - CONTAINERS_CONF in conmon" {
+ skip_if_remote "can't check conmon environment over remote"
+
+ # Get the normal runtime for this host
+ run_podman info --format '{{ .Host.OCIRuntime.Name }}'
+ runtime="$output"
+ run_podman info --format "{{ .Host.OCIRuntime.Path }}"
+ ocipath="$output"
+
+ # Make an innocuous containers.conf in a non-standard location
+ conf_tmp="$PODMAN_TMPDIR/containers.conf"
+ cat >$conf_tmp <<EOF
+[engine]
+runtime="$runtime"
+[engine.runtimes]
+$runtime = ["$ocipath"]
+EOF
+ CONTAINERS_CONF="$conf_tmp" run_podman run -d $IMAGE sleep infinity
+ cid="$output"
+
+ CONTAINERS_CONF="$conf_tmp" run_podman inspect "$cid" --format "{{ .State.ConmonPid }}"
+ conmon="$output"
+
+ output="$(tr '\0' '\n' < /proc/$conmon/environ | grep '^CONTAINERS_CONF=')"
+ is "$output" "CONTAINERS_CONF=$conf_tmp"
+
+ # Clean up
+ # Oddly, sleep can't be interrupted with SIGTERM, so we need the
+ # "-f -t 0" to force a SIGKILL
+ CONTAINERS_CONF="$conf_tmp" run_podman rm -f -t 0 "$cid"
+}
+
+@test "podman CONTAINERS_CONF - override runtime name" {
+ skip_if_remote "Can't set CONTAINERS_CONF over remote"
+
+ # Get the path of the normal runtime
+ run_podman info --format "{{ .Host.OCIRuntime.Path }}"
+ ocipath="$output"
+
+ export conf_tmp="$PODMAN_TMPDIR/nonstandard_runtime_name.conf"
+ cat > $conf_tmp <<EOF
+[engine]
+runtime = "nonstandard_runtime_name"
+[engine.runtimes]
+nonstandard_runtime_name = ["$ocipath"]
+EOF
+
+ CONTAINERS_CONF="$conf_tmp" run_podman run -d --rm $IMAGE true
+ cid="$output"
+
+ # We need to wait for the container to finish before we can check
+ # if it was cleaned up properly. But in the common case that the
+ # container completes fast, and the cleanup *did* happen properly
+ # the container is now gone. So, we need to ignore "no such
+ # container" errors from podman wait.
+ CONTAINERS_CONF="$conf_tmp" run_podman '?' wait "$cid"
+ if [[ $status != 0 ]]; then
+ is "$output" "Error:.*no such container" "unexpected error from podman wait"
+ fi
+
+ # The --rm option means the container should no longer exist.
+ # However https://github.com/containers/podman/issues/12917 meant
+ # that the container cleanup triggered by conmon's --exit-cmd
+ # could fail, leaving the container in place.
+ #
+ # We verify that the container is indeed gone, by checking that a
+ # podman rm *fails* here - and it has the side effect of cleaning
+ # up in the case this test fails.
+ CONTAINERS_CONF="$conf_tmp" run_podman 1 rm "$cid"
+ is "$output" "Error:.*no such container"
+}
+
+# vim: filetype=sh
diff --git a/test/system/helpers.bash b/test/system/helpers.bash
index c622a5172..221315b97 100644
--- a/test/system/helpers.bash
+++ b/test/system/helpers.bash
@@ -37,9 +37,6 @@ fi
# while retaining the ability to include these if they so desire.
# Some CI systems set this to runc, overriding the default crun.
-# Although it would be more elegant to override options in run_podman(),
-# we instead override $PODMAN itself because some tests (170-run-userns)
-# have to invoke $PODMAN directly.
if [[ -n $OCI_RUNTIME ]]; then
if [[ -z $CONTAINERS_CONF ]]; then
# FIXME: BATS provides no mechanism for end-of-run cleanup[1]; how
@@ -111,6 +108,7 @@ function basic_teardown() {
echo "# [teardown]" >&2
run_podman '?' pod rm -t 0 --all --force --ignore
run_podman '?' rm -t 0 --all --force --ignore
+ run_podman '?' network prune --force
command rm -rf $PODMAN_TMPDIR
}
diff --git a/version/version.go b/version/version.go
index 3ec5e6d84..392111eb6 100644
--- a/version/version.go
+++ b/version/version.go
@@ -27,7 +27,7 @@ const (
// NOTE: remember to bump the version at the top
// of the top-level README.md file when this is
// bumped.
-var Version = semver.MustParse("4.0.1-dev")
+var Version = semver.MustParse("4.0.2-dev")
// See https://docs.docker.com/engine/api/v1.40/
// libpod compat handlers are expected to honor docker API versions