-rw-r--r--  cmd/podman-mac-helper/install.go | 2
-rw-r--r--  cmd/podman/common/create.go | 16
-rw-r--r--  cmd/podman/containers/clone.go | 2
-rw-r--r--  cmd/podman/containers/create.go | 5
-rw-r--r--  cmd/podman/containers/run.go | 5
-rw-r--r--  cmd/podman/system/df.go | 48
-rw-r--r--  docs/source/markdown/podman-create.1.md | 8
-rw-r--r--  docs/source/markdown/podman-pod-clone.1.md | 14
-rw-r--r--  docs/source/markdown/podman-pod-create.1.md | 14
-rw-r--r--  docs/source/markdown/podman-run.1.md | 10
-rw-r--r--  docs/tutorials/remote_client.md | 2
-rw-r--r--  go.mod | 2
-rw-r--r--  go.sum | 5
-rw-r--r--  hack/podman-registry-go/registry.go | 16
-rw-r--r--  libpod/container.go | 2
-rw-r--r--  libpod/container_api.go | 15
-rw-r--r--  libpod/define/healthchecks.go | 10
-rw-r--r--  libpod/define/pod_inspect.go | 2
-rw-r--r--  libpod/healthcheck.go | 6
-rw-r--r--  libpod/pod.go | 17
-rw-r--r--  libpod/pod_api.go | 1
-rw-r--r--  libpod/runtime.go | 108
-rw-r--r--  libpod/runtime_cstorage.go | 27
-rw-r--r--  libpod/runtime_ctr.go | 96
-rw-r--r--  libpod/runtime_img.go | 13
-rw-r--r--  libpod/runtime_migrate.go | 15
-rw-r--r--  libpod/runtime_pod.go | 7
-rw-r--r--  libpod/runtime_pod_linux.go | 42
-rw-r--r--  libpod/runtime_renumber.go | 9
-rw-r--r--  libpod/runtime_volume.go | 4
-rw-r--r--  libpod/runtime_volume_linux.go | 52
-rw-r--r--  pkg/api/handlers/compat/containers.go | 30
-rw-r--r--  pkg/api/handlers/compat/networks.go | 46
-rw-r--r--  pkg/api/handlers/libpod/pods.go | 67
-rw-r--r--  pkg/api/handlers/libpod/volumes.go | 14
-rw-r--r--  pkg/api/handlers/utils/errors.go | 26
-rw-r--r--  pkg/bindings/manifests/manifests.go | 2
-rw-r--r--  pkg/domain/infra/abi/containers.go | 114
-rw-r--r--  pkg/errorhandling/errorhandling.go | 21
-rw-r--r--  pkg/errorhandling/errorhandling_test.go | 53
-rw-r--r--  pkg/k8s.io/apimachinery/pkg/api/resource/amount.go | 2
-rw-r--r--  pkg/k8s.io/apimachinery/pkg/api/resource/math.go | 4
-rw-r--r--  pkg/machine/config_test.go | 3
-rw-r--r--  pkg/machine/qemu/config_test.go | 3
-rw-r--r--  pkg/machine/qemu/machine.go | 2
-rw-r--r--  pkg/machine/qemu/machine_test.go | 3
-rw-r--r--  pkg/specgen/generate/container.go | 11
-rw-r--r--  pkg/specgen/generate/kube/kube.go | 8
-rw-r--r--  pkg/specgen/generate/kube/play_test.go | 4
-rw-r--r--  pkg/specgen/specgen.go | 6
-rw-r--r--  pkg/specgen/volumes.go | 2
-rw-r--r--  pkg/specgenutil/specgen.go | 12
-rw-r--r--  pkg/specgenutil/volumes.go | 2
-rw-r--r--  test/compose/disable_healthcheck/docker-compose.yml | 10
-rw-r--r--  test/compose/disable_healthcheck/tests.sh | 2
-rw-r--r--  test/compose/update_network_mtu/docker-compose.yml | 26
-rw-r--r--  test/compose/update_network_mtu/tests.sh | 10
-rw-r--r--  test/e2e/build/Containerfile.with-platform | 1
-rw-r--r--  test/e2e/common_test.go | 17
-rw-r--r--  test/e2e/play_kube_test.go | 2
-rw-r--r--  test/e2e/pod_create_test.go | 21
-rw-r--r--  test/e2e/run_aardvark_test.go | 31
-rw-r--r--  test/e2e/run_test.go | 28
-rw-r--r--  test/e2e/system_df_test.go | 13
-rw-r--r--  test/system/060-mount.bats | 2
-rw-r--r--  test/system/130-kill.bats | 10
-rw-r--r--  test/system/200-pod.bats | 10
-rw-r--r--  test/testvol/main.go | 30
-rw-r--r--  troubleshooting.md | 57
-rw-r--r--  utils/ports.go | 9
-rw-r--r--  utils/utils.go | 9
-rw-r--r--  utils/utils_supported.go | 7
-rw-r--r--  utils/utils_windows.go | 2
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go | 4
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/default_linux.go | 3
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/seccomp.json | 3
-rw-r--r--  vendor/modules.txt | 2
77 files changed, 811 insertions, 508 deletions
diff --git a/cmd/podman-mac-helper/install.go b/cmd/podman-mac-helper/install.go
index 7b8753820..713bdfcdf 100644
--- a/cmd/podman-mac-helper/install.go
+++ b/cmd/podman-mac-helper/install.go
@@ -193,7 +193,7 @@ func verifyRootDeep(path string) error {
func installExecutable(user string) (string, error) {
// Since the installed executable runs as root, as a precaution verify root ownership of
- // the entire installation path, and utilize sticky + read only perms for the helper path
+ // the entire installation path, and utilize sticky + read-only perms for the helper path
// suffix. The goal is to help users harden against privilege escalation from loose
// filesystem permissions.
//
diff --git a/cmd/podman/common/create.go b/cmd/podman/common/create.go
index e25bdd241..f05549a8d 100644
--- a/cmd/podman/common/create.go
+++ b/cmd/podman/common/create.go
@@ -863,14 +863,6 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
)
_ = cmd.RegisterFlagCompletionFunc(cpusetMemsFlagName, completion.AutocompleteNone)
- memoryFlagName := "memory"
- createFlags.StringVarP(
- &cf.Memory,
- memoryFlagName, "m", "",
- "Memory limit "+sizeWithUnitFormat,
- )
- _ = cmd.RegisterFlagCompletionFunc(memoryFlagName, completion.AutocompleteNone)
-
memoryReservationFlagName := "memory-reservation"
createFlags.StringVar(
&cf.MemoryReservation,
@@ -912,4 +904,12 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
"CPUs in which to allow execution (0-3, 0,1)",
)
_ = cmd.RegisterFlagCompletionFunc(cpusetCpusFlagName, completion.AutocompleteNone)
+
+ memoryFlagName := "memory"
+ createFlags.StringVarP(
+ &cf.Memory,
+ memoryFlagName, "m", "",
+ "Memory limit "+sizeWithUnitFormat,
+ )
+ _ = cmd.RegisterFlagCompletionFunc(memoryFlagName, completion.AutocompleteNone)
}
diff --git a/cmd/podman/containers/clone.go b/cmd/podman/containers/clone.go
index f8d5a2d80..9881a791c 100644
--- a/cmd/podman/containers/clone.go
+++ b/cmd/podman/containers/clone.go
@@ -63,7 +63,7 @@ func clone(cmd *cobra.Command, args []string) error {
ctrClone.Image = args[2]
if !cliVals.RootFS {
rawImageName := args[0]
- name, err := PullImage(ctrClone.Image, ctrClone.CreateOpts)
+ name, err := PullImage(ctrClone.Image, &ctrClone.CreateOpts)
if err != nil {
return err
}
diff --git a/cmd/podman/containers/create.go b/cmd/podman/containers/create.go
index aa7040bcc..05a59ce7b 100644
--- a/cmd/podman/containers/create.go
+++ b/cmd/podman/containers/create.go
@@ -141,7 +141,7 @@ func create(cmd *cobra.Command, args []string) error {
rawImageName := ""
if !cliVals.RootFS {
rawImageName = args[0]
- name, err := PullImage(args[0], cliVals)
+ name, err := PullImage(args[0], &cliVals)
if err != nil {
return err
}
@@ -305,7 +305,8 @@ func CreateInit(c *cobra.Command, vals entities.ContainerCreateOptions, isInfra
return vals, nil
}
-func PullImage(imageName string, cliVals entities.ContainerCreateOptions) (string, error) {
+// PullImage pulls the image, if needed, and parses and populates OS, Arch and Variant in the specified container create options
+func PullImage(imageName string, cliVals *entities.ContainerCreateOptions) (string, error) {
pullPolicy, err := config.ParsePullPolicy(cliVals.Pull)
if err != nil {
return "", err
diff --git a/cmd/podman/containers/run.go b/cmd/podman/containers/run.go
index 1176b866d..ef13ea95e 100644
--- a/cmd/podman/containers/run.go
+++ b/cmd/podman/containers/run.go
@@ -141,7 +141,7 @@ func run(cmd *cobra.Command, args []string) error {
rawImageName := ""
if !cliVals.RootFS {
rawImageName = args[0]
- name, err := PullImage(args[0], cliVals)
+ name, err := PullImage(args[0], &cliVals)
if err != nil {
return err
}
@@ -192,6 +192,9 @@ func run(cmd *cobra.Command, args []string) error {
return err
}
s.RawImageName = rawImageName
+ s.ImageOS = cliVals.OS
+ s.ImageArch = cliVals.Arch
+ s.ImageVariant = cliVals.Variant
s.Passwd = &runOpts.Passwd
runOpts.Spec = s
diff --git a/cmd/podman/system/df.go b/cmd/podman/system/df.go
index 2fcc12feb..5b8126be6 100644
--- a/cmd/podman/system/df.go
+++ b/cmd/podman/system/df.go
@@ -78,11 +78,11 @@ func printSummary(cmd *cobra.Command, reports *entities.SystemDfReport) error {
}
}
imageSummary := dfSummary{
- Type: "Images",
- Total: len(reports.Images),
- Active: active,
- size: size,
- reclaimable: reclaimable,
+ Type: "Images",
+ Total: len(reports.Images),
+ Active: active,
+ RawSize: size,
+ RawReclaimable: reclaimable,
}
dfSummaries = append(dfSummaries, &imageSummary)
@@ -100,11 +100,11 @@ func printSummary(cmd *cobra.Command, reports *entities.SystemDfReport) error {
conSize += c.RWSize
}
containerSummary := dfSummary{
- Type: "Containers",
- Total: len(reports.Containers),
- Active: conActive,
- size: conSize,
- reclaimable: conReclaimable,
+ Type: "Containers",
+ Total: len(reports.Containers),
+ Active: conActive,
+ RawSize: conSize,
+ RawReclaimable: conReclaimable,
}
dfSummaries = append(dfSummaries, &containerSummary)
@@ -120,11 +120,11 @@ func printSummary(cmd *cobra.Command, reports *entities.SystemDfReport) error {
volumesReclaimable += v.ReclaimableSize
}
volumeSummary := dfSummary{
- Type: "Local Volumes",
- Total: len(reports.Volumes),
- Active: activeVolumes,
- size: volumesSize,
- reclaimable: volumesReclaimable,
+ Type: "Local Volumes",
+ Total: len(reports.Volumes),
+ Active: activeVolumes,
+ RawSize: volumesSize,
+ RawReclaimable: volumesReclaimable,
}
dfSummaries = append(dfSummaries, &volumeSummary)
@@ -277,22 +277,22 @@ func (d *dfVolume) Size() string {
}
type dfSummary struct {
- Type string
- Total int
- Active int
- size int64
- reclaimable int64
+ Type string
+ Total int
+ Active int
+ RawSize int64 `json:"Size"`
+ RawReclaimable int64 `json:"Reclaimable"`
}
func (d *dfSummary) Size() string {
- return units.HumanSize(float64(d.size))
+ return units.HumanSize(float64(d.RawSize))
}
func (d *dfSummary) Reclaimable() string {
percent := 0
// make sure to check this to prevent div by zero problems
- if d.size > 0 {
- percent = int(math.Round(float64(d.reclaimable) / float64(d.size) * float64(100)))
+ if d.RawSize > 0 {
+ percent = int(math.Round(float64(d.RawReclaimable) / float64(d.RawSize) * float64(100)))
}
- return fmt.Sprintf("%s (%d%%)", units.HumanSize(float64(d.reclaimable)), percent)
+ return fmt.Sprintf("%s (%d%%)", units.HumanSize(float64(d.RawReclaimable)), percent)
}
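
The struct change above matters whenever the summary is marshaled to JSON: `encoding/json` silently skips unexported fields, so the old `size`/`reclaimable` values never serialized, while the new exported fields keep the emitted keys stable through their tags. A small sketch of the effect, using a trimmed copy of the struct for illustration only:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// dfSummary is a pared-down copy of the struct in the hunk above,
// reproduced only to show the effect of exporting the fields with tags.
type dfSummary struct {
	Type           string
	Total          int
	Active         int
	RawSize        int64 `json:"Size"`
	RawReclaimable int64 `json:"Reclaimable"`
}

func main() {
	s := dfSummary{Type: "Images", Total: 3, Active: 1, RawSize: 2048, RawReclaimable: 1024}
	out, _ := json.Marshal(s)
	// Unexported fields (the old size/reclaimable) would have been dropped
	// here; with the tags the keys stay "Size" and "Reclaimable".
	fmt.Println(string(out))
}
```
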
diff --git a/docs/source/markdown/podman-create.1.md b/docs/source/markdown/podman-create.1.md
index 425ce7bcc..403327d82 100644
--- a/docs/source/markdown/podman-create.1.md
+++ b/docs/source/markdown/podman-create.1.md
@@ -881,11 +881,11 @@ Suppress output information when pulling images
#### **--read-only**
-Mount the container's root filesystem as read only.
+Mount the container's root filesystem as read-only.
By default a container will have its root filesystem writable allowing processes
to write files anywhere. By specifying the `--read-only` flag the container will have
-its root filesystem mounted as read only prohibiting any writes.
+its root filesystem mounted as read-only prohibiting any writes.
#### **--read-only-tmpfs**
@@ -1006,8 +1006,8 @@ Note: Labeling can be disabled for all containers by setting label=false in the
possible mount options are specified in the **proc(5)** man page.
-- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read only by default.
- The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.** The default paths that are read only are **/proc/asound, /proc/bus, /proc/fs, /proc/irq, /proc/sys, /proc/sysrq-trigger, /sys/fs/cgroup**.
+- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read-only by default.
+ The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.** The default paths that are read-only are **/proc/asound, /proc/bus, /proc/fs, /proc/irq, /proc/sys, /proc/sysrq-trigger, /sys/fs/cgroup**.
Note: Labeling can be disabled for all containers by setting label=false in the **containers.conf** (`/etc/containers/containers.conf` or `$HOME/.config/containers/containers.conf`) file.
diff --git a/docs/source/markdown/podman-pod-clone.1.md b/docs/source/markdown/podman-pod-clone.1.md
index e44e9fa3c..a18f7dbfe 100644
--- a/docs/source/markdown/podman-pod-clone.1.md
+++ b/docs/source/markdown/podman-pod-clone.1.md
@@ -80,6 +80,16 @@ Add metadata to a pod (e.g., --label com.example.key=value).
Read in a line delimited file of labels.
+#### **--memory**, **-m**=*limit*
+
+Memory limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kibibytes), m (mebibytes), or g (gibibytes))
+
+Constrains the memory available to a container. If the host
+supports swap memory, then the **-m** memory setting can be larger than physical
+RAM. If a limit of 0 is specified (not using **-m**), the container's memory is
+not limited. The actual limit may be rounded up to a multiple of the operating
+system's page size (the value would be very large, on the order of millions of trillions).
+
#### **--name**, **-n**
Set a custom name for the cloned pod. The default if not specified is of the syntax: **<ORIGINAL_NAME>-clone**
@@ -119,8 +129,8 @@ Note: Labeling can be disabled for all pods/containers by setting label=false in
- `proc-opts=OPTIONS` : Comma-separated list of options to use for the /proc mount. More details for the
possible mount options are specified in the **proc(5)** man page.
-- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read only by default.
- The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.** The default paths that are read only are **/proc/asound, /proc/bus, /proc/fs, /proc/irq, /proc/sys, /proc/sysrq-trigger, /sys/fs/cgroup**.
+- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read-only by default.
+ The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.** The default paths that are read-only are **/proc/asound, /proc/bus, /proc/fs, /proc/irq, /proc/sys, /proc/sysrq-trigger, /sys/fs/cgroup**.
Note: Labeling can be disabled for all containers by setting label=false in the **containers.conf** (`/etc/containers/containers.conf` or `$HOME/.config/containers/containers.conf`) file.
diff --git a/docs/source/markdown/podman-pod-create.1.md b/docs/source/markdown/podman-pod-create.1.md
index e63623169..75d2bb611 100644
--- a/docs/source/markdown/podman-pod-create.1.md
+++ b/docs/source/markdown/podman-pod-create.1.md
@@ -164,6 +164,16 @@ according to RFC4862.
To specify multiple static MAC addresses per pod, set multiple networks using the **--network** option with a static MAC address specified for each using the `mac` mode for that option.
+#### **--memory**, **-m**=*limit*
+
+Memory limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kibibytes), m (mebibytes), or g (gibibytes))
+
+Constrains the memory available to a container. If the host
+supports swap memory, then the **-m** memory setting can be larger than physical
+RAM. If a limit of 0 is specified (not using **-m**), the container's memory is
+not limited. The actual limit may be rounded up to a multiple of the operating
+system's page size (the value would be very large, on the order of millions of trillions).
+
#### **--name**=*name*, **-n**
@@ -283,8 +293,8 @@ Note: Labeling can be disabled for all pods/containers by setting label=false in
- `proc-opts=OPTIONS` : Comma-separated list of options to use for the /proc mount. More details for the
possible mount options are specified in the **proc(5)** man page.
-- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read only by default.
- The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.** The default paths that are read only are **/proc/asound, /proc/bus, /proc/fs, /proc/irq, /proc/sys, /proc/sysrq-trigger, /sys/fs/cgroup**.
+- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read-only by default.
+ The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.** The default paths that are read-only are **/proc/asound, /proc/bus, /proc/fs, /proc/irq, /proc/sys, /proc/sysrq-trigger, /sys/fs/cgroup**.
Note: Labeling can be disabled for all containers by setting label=false in the **containers.conf** (`/etc/containers/containers.conf` or `$HOME/.config/containers/containers.conf`) file.
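
The `--memory` format documented in the two man-page hunks above uses binary units (b, k, m, g). As a reference point only, here is a small sketch of how such strings map to bytes using the `github.com/docker/go-units` parser that Podman already vendors; this illustrates the format and is not the pod flag's actual call site:

```go
package main

import (
	"fmt"

	"github.com/docker/go-units"
)

func main() {
	// RAMInBytes understands the b/k/m/g suffixes as binary units,
	// matching the format documented for --memory.
	for _, v := range []string{"512m", "2g", "1048576"} {
		n, err := units.RAMInBytes(v)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-8s -> %d bytes\n", v, n)
	}
}
```
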
diff --git a/docs/source/markdown/podman-run.1.md b/docs/source/markdown/podman-run.1.md
index 5b45c3350..8f71c3706 100644
--- a/docs/source/markdown/podman-run.1.md
+++ b/docs/source/markdown/podman-run.1.md
@@ -919,11 +919,11 @@ Suppress output information when pulling images
#### **--read-only**
-Mount the container's root filesystem as read only.
+Mount the container's root filesystem as read-only.
By default a container will have its root filesystem writable allowing processes
to write files anywhere. By specifying the **--read-only** flag, the container will have
-its root filesystem mounted as read only prohibiting any writes.
+its root filesystem mounted as read-only prohibiting any writes.
#### **--read-only-tmpfs**
@@ -1051,8 +1051,8 @@ Note: Labeling can be disabled for all containers by setting label=false in the
- **proc-opts**=_OPTIONS_ : Comma-separated list of options to use for the /proc mount. More details
for the possible mount options are specified in the **proc(5)** man page.
-- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read only by default.
- The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.**. The default paths that are read only are **/proc/asound**, **/proc/bus**, **/proc/fs**, **/proc/irq**, **/proc/sys**, **/proc/sysrq-trigger**, **/sys/fs/cgroup**.
+- **unmask**=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it will unmask all the paths that are masked or made read-only by default.
+ The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux.**. The default paths that are read-only are **/proc/asound**, **/proc/bus**, **/proc/fs**, **/proc/irq**, **/proc/sys**, **/proc/sysrq-trigger**, **/sys/fs/cgroup**.
Note: Labeling can be disabled for all containers by setting **label=false** in the **containers.conf**(5) file.
@@ -1603,7 +1603,7 @@ content. Installing packages into _/usr_, for example. In production,
applications seldom need to write to the image. Container applications write
to volumes if they need to write to file systems at all. Applications can be
made more secure by running them in read-only mode using the **--read-only** switch.
-This protects the containers image from modification. Read only containers may
+This protects the containers image from modification. Read-only containers may
still need to write temporary data. The best way to handle this is to mount
tmpfs directories on _/run_ and _/tmp_.
diff --git a/docs/tutorials/remote_client.md b/docs/tutorials/remote_client.md
index 27b97e6f5..5cd679193 100644
--- a/docs/tutorials/remote_client.md
+++ b/docs/tutorials/remote_client.md
@@ -54,7 +54,7 @@ host:
In order for the Podman client to communicate with the server you need to enable and start the SSH daemon on your Linux machine, if it is not currently enabled.
```
-sudo systemctl enable --now -s sshd
+sudo systemctl enable --now sshd
```
#### Setting up SSH
diff --git a/go.mod b/go.mod
index 35d4b931f..521521d9a 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
github.com/containernetworking/cni v1.1.1
github.com/containernetworking/plugins v1.1.1
github.com/containers/buildah v1.26.1-0.20220609225314-e66309ebde8c
- github.com/containers/common v0.48.1-0.20220628131511-a8336c1613fe
+ github.com/containers/common v0.48.1-0.20220630172158-178929cf063e
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c
github.com/containers/ocicrypt v1.1.5
diff --git a/go.sum b/go.sum
index c34b5dfbb..aed64a5c6 100644
--- a/go.sum
+++ b/go.sum
@@ -338,8 +338,8 @@ github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19
github.com/containers/buildah v1.26.1-0.20220609225314-e66309ebde8c h1:/fKyiLFFuceBPZGJ0Lig7ElURhfsslAOw1BOcItD+X8=
github.com/containers/buildah v1.26.1-0.20220609225314-e66309ebde8c/go.mod h1:b0L+u2Dam7soWGn5sVTK31L++Xrf80AbGvK5z9D2+lw=
github.com/containers/common v0.48.1-0.20220608111710-dbecabbe82c9/go.mod h1:WBLwq+i7bicCpH54V70HM6s7jqDAESTlYnd05XXp0ac=
-github.com/containers/common v0.48.1-0.20220628131511-a8336c1613fe h1:H5YI9PXhDB974IkSCUaha+AF60TunRdHaGElZroYx7M=
-github.com/containers/common v0.48.1-0.20220628131511-a8336c1613fe/go.mod h1:UDe7OTpNdtJA2T80Sp7yB0yTaj79f4kMNQbTsNxsqoY=
+github.com/containers/common v0.48.1-0.20220630172158-178929cf063e h1:Vf5tsGrLC2B2omVBP3AdDA7YlE/VoMdNyQ5yPF8GRoY=
+github.com/containers/common v0.48.1-0.20220630172158-178929cf063e/go.mod h1:Zt3D/IhgFyG1oaBrqsbn9NdH/4fkjsO2Y0ahP12ieu4=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.21.2-0.20220511203756-fe4fd4ed8be4/go.mod h1:OsX9sFexyGF0FCNAjfcVFv3IwMqDyLyV/WQY/roLPcE=
@@ -1268,7 +1268,6 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
-github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
diff --git a/hack/podman-registry-go/registry.go b/hack/podman-registry-go/registry.go
index af8f3117c..d66d092b6 100644
--- a/hack/podman-registry-go/registry.go
+++ b/hack/podman-registry-go/registry.go
@@ -1,10 +1,10 @@
package registry
import (
+ "fmt"
"strings"
"github.com/containers/podman/v4/utils"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -57,7 +57,7 @@ func StartWithOptions(options *Options) (*Registry, error) {
// Start a registry.
out, err := utils.ExecCmd(binary, args...)
if err != nil {
- return nil, errors.Wrapf(err, "error running %q: %s", binary, out)
+ return nil, fmt.Errorf("error running %q: %s: %w", binary, out, err)
}
// Parse the output.
@@ -68,7 +68,7 @@ func StartWithOptions(options *Options) (*Registry, error) {
}
spl := strings.Split(s, "=")
if len(spl) != 2 {
- return nil, errors.Errorf("unexpected output format %q: want 'PODMAN_...=...'", s)
+ return nil, fmt.Errorf("unexpected output format %q: want 'PODMAN_...=...'", s)
}
key := spl[0]
val := strings.TrimSuffix(strings.TrimPrefix(spl[1], "\""), "\"")
@@ -88,16 +88,16 @@ func StartWithOptions(options *Options) (*Registry, error) {
// Extra sanity check.
if registry.Image == "" {
- return nil, errors.Errorf("unexpected output %q: %q missing", out, ImageKey)
+ return nil, fmt.Errorf("unexpected output %q: %q missing", out, ImageKey)
}
if registry.User == "" {
- return nil, errors.Errorf("unexpected output %q: %q missing", out, UserKey)
+ return nil, fmt.Errorf("unexpected output %q: %q missing", out, UserKey)
}
if registry.Password == "" {
- return nil, errors.Errorf("unexpected output %q: %q missing", out, PassKey)
+ return nil, fmt.Errorf("unexpected output %q: %q missing", out, PassKey)
}
if registry.Port == "" {
- return nil, errors.Errorf("unexpected output %q: %q missing", out, PortKey)
+ return nil, fmt.Errorf("unexpected output %q: %q missing", out, PortKey)
}
registry.running = true
@@ -112,7 +112,7 @@ func (r *Registry) Stop() error {
return nil
}
if _, err := utils.ExecCmd(binary, "-P", r.Port, "stop"); err != nil {
- return errors.Wrapf(err, "error stopping registry (%v) with %q", *r, binary)
+ return fmt.Errorf("error stopping registry (%v) with %q: %w", *r, binary, err)
}
r.running = false
return nil
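
This file is the first of many in this diff to drop `github.com/pkg/errors` in favor of the standard library: `errors.Wrapf(err, ...)` becomes `fmt.Errorf("...: %w", ..., err)` and `errors.Errorf` becomes plain `fmt.Errorf`. The `%w` verb keeps the wrap chain intact, so callers can still match the underlying error. A minimal sketch of why the verb matters:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotRunning = errors.New("registry is not running")

func stop() error {
	// Equivalent of errors.Wrapf(errNotRunning, "error stopping registry %q", "reg1").
	return fmt.Errorf("error stopping registry %q: %w", "reg1", errNotRunning)
}

func main() {
	err := stop()
	// %w preserves the wrapped error, so errors.Is still matches the
	// sentinel; with %v instead of %w this would print false.
	fmt.Println(errors.Is(err, errNotRunning)) // true
	fmt.Println(err)
}
```
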
diff --git a/libpod/container.go b/libpod/container.go
index 3a15cfbdb..0619471b4 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -1118,7 +1118,7 @@ func (c *Container) IsInitCtr() bool {
return len(c.config.InitContainerType) > 0
}
-// IsReadOnly returns whether the container is running in read only mode
+// IsReadOnly returns whether the container is running in read-only mode
func (c *Container) IsReadOnly() bool {
return c.config.Spec.Root.Readonly
}
diff --git a/libpod/container_api.go b/libpod/container_api.go
index f35cce772..39303eef6 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -761,19 +761,8 @@ func (c *Container) Sync() error {
defer c.lock.Unlock()
}
- // If runtime knows about the container, update its status in runtime
- // And then save back to disk
- if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopped, define.ContainerStateStopping) {
- oldState := c.state.State
- if err := c.ociRuntime.UpdateContainerStatus(c); err != nil {
- return err
- }
- // Only save back to DB if state changed
- if c.state.State != oldState {
- if err := c.save(); err != nil {
- return err
- }
- }
+ if err := c.syncContainer(); err != nil {
+ return err
}
defer c.newContainerEvent(events.Sync)
diff --git a/libpod/define/healthchecks.go b/libpod/define/healthchecks.go
index bde449d30..f71274350 100644
--- a/libpod/define/healthchecks.go
+++ b/libpod/define/healthchecks.go
@@ -47,3 +47,13 @@ const (
// DefaultHealthCheckTimeout default value
DefaultHealthCheckTimeout = "30s"
)
+
+// HealthConfig.Test options
+const (
+ // HealthConfigTestNone disables healthcheck
+ HealthConfigTestNone = "NONE"
+ // HealthConfigTestCmd execs arguments directly
+ HealthConfigTestCmd = "CMD"
+ // HealthConfigTestCmdShell runs commands with the system's default shell
+ HealthConfigTestCmdShell = "CMD-SHELL"
+)
diff --git a/libpod/define/pod_inspect.go b/libpod/define/pod_inspect.go
index c387856e5..935e0f5f9 100644
--- a/libpod/define/pod_inspect.go
+++ b/libpod/define/pod_inspect.go
@@ -69,6 +69,8 @@ type InspectPodData struct {
VolumesFrom []string `json:"volumes_from,omitempty"`
// SecurityOpt contains the specified security labels and related SELinux information
SecurityOpts []string `json:"security_opt,omitempty"`
+ // MemoryLimit contains the specified cgroup memory limit for the pod
+ MemoryLimit uint64 `json:"memory_limit,omitempty"`
}
// InspectPodInfraConfig contains the configuration of the pod's infra
diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go
index 95c70b60e..df6f00e7e 100644
--- a/libpod/healthcheck.go
+++ b/libpod/healthcheck.go
@@ -47,11 +47,11 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
}
switch hcCommand[0] {
- case "", "NONE":
+ case "", define.HealthConfigTestNone:
return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
- case "CMD":
+ case define.HealthConfigTestCmd:
newCommand = hcCommand[1:]
- case "CMD-SHELL":
+ case define.HealthConfigTestCmdShell:
// TODO: SHELL command from image not available in Container - use Docker default
newCommand = []string{"/bin/sh", "-c", strings.Join(hcCommand[1:], " ")}
default:
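
The constants added in `libpod/define/healthchecks.go` name the `HealthConfig.Test` forms that the switch above interprets. A condensed, self-contained sketch of the same mapping (the helper function is illustrative, not a Podman API):

```go
package main

import (
	"fmt"
	"strings"
)

const (
	healthConfigTestNone     = "NONE"
	healthConfigTestCmd      = "CMD"
	healthConfigTestCmdShell = "CMD-SHELL"
)

// buildHealthCheckCommand turns a HealthConfig.Test slice into the argv
// that would be executed, mirroring the switch in runHealthCheck.
func buildHealthCheckCommand(test []string) ([]string, error) {
	if len(test) == 0 {
		return nil, fmt.Errorf("no healthcheck defined")
	}
	switch test[0] {
	case "", healthConfigTestNone:
		return nil, fmt.Errorf("healthcheck disabled")
	case healthConfigTestCmd:
		return test[1:], nil
	case healthConfigTestCmdShell:
		// Docker's default shell form.
		return []string{"/bin/sh", "-c", strings.Join(test[1:], " ")}, nil
	default:
		// Anything else is treated as a raw command.
		return test, nil
	}
}

func main() {
	cmd, _ := buildHealthCheckCommand([]string{"CMD-SHELL", "curl -f http://localhost || exit 1"})
	fmt.Println(cmd)
}
```
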
diff --git a/libpod/pod.go b/libpod/pod.go
index 2502c41a9..c8c6790e8 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -169,6 +169,23 @@ func (p *Pod) CPUQuota() int64 {
return 0
}
+// MemoryLimit returns the pod Memory Limit
+func (p *Pod) MemoryLimit() uint64 {
+ if p.state.InfraContainerID == "" {
+ return 0
+ }
+ infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
+ if err != nil {
+ return 0
+ }
+ conf := infra.config.Spec
+ if conf != nil && conf.Linux != nil && conf.Linux.Resources != nil && conf.Linux.Resources.Memory != nil && conf.Linux.Resources.Memory.Limit != nil {
+ val := *conf.Linux.Resources.Memory.Limit
+ return uint64(val)
+ }
+ return 0
+}
+
// NetworkMode returns the Network mode given by the user ex: pod, private...
func (p *Pod) NetworkMode() string {
infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index fefe0e329..f06e62007 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -751,6 +751,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
CPUSetCPUs: p.ResourceLim().CPU.Cpus,
CPUPeriod: p.CPUPeriod(),
CPUQuota: p.CPUQuota(),
+ MemoryLimit: p.MemoryLimit(),
Mounts: inspectMounts,
Devices: devices,
BlkioDeviceReadBps: deviceLimits,
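
With `MemoryLimit` now populated in `Inspect()`, the value surfaces in pod-inspect JSON under the `memory_limit` key added to `InspectPodData` above. A small sketch of reading it back, using a pared-down stand-in for the real `define.InspectPodData` and made-up values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// podInspect models just the field this change adds; the real type is
// define.InspectPodData.
type podInspect struct {
	Name        string `json:"Name"`
	MemoryLimit uint64 `json:"memory_limit,omitempty"`
}

func main() {
	// Example inspect-output shape; the values here are invented.
	raw := []byte(`{"Name":"mypod","memory_limit":536870912}`)

	var p podInspect
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%s limit: %d bytes\n", p.Name, p.MemoryLimit)
}
```
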
diff --git a/libpod/runtime.go b/libpod/runtime.go
index da57c20c7..ea4b34954 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -4,6 +4,7 @@ import (
"bufio"
"bytes"
"context"
+ "errors"
"fmt"
"os"
"os/exec"
@@ -40,7 +41,6 @@ import (
"github.com/containers/storage/pkg/unshare"
"github.com/docker/docker/pkg/namesgenerator"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -146,7 +146,7 @@ func SetXdgDirs() error {
}
}
if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
- return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
+ return fmt.Errorf("cannot set XDG_RUNTIME_DIR: %w", err)
}
if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" {
@@ -163,7 +163,7 @@ func SetXdgDirs() error {
return err
}
if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil {
- return errors.Wrapf(err, "cannot set XDG_CONFIG_HOME")
+ return fmt.Errorf("cannot set XDG_CONFIG_HOME: %w", err)
}
}
return nil
@@ -214,7 +214,7 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
// Overwrite config with user-given configuration options
for _, opt := range options {
if err := opt(runtime); err != nil {
- return nil, errors.Wrapf(err, "error configuring runtime")
+ return nil, fmt.Errorf("error configuring runtime: %w", err)
}
}
@@ -225,12 +225,12 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
}
os.Exit(1)
return nil
- }); err != nil && errors.Cause(err) != shutdown.ErrHandlerExists {
+ }); err != nil && !errors.Is(err, shutdown.ErrHandlerExists) {
logrus.Errorf("Registering shutdown handler for libpod: %v", err)
}
if err := shutdown.Start(); err != nil {
- return nil, errors.Wrapf(err, "error starting shutdown signal handler")
+ return nil, fmt.Errorf("error starting shutdown signal handler: %w", err)
}
if err := makeRuntime(runtime); err != nil {
@@ -256,10 +256,10 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
lockPath := filepath.Join(runtime.config.Engine.TmpDir, "locks")
manager, err = lock.OpenFileLockManager(lockPath)
if err != nil {
- if os.IsNotExist(errors.Cause(err)) {
+ if errors.Is(err, os.ErrNotExist) {
manager, err = lock.NewFileLockManager(lockPath)
if err != nil {
- return nil, errors.Wrapf(err, "failed to get new file lock manager")
+ return nil, fmt.Errorf("failed to get new file lock manager: %w", err)
}
} else {
return nil, err
@@ -275,19 +275,19 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
switch {
- case os.IsNotExist(errors.Cause(err)):
+ case errors.Is(err, os.ErrNotExist):
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
- return nil, errors.Wrapf(err, "failed to get new shm lock manager")
+ return nil, fmt.Errorf("failed to get new shm lock manager: %w", err)
}
- case errors.Cause(err) == syscall.ERANGE && runtime.doRenumber:
+ case errors.Is(err, syscall.ERANGE) && runtime.doRenumber:
logrus.Debugf("Number of locks does not match - removing old locks")
// ERANGE indicates a lock numbering mismatch.
// Since we're renumbering, this is not fatal.
// Remove the earlier set of locks and recreate.
if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
- return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
+ return nil, fmt.Errorf("error removing libpod locks file %s: %w", lockPath, err)
}
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
@@ -299,7 +299,7 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
}
}
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.Engine.LockType)
+ return nil, fmt.Errorf("unknown lock type %s: %w", runtime.config.Engine.LockType, define.ErrInvalidArg)
}
return manager, nil
}
@@ -315,17 +315,17 @@ func makeRuntime(runtime *Runtime) (retErr error) {
runtime.conmonPath = cPath
if runtime.noStore && runtime.doReset {
- return errors.Wrapf(define.ErrInvalidArg, "cannot perform system reset if runtime is not creating a store")
+ return fmt.Errorf("cannot perform system reset if runtime is not creating a store: %w", define.ErrInvalidArg)
}
if runtime.doReset && runtime.doRenumber {
- return errors.Wrapf(define.ErrInvalidArg, "cannot perform system reset while renumbering locks")
+ return fmt.Errorf("cannot perform system reset while renumbering locks: %w", define.ErrInvalidArg)
}
// Make the static files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.StaticDir, 0700); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrap(err, "error creating runtime static files directory")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating runtime static files directory: %w", err)
}
}
@@ -337,9 +337,9 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// package.
switch runtime.config.Engine.StateType {
case config.InMemoryStateStore:
- return errors.Wrapf(define.ErrInvalidArg, "in-memory state is currently disabled")
+ return fmt.Errorf("in-memory state is currently disabled: %w", define.ErrInvalidArg)
case config.SQLiteStateStore:
- return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled")
+ return fmt.Errorf("SQLite state is currently disabled: %w", define.ErrInvalidArg)
case config.BoltDBStateStore:
dbPath := filepath.Join(runtime.config.Engine.StaticDir, "bolt_state.db")
@@ -349,7 +349,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
runtime.state = state
default:
- return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.Engine.StateType)
+ return fmt.Errorf("unrecognized state type passed (%v): %w", runtime.config.Engine.StateType, define.ErrInvalidArg)
}
// Grab config from the database so we can reset some defaults
@@ -369,7 +369,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
}
- return errors.Wrapf(err, "error retrieving runtime configuration from database")
+ return fmt.Errorf("error retrieving runtime configuration from database: %w", err)
}
runtime.mergeDBConfig(dbConfig)
@@ -412,7 +412,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
if err := runtime.state.SetNamespace(runtime.config.Engine.Namespace); err != nil {
- return errors.Wrapf(err, "error setting libpod namespace in state")
+ return fmt.Errorf("error setting libpod namespace in state: %w", err)
}
logrus.Debugf("Set libpod namespace to %q", runtime.config.Engine.Namespace)
@@ -468,16 +468,16 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// Create the tmpDir
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrap(err, "error creating tmpdir")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating tmpdir: %w", err)
}
}
// Create events log dir
if err := os.MkdirAll(filepath.Dir(runtime.config.Engine.EventsLogFilePath), 0700); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrap(err, "error creating events dirs")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating events dirs: %w", err)
}
}
@@ -514,7 +514,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
} else {
ociRuntime, ok := runtime.ociRuntimes[runtime.config.Engine.OCIRuntime]
if !ok {
- return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.Engine.OCIRuntime)
+ return fmt.Errorf("default OCI runtime %q not found: %w", runtime.config.Engine.OCIRuntime, define.ErrInvalidArg)
}
runtime.defaultOCIRuntime = ociRuntime
}
@@ -523,19 +523,19 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// Do we have at least one valid OCI runtime?
if len(runtime.ociRuntimes) == 0 {
- return errors.Wrapf(define.ErrInvalidArg, "no OCI runtime has been configured")
+ return fmt.Errorf("no OCI runtime has been configured: %w", define.ErrInvalidArg)
}
// Do we have a default runtime?
if runtime.defaultOCIRuntime == nil {
- return errors.Wrapf(define.ErrInvalidArg, "no default OCI runtime was configured")
+ return fmt.Errorf("no default OCI runtime was configured: %w", define.ErrInvalidArg)
}
// Make the per-boot files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0755); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrapf(err, "error creating runtime temporary files directory")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating runtime temporary files directory: %w", err)
}
}
@@ -556,7 +556,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
aliveLock, err := storage.GetLockfile(runtimeAliveLock)
if err != nil {
- return errors.Wrapf(err, "error acquiring runtime init lock")
+ return fmt.Errorf("error acquiring runtime init lock: %w", err)
}
// Acquire the lock and hold it until we return
// This ensures that no two processes will be in runtime.refresh at once
@@ -586,7 +586,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
aliveLock.Unlock() // Unlock to avoid deadlock as BecomeRootInUserNS will reexec.
pausePid, err := util.GetRootlessPauseProcessPidPathGivenDir(runtime.config.Engine.TmpDir)
if err != nil {
- return errors.Wrapf(err, "could not get pause process pid file path")
+ return fmt.Errorf("could not get pause process pid file path: %w", err)
}
became, ret, err := rootless.BecomeRootInUserNS(pausePid)
if err != nil {
@@ -607,10 +607,10 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// This will trigger on first use as well, but refreshing an
// empty state only creates a single file
// As such, it's not really a performance concern
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
doRefresh = true
} else {
- return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile)
+ return fmt.Errorf("error reading runtime status file %s: %w", runtimeAliveFile, err)
}
}
@@ -704,14 +704,14 @@ func findConmon(conmonPaths []string) (string, error) {
}
if foundOutdatedConmon {
- return "", errors.Wrapf(define.ErrConmonOutdated,
- "please update to v%d.%d.%d or later",
- conmonMinMajorVersion, conmonMinMinorVersion, conmonMinPatchVersion)
+ return "", fmt.Errorf(
+ "please update to v%d.%d.%d or later: %w",
+ conmonMinMajorVersion, conmonMinMinorVersion, conmonMinPatchVersion, define.ErrConmonOutdated)
}
- return "", errors.Wrapf(define.ErrInvalidArg,
- "could not find a working conmon binary (configured options: %v)",
- conmonPaths)
+ return "", fmt.Errorf(
+ "could not find a working conmon binary (configured options: %v): %w",
+ conmonPaths, define.ErrInvalidArg)
}
// probeConmon calls conmon --version and verifies it is a new enough version for
@@ -728,11 +728,11 @@ func probeConmon(conmonBinary string) error {
matches := r.FindStringSubmatch(out.String())
if len(matches) != 4 {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
major, err := strconv.Atoi(matches[1])
if err != nil {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
if major < conmonMinMajorVersion {
return define.ErrConmonOutdated
@@ -743,7 +743,7 @@ func probeConmon(conmonBinary string) error {
minor, err := strconv.Atoi(matches[2])
if err != nil {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
if minor < conmonMinMinorVersion {
return define.ErrConmonOutdated
@@ -754,7 +754,7 @@ func probeConmon(conmonBinary string) error {
patch, err := strconv.Atoi(matches[3])
if err != nil {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
if patch < conmonMinPatchVersion {
return define.ErrConmonOutdated
@@ -798,7 +798,7 @@ func (r *Runtime) GetConfig() (*config.Config, error) {
// Copy so the caller won't be able to modify the actual config
if err := JSONDeepCopy(rtConfig, config); err != nil {
- return nil, errors.Wrapf(err, "error copying config")
+ return nil, fmt.Errorf("error copying config: %w", err)
}
return config, nil
@@ -909,7 +909,7 @@ func (r *Runtime) Shutdown(force bool) error {
// Note that the libimage runtime shuts down the store.
if err := r.libimageRuntime.Shutdown(force); err != nil {
- lastError = errors.Wrapf(err, "error shutting down container storage")
+ lastError = fmt.Errorf("error shutting down container storage: %w", err)
}
}
if err := r.state.Close(); err != nil {
@@ -941,15 +941,15 @@ func (r *Runtime) refresh(alivePath string) error {
// Containers, pods, and volumes must also reacquire their locks.
ctrs, err := r.state.AllContainers()
if err != nil {
- return errors.Wrapf(err, "error retrieving all containers from state")
+ return fmt.Errorf("error retrieving all containers from state: %w", err)
}
pods, err := r.state.AllPods()
if err != nil {
- return errors.Wrapf(err, "error retrieving all pods from state")
+ return fmt.Errorf("error retrieving all pods from state: %w", err)
}
vols, err := r.state.AllVolumes()
if err != nil {
- return errors.Wrapf(err, "error retrieving all volumes from state")
+ return fmt.Errorf("error retrieving all volumes from state: %w", err)
}
// No locks are taken during pod, volume, and container refresh.
// Furthermore, the pod/volume/container refresh() functions are not
@@ -977,7 +977,7 @@ func (r *Runtime) refresh(alivePath string) error {
// Create a file indicating the runtime is alive and ready
file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644)
if err != nil {
- return errors.Wrap(err, "error creating runtime status file")
+ return fmt.Errorf("error creating runtime status file: %w", err)
}
defer file.Close()
@@ -998,13 +998,13 @@ func (r *Runtime) generateName() (string, error) {
// Make sure container with this name does not exist
if _, err := r.state.LookupContainer(name); err == nil {
continue
- } else if errors.Cause(err) != define.ErrNoSuchCtr {
+ } else if !errors.Is(err, define.ErrNoSuchCtr) {
return "", err
}
// Make sure pod with this name does not exist
if _, err := r.state.LookupPod(name); err == nil {
continue
- } else if errors.Cause(err) != define.ErrNoSuchPod {
+ } else if !errors.Is(err, define.ErrNoSuchPod) {
return "", err
}
return name, nil
@@ -1205,7 +1205,7 @@ func (r *Runtime) getVolumePlugin(volConfig *VolumeConfig) (*plugin.VolumePlugin
pluginPath, ok := r.config.Engine.VolumePlugins[name]
if !ok {
- return nil, errors.Wrapf(define.ErrMissingPlugin, "no volume plugin with name %s available", name)
+ return nil, fmt.Errorf("no volume plugin with name %s available: %w", name, define.ErrMissingPlugin)
}
return plugin.GetVolumePlugin(name, pluginPath, timeout)
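
Several hunks above also swap `os.IsNotExist(errors.Cause(err))` for `errors.Is(err, os.ErrNotExist)`. The distinction: `errors.Is` walks the whole `%w` chain, while `os.IsNotExist` only inspects the error it is handed and therefore misses anything wrapped with `fmt.Errorf`. A short sketch:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("/definitely/not/there")

	// Wrap the error the way the migrated code does.
	wrapped := fmt.Errorf("error reading runtime status file: %w", err)

	fmt.Println(os.IsNotExist(wrapped))             // false: does not unwrap %w
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: walks the wrap chain
}
```
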
diff --git a/libpod/runtime_cstorage.go b/libpod/runtime_cstorage.go
index 1c528e1b8..047375628 100644
--- a/libpod/runtime_cstorage.go
+++ b/libpod/runtime_cstorage.go
@@ -1,11 +1,12 @@
package libpod
import (
+ "errors"
+ "fmt"
"time"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/storage"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -38,7 +39,7 @@ func (r *Runtime) ListStorageContainers() ([]*StorageContainer, error) {
// Look up if container is in state
hasCtr, err := r.state.HasContainer(ctr.ID)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up container %s in state", ctr.ID)
+ return nil, fmt.Errorf("error looking up container %s in state: %w", ctr.ID, err)
}
storageCtr.PresentInLibpod = hasCtr
@@ -60,20 +61,20 @@ func (r *Runtime) StorageContainer(idOrName string) (*storage.Container, error)
func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
targetID, err := r.store.Lookup(idOrName)
if err != nil {
- if errors.Cause(err) == storage.ErrLayerUnknown {
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID or name %q found", idOrName)
+ if errors.Is(err, storage.ErrLayerUnknown) {
+ return fmt.Errorf("no container with ID or name %q found: %w", idOrName, define.ErrNoSuchCtr)
}
- return errors.Wrapf(err, "error looking up container %q", idOrName)
+ return fmt.Errorf("error looking up container %q: %w", idOrName, err)
}
// Lookup returns an ID but it's not guaranteed to be a container ID.
// So we can still error here.
ctr, err := r.store.Container(targetID)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
- return errors.Wrapf(define.ErrNoSuchCtr, "%q does not refer to a container", idOrName)
+ if errors.Is(err, storage.ErrContainerUnknown) {
+ return fmt.Errorf("%q does not refer to a container: %w", idOrName, define.ErrNoSuchCtr)
}
- return errors.Wrapf(err, "error retrieving container %q", idOrName)
+ return fmt.Errorf("error retrieving container %q: %w", idOrName, err)
}
// Error out if the container exists in libpod
@@ -82,13 +83,13 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
return err
}
if exists {
- return errors.Wrapf(define.ErrCtrExists, "refusing to remove %q as it exists in libpod as container %s", idOrName, ctr.ID)
+ return fmt.Errorf("refusing to remove %q as it exists in libpod as container %s: %w", idOrName, ctr.ID, define.ErrCtrExists)
}
if !force {
timesMounted, err := r.store.Mounted(ctr.ID)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
// Container was removed from under us.
// It's gone, so don't bother erroring.
logrus.Infof("Storage for container %s already removed", ctr.ID)
@@ -97,7 +98,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
logrus.Warnf("Checking if container %q is mounted, attempting to delete: %v", idOrName, err)
}
if timesMounted > 0 {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %q is mounted and cannot be removed without using force", idOrName)
+ return fmt.Errorf("container %q is mounted and cannot be removed without using force: %w", idOrName, define.ErrCtrStateInvalid)
}
} else if _, err := r.store.Unmount(ctr.ID, true); err != nil {
if errors.Is(err, storage.ErrContainerUnknown) {
@@ -109,12 +110,12 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
}
if err := r.store.DeleteContainer(ctr.ID); err != nil {
- if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown) {
// Container again gone, no error
logrus.Infof("Storage for container %s already removed", ctr.ID)
return nil
}
- return errors.Wrapf(err, "error removing storage for container %q", idOrName)
+ return fmt.Errorf("error removing storage for container %q: %w", idOrName, err)
}
return nil
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 4d34c6a08..ce0fd869d 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"os"
"path"
@@ -26,7 +27,6 @@ import (
"github.com/docker/go-units"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -86,7 +86,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
ctr, err := r.initContainerVariables(rSpec, config)
if err != nil {
- return nil, errors.Wrapf(err, "error initializing container variables")
+ return nil, fmt.Errorf("error initializing container variables: %w", err)
}
// For an imported checkpoint no one has ever set the StartedTime. Set it now.
ctr.state.StartedTime = time.Now()
@@ -126,7 +126,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
// the config was re-written.
newConf, err := r.state.GetContainerConfig(ctr.ID())
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving container %s configuration from DB to remove", ctr.ID())
+ return nil, fmt.Errorf("error retrieving container %s configuration from DB to remove: %w", ctr.ID(), err)
}
ctr.config = newConf
@@ -143,7 +143,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
// Set config back to the old name so reflect what is actually
// present in the DB.
ctr.config.Name = oldName
- return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
+ return nil, fmt.Errorf("error renaming container %s: %w", ctr.ID(), err)
}
// Step 3: rename the container in c/storage.
@@ -162,7 +162,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
if rSpec == nil {
- return nil, errors.Wrapf(define.ErrInvalidArg, "must provide a valid runtime spec to create container")
+ return nil, fmt.Errorf("must provide a valid runtime spec to create container: %w", define.ErrInvalidArg)
}
ctr := new(Container)
ctr.config = new(ContainerConfig)
@@ -172,7 +172,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
ctr.config.ID = stringid.GenerateNonCryptoID()
size, err := units.FromHumanSize(r.config.Containers.ShmSize)
if err != nil {
- return nil, errors.Wrapf(err, "converting containers.conf ShmSize %s to an int", r.config.Containers.ShmSize)
+ return nil, fmt.Errorf("converting containers.conf ShmSize %s to an int: %w", r.config.Containers.ShmSize, err)
}
ctr.config.ShmSize = size
ctr.config.NoShm = false
@@ -184,7 +184,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
// This is a restore from an imported checkpoint
ctr.restoreFromCheckpoint = true
if err := JSONDeepCopy(config, ctr.config); err != nil {
- return nil, errors.Wrapf(err, "error copying container config for restore")
+ return nil, fmt.Errorf("error copying container config for restore: %w", err)
}
// If the ID is empty a new name for the restored container was requested
if ctr.config.ID == "" {
@@ -224,12 +224,12 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
ctr, err = r.initContainerVariables(rSpec, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error initializing container variables")
+ return nil, fmt.Errorf("error initializing container variables: %w", err)
}
for _, option := range options {
if err := option(ctr); err != nil {
- return nil, errors.Wrapf(err, "error running container create option")
+ return nil, fmt.Errorf("error running container create option: %w", err)
}
}
@@ -248,7 +248,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
if opts.InterfaceName != "" {
// check that no name is assigned to more than network
if cutil.StringInSlice(opts.InterfaceName, usedIfNames) {
- return nil, errors.Errorf("network interface name %q is already assigned to another network", opts.InterfaceName)
+ return nil, fmt.Errorf("network interface name %q is already assigned to another network", opts.InterfaceName)
}
usedIfNames = append(usedIfNames, opts.InterfaceName)
}
@@ -296,7 +296,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Allocate a lock for the container
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return nil, errors.Wrapf(err, "error allocating lock for new container")
+ return nil, fmt.Errorf("error allocating lock for new container: %w", err)
}
ctr.lock = lock
ctr.config.LockID = ctr.lock.ID()
@@ -319,7 +319,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
} else {
ociRuntime, ok := r.ociRuntimes[ctr.config.OCIRuntime]
if !ok {
- return nil, errors.Wrapf(define.ErrInvalidArg, "requested OCI runtime %s is not available", ctr.config.OCIRuntime)
+ return nil, fmt.Errorf("requested OCI runtime %s is not available: %w", ctr.config.OCIRuntime, define.ErrInvalidArg)
}
ctr.ociRuntime = ociRuntime
}
@@ -327,7 +327,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Check NoCgroups support
if ctr.config.NoCgroups {
if !ctr.ociRuntime.SupportsNoCgroups() {
- return nil, errors.Wrapf(define.ErrInvalidArg, "requested OCI runtime %s is not compatible with NoCgroups", ctr.ociRuntime.Name())
+ return nil, fmt.Errorf("requested OCI runtime %s is not compatible with NoCgroups: %w", ctr.ociRuntime.Name(), define.ErrInvalidArg)
}
}
@@ -336,7 +336,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Get the pod from state
pod, err = r.state.Pod(ctr.config.Pod)
if err != nil {
- return nil, errors.Wrapf(err, "cannot add container %s to pod %s", ctr.ID(), ctr.config.Pod)
+ return nil, fmt.Errorf("cannot add container %s to pod %s: %w", ctr.ID(), ctr.config.Pod, err)
}
}
@@ -350,14 +350,14 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
if pod != nil && pod.config.UsePodCgroup && !ctr.IsInfra() {
podCgroup, err := pod.CgroupPath()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID())
+ return nil, fmt.Errorf("error retrieving pod %s cgroup: %w", pod.ID(), err)
}
expectPodCgroup, err := ctr.expectPodCgroup()
if err != nil {
return nil, err
}
if expectPodCgroup && podCgroup == "" {
- return nil, errors.Wrapf(define.ErrInternal, "pod %s cgroup is not set", pod.ID())
+ return nil, fmt.Errorf("pod %s cgroup is not set: %w", pod.ID(), define.ErrInternal)
}
canUseCgroup := !rootless.IsRootless() || isRootlessCgroupSet(podCgroup)
if canUseCgroup {
@@ -367,7 +367,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctr.config.CgroupParent = CgroupfsDefaultCgroupParent
}
} else if strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+ return nil, fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg)
}
case config.SystemdCgroupsManager:
if ctr.config.CgroupParent == "" {
@@ -375,7 +375,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
case pod != nil && pod.config.UsePodCgroup && !ctr.IsInfra():
podCgroup, err := pod.CgroupPath()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID())
+ return nil, fmt.Errorf("error retrieving pod %s cgroup: %w", pod.ID(), err)
}
ctr.config.CgroupParent = podCgroup
case rootless.IsRootless() && ctr.config.CgroupsMode != cgroupSplit:
@@ -384,10 +384,10 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctr.config.CgroupParent = SystemdDefaultCgroupParent
}
} else if len(ctr.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
+ return nil, fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg)
}
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported Cgroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager)
+ return nil, fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg)
}
}
@@ -470,8 +470,8 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctrNamedVolumes = append(ctrNamedVolumes, dbVol)
// The volume exists, we're good
continue
- } else if errors.Cause(err) != define.ErrNoSuchVolume {
- return nil, errors.Wrapf(err, "error retrieving named volume %s for new container", vol.Name)
+ } else if !errors.Is(err, define.ErrNoSuchVolume) {
+ return nil, fmt.Errorf("error retrieving named volume %s for new container: %w", vol.Name, err)
}
}
@@ -504,7 +504,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
}
newVol, err := r.newVolume(false, volOptions...)
if err != nil {
- return nil, errors.Wrapf(err, "error creating named volume %q", vol.Name)
+ return nil, fmt.Errorf("error creating named volume %q: %w", vol.Name, err)
}
ctrNamedVolumes = append(ctrNamedVolumes, newVol)
@@ -527,7 +527,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctr.config.ShmDir = filepath.Join(ctr.bundlePath(), "shm")
if err := os.MkdirAll(ctr.config.ShmDir, 0700); err != nil {
if !os.IsExist(err) {
- return nil, errors.Wrap(err, "unable to create shm dir")
+ return nil, fmt.Errorf("unable to create shm dir: %w", err)
}
}
ctr.config.Mounts = append(ctr.config.Mounts, ctr.config.ShmDir)
@@ -596,7 +596,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// exist once we're done.
newConf, err := r.state.GetContainerConfig(c.ID())
if err != nil {
- return errors.Wrapf(err, "error retrieving container %s configuration from DB to remove", c.ID())
+ return fmt.Errorf("error retrieving container %s configuration from DB to remove: %w", c.ID(), err)
}
c.config = newConf
@@ -611,12 +611,12 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if c.config.Pod != "" && !removePod {
pod, err = r.state.Pod(c.config.Pod)
if err != nil {
- return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", c.ID(), pod.ID())
+ return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), pod.ID(), err)
}
// Lock the pod while we're removing container
if pod.config.LockID == c.config.LockID {
- return errors.Wrapf(define.ErrWillDeadlock, "container %s and pod %s share lock ID %d", c.ID(), pod.ID(), c.config.LockID)
+ return fmt.Errorf("container %s and pod %s share lock ID %d: %w", c.ID(), pod.ID(), c.config.LockID, define.ErrWillDeadlock)
}
pod.lock.Lock()
defer pod.lock.Unlock()
@@ -626,7 +626,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
infraID := pod.state.InfraContainerID
if c.ID() == infraID {
- return errors.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
+ return fmt.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
}
}
@@ -693,7 +693,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
}
if len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
- return errors.Wrapf(define.ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
+ return fmt.Errorf("container %s has dependent containers which must be removed before it: %s: %w", c.ID(), depsStr, define.ErrCtrExists)
}
}
@@ -705,8 +705,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
}
// Ignore ErrConmonDead - we couldn't retrieve the container's
// exit code properly, but it's still stopped.
- if err := c.stop(time); err != nil && errors.Cause(err) != define.ErrConmonDead {
- return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID())
+ if err := c.stop(time); err != nil && !errors.Is(err, define.ErrConmonDead) {
+ return fmt.Errorf("cannot remove container %s as it could not be stopped: %w", c.ID(), err)
}
// We unlocked as part of stop() above - there's a chance someone
@@ -717,7 +717,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if ok, _ := r.state.HasContainer(c.ID()); !ok {
// When the container has already been removed, the OCI runtime directory remain.
if err := c.cleanupRuntime(ctx); err != nil {
- return errors.Wrapf(err, "error cleaning up container %s from OCI runtime", c.ID())
+ return fmt.Errorf("error cleaning up container %s from OCI runtime: %w", c.ID(), err)
}
return nil
}
@@ -729,7 +729,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// Do this before we set ContainerStateRemoving, to ensure that we can
// actually remove from the OCI runtime.
if err := c.cleanup(ctx); err != nil {
- cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID())
+ cleanupErr = fmt.Errorf("error cleaning up container %s: %w", c.ID(), err)
}
// Set ContainerStateRemoving
@@ -739,7 +739,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if cleanupErr != nil {
logrus.Errorf(err.Error())
}
- return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID())
+ return fmt.Errorf("unable to set container %s removing state in database: %w", c.ID(), err)
}
// Remove all active exec sessions
@@ -789,7 +789,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// Deallocate the container's lock
if err := c.lock.Free(); err != nil {
if cleanupErr == nil {
- cleanupErr = errors.Wrapf(err, "error freeing lock for container %s", c.ID())
+ cleanupErr = fmt.Errorf("error freeing lock for container %s: %w", c.ID(), err)
} else {
logrus.Errorf("Free container lock: %v", err)
}
@@ -809,8 +809,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if !volume.Anonymous() {
continue
}
- if err := runtime.removeVolume(ctx, volume, false, timeout, false); err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
- if errors.Cause(err) == define.ErrVolumeBeingUsed {
+ if err := runtime.removeVolume(ctx, volume, false, timeout, false); err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
+ if errors.Is(err, define.ErrVolumeBeingUsed) {
// Ignore error, since podman will report original error
volumesFrom, _ := c.volumesFrom()
if len(volumesFrom) > 0 {
@@ -891,7 +891,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
c := new(Container)
c.config, err = r.state.GetContainerConfig(id)
if err != nil {
- return id, errors.Wrapf(err, "failed to retrieve config for ctr ID %q", id)
+ return id, fmt.Errorf("failed to retrieve config for ctr ID %q: %w", id, err)
}
c.state = new(ContainerState)
@@ -903,7 +903,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
if c.config.Pod != "" {
pod, err = r.state.Pod(c.config.Pod)
if err != nil {
- return id, errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", c.ID(), pod.ID())
+ return id, fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), pod.ID(), err)
}
// Lock the pod while we're removing container
@@ -918,7 +918,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
return "", err
}
if c.ID() == infraID {
- return id, errors.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
+ return id, fmt.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
}
}
@@ -1115,7 +1115,7 @@ func (r *Runtime) GetContainersByList(containers []string) ([]*Container, error)
for _, inputContainer := range containers {
ctr, err := r.LookupContainer(inputContainer)
if err != nil {
- return ctrs, errors.Wrapf(err, "unable to look up container %s", inputContainer)
+ return ctrs, fmt.Errorf("unable to look up container %s: %w", inputContainer, err)
}
ctrs = append(ctrs, ctr)
}
@@ -1128,7 +1128,7 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
var lastCreatedTime time.Time
ctrs, err := r.GetAllContainers()
if err != nil {
- return nil, errors.Wrapf(err, "unable to find latest container")
+ return nil, fmt.Errorf("unable to find latest container: %w", err)
}
if len(ctrs) == 0 {
return nil, define.ErrNoSuchCtr
@@ -1209,7 +1209,7 @@ func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) ([]*reports.Pru
// MountStorageContainer mounts the storage container's root filesystem
func (r *Runtime) MountStorageContainer(id string) (string, error) {
if _, err := r.GetContainer(id); err == nil {
- return "", errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ return "", fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
}
container, err := r.store.Container(id)
if err != nil {
@@ -1217,7 +1217,7 @@ func (r *Runtime) MountStorageContainer(id string) (string, error) {
}
mountPoint, err := r.store.Mount(container.ID, "")
if err != nil {
- return "", errors.Wrapf(err, "error mounting storage for container %s", id)
+ return "", fmt.Errorf("error mounting storage for container %s: %w", id, err)
}
return mountPoint, nil
}
@@ -1225,7 +1225,7 @@ func (r *Runtime) MountStorageContainer(id string) (string, error) {
// UnmountStorageContainer unmounts the storage container's root filesystem
func (r *Runtime) UnmountStorageContainer(id string, force bool) (bool, error) {
if _, err := r.GetContainer(id); err == nil {
- return false, errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ return false, fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
}
container, err := r.store.Container(id)
if err != nil {
@@ -1239,7 +1239,7 @@ func (r *Runtime) UnmountStorageContainer(id string, force bool) (bool, error) {
func (r *Runtime) IsStorageContainerMounted(id string) (bool, string, error) {
var path string
if _, err := r.GetContainer(id); err == nil {
- return false, "", errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ return false, "", fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
}
mountCnt, err := r.storageService.MountedContainerImage(id)
@@ -1265,13 +1265,13 @@ func (r *Runtime) StorageContainers() ([]storage.Container, error) {
storeContainers, err := r.store.Containers()
if err != nil {
- return nil, errors.Wrapf(err, "error reading list of all storage containers")
+ return nil, fmt.Errorf("error reading list of all storage containers: %w", err)
}
retCtrs := []storage.Container{}
for _, container := range storeContainers {
exists, err := r.state.HasContainer(container.ID)
if err != nil && err != define.ErrNoSuchCtr {
- return nil, errors.Wrapf(err, "failed to check if %s container exists in database", container.ID)
+ return nil, fmt.Errorf("failed to check if %s container exists in database: %w", container.ID, err)
}
if exists {
continue
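
The hunks above all apply the same conversion: github.com/pkg/errors wrapping plus errors.Cause comparisons become fmt.Errorf with %w plus errors.Is. A minimal standalone sketch of the pattern, not podman code (the local sentinel stands in for values like define.ErrNoSuchVolume):

    package main

    import (
        "errors"
        "fmt"
    )

    // errNoSuchVolume stands in for a libpod sentinel such as define.ErrNoSuchVolume.
    var errNoSuchVolume = errors.New("no such volume")

    func lookupVolume(name string) error {
        // Wrapping with %w keeps the sentinel reachable for errors.Is / errors.As.
        return fmt.Errorf("error retrieving named volume %s for new container: %w", name, errNoSuchVolume)
    }

    func main() {
        err := lookupVolume("data")
        // Previously: errors.Cause(err) == errNoSuchVolume compared only the root cause.
        // With the standard library, errors.Is walks the whole %w chain, so extra
        // layers of wrapping do not break the comparison.
        fmt.Println(errors.Is(err, errNoSuchVolume)) // true
    }
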
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index b13482722..d04607d2e 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -2,6 +2,8 @@ package libpod
import (
"context"
+ "errors"
+ "fmt"
"io"
"io/ioutil"
"os"
@@ -13,7 +15,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -40,14 +41,14 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage
if ctr.config.IsInfra {
pod, err := r.state.Pod(ctr.config.Pod)
if err != nil {
- return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), ctr.config.Pod)
+ return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", ctr.ID(), ctr.config.Pod, err)
}
if err := r.removePod(ctx, pod, true, true, timeout); err != nil {
- return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID())
+ return fmt.Errorf("removing image %s: container %s using image could not be removed: %w", imageID, ctr.ID(), err)
}
} else {
if err := r.removeContainer(ctx, ctr, true, false, false, timeout); err != nil {
- return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID())
+ return fmt.Errorf("removing image %s: container %s using image could not be removed: %w", imageID, ctr.ID(), err)
}
}
}
@@ -106,7 +107,7 @@ func (r *Runtime) Build(ctx context.Context, options buildahDefine.BuildOptions,
func DownloadFromFile(reader *os.File) (string, error) {
outFile, err := ioutil.TempFile(util.Tmpdir(), "import")
if err != nil {
- return "", errors.Wrap(err, "error creating file")
+ return "", fmt.Errorf("error creating file: %w", err)
}
defer outFile.Close()
@@ -114,7 +115,7 @@ func DownloadFromFile(reader *os.File) (string, error) {
_, err = io.Copy(outFile, reader)
if err != nil {
- return "", errors.Wrapf(err, "error saving %s to %s", reader.Name(), outFile.Name())
+ return "", fmt.Errorf("error saving %s to %s: %w", reader.Name(), outFile.Name(), err)
}
return outFile.Name(), nil
diff --git a/libpod/runtime_migrate.go b/libpod/runtime_migrate.go
index f56cb83a4..139638a6b 100644
--- a/libpod/runtime_migrate.go
+++ b/libpod/runtime_migrate.go
@@ -14,7 +14,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -22,21 +21,21 @@ func (r *Runtime) stopPauseProcess() error {
if rootless.IsRootless() {
pausePidPath, err := util.GetRootlessPauseProcessPidPathGivenDir(r.config.Engine.TmpDir)
if err != nil {
- return errors.Wrapf(err, "could not get pause process pid file path")
+ return fmt.Errorf("could not get pause process pid file path: %w", err)
}
data, err := ioutil.ReadFile(pausePidPath)
if err != nil {
if os.IsNotExist(err) {
return nil
}
- return errors.Wrap(err, "cannot read pause process pid file")
+ return fmt.Errorf("cannot read pause process pid file: %w", err)
}
pausePid, err := strconv.Atoi(string(data))
if err != nil {
- return errors.Wrapf(err, "cannot parse pause pid file %s", pausePidPath)
+ return fmt.Errorf("cannot parse pause pid file %s: %w", pausePidPath, err)
}
if err := os.Remove(pausePidPath); err != nil {
- return errors.Wrapf(err, "cannot delete pause pid file %s", pausePidPath)
+ return fmt.Errorf("cannot delete pause pid file %s: %w", pausePidPath, err)
}
if err := syscall.Kill(pausePid, syscall.SIGKILL); err != nil {
return err
@@ -60,7 +59,7 @@ func (r *Runtime) migrate() error {
for _, ctr := range runningContainers {
fmt.Printf("stopped %s\n", ctr.ID())
if err := ctr.Stop(); err != nil {
- return errors.Wrapf(err, "cannot stop container %s", ctr.ID())
+ return fmt.Errorf("cannot stop container %s: %w", ctr.ID(), err)
}
}
@@ -68,7 +67,7 @@ func (r *Runtime) migrate() error {
runtimeChangeRequested := r.migrateRuntime != ""
requestedRuntime, runtimeExists := r.ociRuntimes[r.migrateRuntime]
if !runtimeExists && runtimeChangeRequested {
- return errors.Wrapf(define.ErrInvalidArg, "change to runtime %q requested but no such runtime is defined", r.migrateRuntime)
+ return fmt.Errorf("change to runtime %q requested but no such runtime is defined: %w", r.migrateRuntime, define.ErrInvalidArg)
}
for _, ctr := range allCtrs {
@@ -93,7 +92,7 @@ func (r *Runtime) migrate() error {
if needsWrite {
if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
- return errors.Wrapf(err, "error rewriting config for container %s", ctr.ID())
+ return fmt.Errorf("error rewriting config for container %s: %w", ctr.ID(), err)
}
}
}
diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go
index ee3d40484..25e48de14 100644
--- a/libpod/runtime_pod.go
+++ b/libpod/runtime_pod.go
@@ -2,11 +2,12 @@ package libpod
import (
"context"
+ "errors"
+ "fmt"
"time"
"github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
)
// Contains the public Runtime API for pods
@@ -112,7 +113,7 @@ func (r *Runtime) GetLatestPod() (*Pod, error) {
var lastCreatedTime time.Time
pods, err := r.GetAllPods()
if err != nil {
- return nil, errors.Wrapf(err, "unable to get all pods")
+ return nil, fmt.Errorf("unable to get all pods: %w", err)
}
if len(pods) == 0 {
return nil, define.ErrNoSuchPod
@@ -146,7 +147,7 @@ func (r *Runtime) GetRunningPods() ([]*Pod, error) {
pods = append(pods, c.PodID())
pod, err := r.GetPod(c.PodID())
if err != nil {
- if errors.Cause(err) == define.ErrPodRemoved || errors.Cause(err) == define.ErrNoSuchPod {
+ if errors.Is(err, define.ErrPodRemoved) || errors.Is(err, define.ErrNoSuchPod) {
continue
}
return nil, err
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 1f9ebe724..75ff24e41 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -5,6 +5,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"os"
"path"
@@ -18,7 +19,6 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/specgen"
runcconfig "github.com/opencontainers/runc/libcontainer/configs"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -38,14 +38,14 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
for _, option := range options {
if err := option(pod); err != nil {
- return nil, errors.Wrapf(err, "error running pod create option")
+ return nil, fmt.Errorf("error running pod create option: %w", err)
}
}
// Allocate a lock for the pod
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return nil, errors.Wrapf(err, "error allocating lock for new pod")
+ return nil, fmt.Errorf("error allocating lock for new pod: %w", err)
}
pod.lock = lock
pod.config.LockID = pod.lock.ID()
@@ -70,7 +70,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
if pod.config.CgroupParent == "" {
pod.config.CgroupParent = CgroupfsDefaultCgroupParent
} else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+ return nil, fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg)
}
// If we are set to use pod cgroups, set the cgroup parent that
// all containers in the pod will share
@@ -108,14 +108,14 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
pod.config.CgroupParent = SystemdDefaultCgroupParent
}
} else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
+ return nil, fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg)
}
// If we are set to use pod cgroups, set the cgroup parent that
// all containers in the pod will share
if pod.config.UsePodCgroup {
cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()), p.InfraContainerSpec.ResourceLimits)
if err != nil {
- return nil, errors.Wrapf(err, "unable to create pod cgroup for pod %s", pod.ID())
+ return nil, fmt.Errorf("unable to create pod cgroup for pod %s: %w", pod.ID(), err)
}
pod.state.CgroupPath = cgroupPath
if p.InfraContainerSpec != nil {
@@ -123,7 +123,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
}
}
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported Cgroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager)
+ return nil, fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg)
}
}
@@ -132,7 +132,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
}
if !pod.HasInfraContainer() && pod.SharesNamespaces() {
- return nil, errors.Errorf("Pods must have an infra container to share namespaces")
+		return nil, errors.New("pods must have an infra container to share namespaces")
}
if pod.HasInfraContainer() && !pod.SharesNamespaces() {
logrus.Infof("Pod has an infra container, but shares no namespaces")
@@ -157,12 +157,12 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
if addPodErr = r.state.AddPod(pod); addPodErr == nil {
return pod, nil
}
- if !generateName || (errors.Cause(addPodErr) != define.ErrPodExists && errors.Cause(addPodErr) != define.ErrCtrExists) {
+ if !generateName || (!errors.Is(addPodErr, define.ErrPodExists) && !errors.Is(addPodErr, define.ErrCtrExists)) {
break
}
}
if addPodErr != nil {
- return nil, errors.Wrapf(addPodErr, "error adding pod to state")
+ return nil, fmt.Errorf("error adding pod to state: %w", addPodErr)
}
return pod, nil
@@ -211,7 +211,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
force = true
}
if !removeCtrs && numCtrs > 0 {
- return errors.Wrapf(define.ErrCtrExists, "pod %s contains containers and cannot be removed", p.ID())
+ return fmt.Errorf("pod %s contains containers and cannot be removed: %w", p.ID(), define.ErrCtrExists)
}
// Go through and lock all containers so we can operate on them all at
@@ -239,7 +239,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// Ensure state appropriate for removal
if err := ctr.checkReadyForRemoval(); err != nil {
- return errors.Wrapf(err, "pod %s has containers that are not ready to be removed", p.ID())
+ return fmt.Errorf("pod %s has containers that are not ready to be removed: %w", p.ID(), err)
}
}
@@ -311,7 +311,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
for volName := range ctrNamedVolumes {
volume, err := r.state.Volume(volName)
- if err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
+ if err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
logrus.Errorf("Retrieving volume %s: %v", volName, err)
continue
}
@@ -319,7 +319,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
continue
}
if err := r.removeVolume(ctx, volume, false, timeout, false); err != nil {
- if errors.Cause(err) == define.ErrNoSuchVolume || errors.Cause(err) == define.ErrVolumeRemoved {
+ if errors.Is(err, define.ErrNoSuchVolume) || errors.Is(err, define.ErrVolumeRemoved) {
continue
}
logrus.Errorf("Removing volume %s: %v", volName, err)
@@ -340,7 +340,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
case config.SystemdCgroupsManager:
if err := deleteSystemdCgroup(p.state.CgroupPath, p.ResourceLim()); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID())
+ removalErr = fmt.Errorf("error removing pod %s cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
@@ -354,7 +354,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
conmonCgroup, err := cgroups.Load(conmonCgroupPath)
if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup", p.ID())
+ removalErr = fmt.Errorf("error retrieving pod %s conmon cgroup: %w", p.ID(), err)
} else {
logrus.Debugf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
}
@@ -362,7 +362,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
if err == nil {
if err = conmonCgroup.Delete(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error removing pod %s conmon cgroup", p.ID())
+ removalErr = fmt.Errorf("error removing pod %s conmon cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
}
@@ -371,7 +371,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
cgroup, err := cgroups.Load(p.state.CgroupPath)
if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error retrieving pod %s cgroup", p.ID())
+ removalErr = fmt.Errorf("error retrieving pod %s cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
@@ -379,7 +379,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
if err == nil {
if err := cgroup.Delete(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID())
+ removalErr = fmt.Errorf("error removing pod %s cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
@@ -390,7 +390,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// keep going so we make sure to evict the pod before
// ending up with an inconsistent state.
if removalErr == nil {
- removalErr = errors.Wrapf(define.ErrInternal, "unrecognized cgroup manager %s when removing pod %s cgroups", p.runtime.config.Engine.CgroupManager, p.ID())
+ removalErr = fmt.Errorf("unrecognized cgroup manager %s when removing pod %s cgroups: %w", p.runtime.config.Engine.CgroupManager, p.ID(), define.ErrInternal)
} else {
logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.Engine.CgroupManager, p.ID())
}
@@ -416,7 +416,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// Deallocate the pod lock
if err := p.lock.Free(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error freeing pod %s lock", p.ID())
+ removalErr = fmt.Errorf("error freeing pod %s lock: %w", p.ID(), err)
} else {
logrus.Errorf("Freeing pod %s lock: %v", p.ID(), err)
}
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
index db055f40b..9149dd72f 100644
--- a/libpod/runtime_renumber.go
+++ b/libpod/runtime_renumber.go
@@ -1,8 +1,9 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/podman/v4/libpod/events"
- "github.com/pkg/errors"
)
// renumberLocks reassigns lock numbers for all containers and pods in the
@@ -26,7 +27,7 @@ func (r *Runtime) renumberLocks() error {
for _, ctr := range allCtrs {
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return errors.Wrapf(err, "error allocating lock for container %s", ctr.ID())
+ return fmt.Errorf("error allocating lock for container %s: %w", ctr.ID(), err)
}
ctr.config.LockID = lock.ID()
@@ -43,7 +44,7 @@ func (r *Runtime) renumberLocks() error {
for _, pod := range allPods {
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return errors.Wrapf(err, "error allocating lock for pod %s", pod.ID())
+ return fmt.Errorf("error allocating lock for pod %s: %w", pod.ID(), err)
}
pod.config.LockID = lock.ID()
@@ -60,7 +61,7 @@ func (r *Runtime) renumberLocks() error {
for _, vol := range allVols {
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return errors.Wrapf(err, "error allocating lock for volume %s", vol.Name())
+ return fmt.Errorf("error allocating lock for volume %s: %w", vol.Name(), err)
}
vol.config.LockID = lock.ID()
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index 6872db21d..9efb30e03 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -2,11 +2,11 @@ package libpod
import (
"context"
+ "errors"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/domain/entities/reports"
- "github.com/pkg/errors"
)
// Contains the public Runtime API for volumes
@@ -133,7 +133,7 @@ func (r *Runtime) PruneVolumes(ctx context.Context, filterFuncs []VolumeFilter)
report.Id = vol.Name()
var timeout *uint
if err := r.RemoveVolume(ctx, vol, false, timeout); err != nil {
- if errors.Cause(err) != define.ErrVolumeBeingUsed && errors.Cause(err) != define.ErrVolumeRemoved {
+ if !errors.Is(err, define.ErrVolumeBeingUsed) && !errors.Is(err, define.ErrVolumeRemoved) {
report.Err = err
} else {
// We didn't remove the volume for some reason
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index da8c3712d..a751d75d2 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -5,6 +5,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -17,7 +18,6 @@ import (
"github.com/containers/storage/drivers/quota"
"github.com/containers/storage/pkg/stringid"
pluginapi "github.com/docker/go-plugins-helpers/volume"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -36,7 +36,7 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
volume := newVolume(r)
for _, option := range options {
if err := option(volume); err != nil {
- return nil, errors.Wrapf(err, "running volume create option")
+ return nil, fmt.Errorf("running volume create option: %w", err)
}
}
@@ -51,17 +51,17 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
// Check if volume with given name exists.
exists, err := r.state.HasVolume(volume.config.Name)
if err != nil {
- return nil, errors.Wrapf(err, "checking if volume with name %s exists", volume.config.Name)
+ return nil, fmt.Errorf("checking if volume with name %s exists: %w", volume.config.Name, err)
}
if exists {
- return nil, errors.Wrapf(define.ErrVolumeExists, "volume with name %s already exists", volume.config.Name)
+ return nil, fmt.Errorf("volume with name %s already exists: %w", volume.config.Name, define.ErrVolumeExists)
}
// Plugin can be nil if driver is local, but that's OK - superfluous
// assignment doesn't hurt much.
plugin, err := r.getVolumePlugin(volume.config)
if err != nil {
- return nil, errors.Wrapf(err, "volume %s uses volume plugin %s but it could not be retrieved", volume.config.Name, volume.config.Driver)
+ return nil, fmt.Errorf("volume %s uses volume plugin %s but it could not be retrieved: %w", volume.config.Name, volume.config.Driver, err)
}
volume.plugin = plugin
@@ -73,13 +73,13 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
case "device":
if strings.ToLower(volume.config.Options["type"]) == "bind" {
if _, err := os.Stat(val); err != nil {
- return nil, errors.Wrapf(err, "invalid volume option %s for driver 'local'", key)
+ return nil, fmt.Errorf("invalid volume option %s for driver 'local': %w", key, err)
}
}
case "o", "type", "uid", "gid", "size", "inodes", "noquota", "copy", "nocopy":
// Do nothing, valid keys
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "invalid mount option %s for driver 'local'", key)
+ return nil, fmt.Errorf("invalid mount option %s for driver 'local': %w", key, define.ErrInvalidArg)
}
}
}
@@ -99,17 +99,17 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
// Create the mountpoint of this volume
volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name)
if err := os.MkdirAll(volPathRoot, 0700); err != nil {
- return nil, errors.Wrapf(err, "creating volume directory %q", volPathRoot)
+ return nil, fmt.Errorf("creating volume directory %q: %w", volPathRoot, err)
}
if err := os.Chown(volPathRoot, volume.config.UID, volume.config.GID); err != nil {
- return nil, errors.Wrapf(err, "chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID)
+ return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", volPathRoot, volume.config.UID, volume.config.GID, err)
}
fullVolPath := filepath.Join(volPathRoot, "_data")
if err := os.MkdirAll(fullVolPath, 0755); err != nil {
- return nil, errors.Wrapf(err, "creating volume directory %q", fullVolPath)
+ return nil, fmt.Errorf("creating volume directory %q: %w", fullVolPath, err)
}
if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
- return nil, errors.Wrapf(err, "chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID)
+ return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", fullVolPath, volume.config.UID, volume.config.GID, err)
}
if err := LabelVolumePath(fullVolPath); err != nil {
return nil, err
@@ -134,7 +134,7 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
}
if projectQuotaSupported {
if err := q.SetQuota(fullVolPath, quota); err != nil {
- return nil, errors.Wrapf(err, "failed to set size quota size=%d inodes=%d for volume directory %q", volume.config.Size, volume.config.Inodes, fullVolPath)
+ return nil, fmt.Errorf("failed to set size quota size=%d inodes=%d for volume directory %q: %w", volume.config.Size, volume.config.Inodes, fullVolPath, err)
}
}
}
@@ -144,7 +144,7 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return nil, errors.Wrapf(err, "allocating lock for new volume")
+ return nil, fmt.Errorf("allocating lock for new volume: %w", err)
}
volume.lock = lock
volume.config.LockID = volume.lock.ID()
@@ -161,7 +161,7 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
// Add the volume to state
if err := r.state.AddVolume(volume); err != nil {
- return nil, errors.Wrapf(err, "adding volume to state")
+ return nil, fmt.Errorf("adding volume to state: %w", err)
}
defer volume.newVolumeEvent(events.Create)
return volume, nil
@@ -272,7 +272,7 @@ func makeVolumeInPluginIfNotExist(name string, options map[string]string, plugin
createReq.Name = name
createReq.Options = options
if err := plugin.CreateVolume(createReq); err != nil {
- return errors.Wrapf(err, "creating volume %q in plugin %s", name, plugin.Name)
+ return fmt.Errorf("creating volume %q in plugin %s: %w", name, plugin.Name, err)
}
}
@@ -305,7 +305,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
if len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
if !force {
- return errors.Wrapf(define.ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
+ return fmt.Errorf("volume %s is being used by the following container(s): %s: %w", v.Name(), depsStr, define.ErrVolumeBeingUsed)
}
// We need to remove all containers using the volume
@@ -314,17 +314,17 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
if err != nil {
// If the container's removed, no point in
// erroring.
- if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrRemoved {
+ if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
continue
}
- return errors.Wrapf(err, "removing container %s that depends on volume %s", dep, v.Name())
+ return fmt.Errorf("removing container %s that depends on volume %s: %w", dep, v.Name(), err)
}
logrus.Debugf("Removing container %s (depends on volume %q)", ctr.ID(), v.Name())
if err := r.removeContainer(ctx, ctr, force, false, false, timeout); err != nil {
- return errors.Wrapf(err, "removing container %s that depends on volume %s", ctr.ID(), v.Name())
+ return fmt.Errorf("removing container %s that depends on volume %s: %w", ctr.ID(), v.Name(), err)
}
}
}
@@ -337,7 +337,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
// them.
logrus.Errorf("Unmounting volume %s: %v", v.Name(), err)
} else {
- return errors.Wrapf(err, "unmounting volume %s", v.Name())
+ return fmt.Errorf("unmounting volume %s: %w", v.Name(), err)
}
}
@@ -353,7 +353,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
// Do we have a volume driver?
if v.plugin == nil {
canRemove = false
- removalErr = errors.Wrapf(define.ErrMissingPlugin, "cannot remove volume %s from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
+ removalErr = fmt.Errorf("cannot remove volume %s from plugin %s, but it has been removed from Podman: %w", v.Name(), v.Driver(), define.ErrMissingPlugin)
} else {
// Ping the plugin first to verify the volume still
// exists.
@@ -364,14 +364,14 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
getReq.Name = v.Name()
if _, err := v.plugin.GetVolume(getReq); err != nil {
canRemove = false
- removalErr = errors.Wrapf(err, "volume %s could not be retrieved from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
+ removalErr = fmt.Errorf("volume %s could not be retrieved from plugin %s, but it has been removed from Podman: %w", v.Name(), v.Driver(), err)
}
}
if canRemove {
req := new(pluginapi.RemoveRequest)
req.Name = v.Name()
if err := v.plugin.RemoveVolume(req); err != nil {
- return errors.Wrapf(err, "volume %s could not be removed from plugin %s", v.Name(), v.Driver())
+ return fmt.Errorf("volume %s could not be removed from plugin %s: %w", v.Name(), v.Driver(), err)
}
}
}
@@ -381,13 +381,13 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
if removalErr != nil {
logrus.Errorf("Removing volume %s from plugin %s: %v", v.Name(), v.Driver(), removalErr)
}
- return errors.Wrapf(err, "removing volume %s", v.Name())
+ return fmt.Errorf("removing volume %s: %w", v.Name(), err)
}
// Free the volume's lock
if err := v.lock.Free(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "freeing lock for volume %s", v.Name())
+ removalErr = fmt.Errorf("freeing lock for volume %s: %w", v.Name(), err)
} else {
logrus.Errorf("Freeing lock for volume %q: %v", v.Name(), err)
}
@@ -397,7 +397,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
// from /var/lib/containers/storage/volumes
if err := v.teardownStorage(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "cleaning up volume storage for %q", v.Name())
+ removalErr = fmt.Errorf("cleaning up volume storage for %q: %w", v.Name(), err)
} else {
logrus.Errorf("Cleaning up volume storage for volume %q: %v", v.Name(), err)
}
diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go
index 411b0efe9..38fe0196a 100644
--- a/pkg/api/handlers/compat/containers.go
+++ b/pkg/api/handlers/compat/containers.go
@@ -2,6 +2,7 @@ package compat
import (
"encoding/json"
+ "errors"
"fmt"
"net/http"
"sort"
@@ -27,7 +28,6 @@ import (
"github.com/docker/go-connections/nat"
"github.com/docker/go-units"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -46,7 +46,7 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -73,7 +73,7 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
reports, err := containerEngine.ContainerRm(r.Context(), []string{name}, options)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, name, err)
return
}
@@ -83,7 +83,7 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) {
}
if len(reports) > 0 && reports[0].Err != nil {
err = reports[0].Err
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, name, err)
return
}
@@ -110,12 +110,12 @@ func ListContainers(w http.ResponseWriter, r *http.Request) {
filterMap, err := util.PrepareFilters(r)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to decode filter parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to decode filter parameters for %s: %w", r.URL.String(), err))
return
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -164,7 +164,7 @@ func ListContainers(w http.ResponseWriter, r *http.Request) {
for _, ctnr := range containers {
api, err := LibpodToContainer(ctnr, query.Size)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
// container was removed between the initial fetch of the list and conversion
logrus.Debugf("Container %s removed between initial fetch and conversion, ignoring in output", ctnr.ID())
continue
@@ -187,7 +187,7 @@ func GetContainer(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -215,7 +215,7 @@ func KillContainer(w http.ResponseWriter, r *http.Request) {
Signal: "KILL",
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -228,12 +228,12 @@ func KillContainer(w http.ResponseWriter, r *http.Request) {
}
report, err := containerEngine.ContainerKill(r.Context(), []string{name}, options)
if err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid ||
- errors.Cause(err) == define.ErrCtrStopped {
+ if errors.Is(err, define.ErrCtrStateInvalid) ||
+ errors.Is(err, define.ErrCtrStopped) {
utils.Error(w, http.StatusConflict, err)
return
}
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, name, err)
return
}
@@ -512,7 +512,7 @@ func LibpodToContainerJSON(l *libpod.Container, sz bool) (*types.ContainerJSON,
for ep := range inspect.HostConfig.PortBindings {
splitp := strings.SplitN(ep, "/", 2)
if len(splitp) != 2 {
- return nil, errors.Errorf("PORT/PROTOCOL Format required for %q", ep)
+ return nil, fmt.Errorf("PORT/PROTOCOL Format required for %q", ep)
}
exposedPort, err := nat.NewPort(splitp[1], splitp[0])
if err != nil {
@@ -616,7 +616,7 @@ func RenameContainer(w http.ResponseWriter, r *http.Request) {
Name string `schema:"name"`
}{}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -627,7 +627,7 @@ func RenameContainer(w http.ResponseWriter, r *http.Request) {
}
if _, err := runtime.RenameContainer(r.Context(), ctr, query.Name); err != nil {
- if errors.Cause(err) == define.ErrPodExists || errors.Cause(err) == define.ErrCtrExists {
+ if errors.Is(err, define.ErrPodExists) || errors.Is(err, define.ErrCtrExists) {
utils.Error(w, http.StatusConflict, err)
return
}
diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go
index 6fdd5c6a7..9da21d15f 100644
--- a/pkg/api/handlers/compat/networks.go
+++ b/pkg/api/handlers/compat/networks.go
@@ -161,8 +161,9 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) {
func CreateNetwork(w http.ResponseWriter, r *http.Request) {
var (
- networkCreate types.NetworkCreateRequest
- network nettypes.Network
+ networkCreate types.NetworkCreateRequest
+ network nettypes.Network
+ responseWarning string
)
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
if err := json.NewDecoder(r.Body).Decode(&networkCreate); err != nil {
@@ -179,8 +180,40 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
network.Internal = networkCreate.Internal
network.IPv6Enabled = networkCreate.EnableIPv6
- // FIXME use docker options and convert them to valid libpod options
- // network.Options = networkCreate.Options
+ network.Options = make(map[string]string)
+
+ // TODO: we should consider making this constants in c/common/libnetwork/types
+ for opt, optVal := range networkCreate.Options {
+ switch opt {
+ case "mtu":
+ fallthrough
+ case "com.docker.network.driver.mtu":
+ if network.Driver == nettypes.BridgeNetworkDriver {
+ network.Options["mtu"] = optVal
+ }
+ case "icc":
+ fallthrough
+ case "com.docker.network.bridge.enable_icc":
+ // TODO: needs to be implemented
+ if network.Driver == nettypes.BridgeNetworkDriver {
+ responseWarning = "com.docker.network.bridge.enable_icc is not currently implemented"
+ }
+ case "com.docker.network.bridge.name":
+ if network.Driver == nettypes.BridgeNetworkDriver {
+ network.NetworkInterface = optVal
+ }
+ case "mode":
+ if network.Driver == nettypes.MacVLANNetworkDriver || network.Driver == nettypes.IPVLANNetworkDriver {
+ network.Options[opt] = optVal
+ }
+ case "parent":
+ if network.Driver == nettypes.MacVLANNetworkDriver || network.Driver == nettypes.IPVLANNetworkDriver {
+ network.NetworkInterface = optVal
+ }
+ default:
+ responseWarning = "\"" + opt + ": " + optVal + "\" is not a recognized option"
+ }
+ }
// dns is only enabled for the bridge driver
if network.Driver == nettypes.BridgeNetworkDriver {
@@ -242,9 +275,10 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
body := struct {
ID string `json:"Id"`
- Warning string
+ Warning string `json:"Warning"`
}{
- ID: newNetwork.ID,
+ ID: newNetwork.ID,
+ Warning: responseWarning,
}
utils.WriteResponse(w, http.StatusCreated, body)
}
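
The CreateNetwork hunk above maps recognized Docker network options onto the libpod network and reports anything unsupported through the response's Warning field instead of failing the request. A hedged sketch of that translate-and-warn approach, using a hypothetical helper rather than the handler itself:

    package main

    import "fmt"

    // translateBridgeOptions is a hypothetical helper mirroring the switch in
    // CreateNetwork above: recognized Docker options are copied into the libpod
    // network options, anything else only yields an advisory warning string that
    // ends up in the response's Warning field.
    func translateBridgeOptions(in map[string]string) (out map[string]string, warning string) {
        out = make(map[string]string)
        for opt, val := range in {
            switch opt {
            case "mtu", "com.docker.network.driver.mtu":
                out["mtu"] = val
            case "icc", "com.docker.network.bridge.enable_icc":
                warning = "com.docker.network.bridge.enable_icc is not currently implemented"
            default:
                warning = "\"" + opt + ": " + val + "\" is not a recognized option"
            }
        }
        return out, warning
    }

    func main() {
        opts, warn := translateBridgeOptions(map[string]string{"mtu": "9000", "foo": "bar"})
        fmt.Println(opts, warn) // map[mtu:9000] "foo: bar" is not a recognized option
    }
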
diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go
index 5b92358fa..92fd94390 100644
--- a/pkg/api/handlers/libpod/pods.go
+++ b/pkg/api/handlers/libpod/pods.go
@@ -2,6 +2,7 @@ package libpod
import (
"encoding/json"
+ "errors"
"fmt"
"net/http"
"strings"
@@ -19,7 +20,6 @@ import (
"github.com/containers/podman/v4/pkg/specgenutil"
"github.com/containers/podman/v4/pkg/util"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -33,11 +33,11 @@ func PodCreate(w http.ResponseWriter, r *http.Request) {
)
psg := specgen.PodSpecGenerator{InfraContainerSpec: &specgen.SpecGenerator{}}
if err := json.NewDecoder(r.Body).Decode(&psg); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, failedToDecodeSpecgen))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("%v: %w", failedToDecodeSpecgen, err))
return
}
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, failedToDecodeSpecgen))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("%v: %w", failedToDecodeSpecgen, err))
return
}
if !psg.NoInfra {
@@ -51,17 +51,17 @@ func PodCreate(w http.ResponseWriter, r *http.Request) {
}
err = specgenutil.FillOutSpecGen(psg.InfraContainerSpec, &infraOptions, []string{}) // necessary for default values in many cases (userns, idmappings)
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error filling out specgen"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error filling out specgen: %w", err))
return
}
out, err := json.Marshal(psg) // marshal our spec so the matching options can be unmarshaled into infra
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, failedToDecodeSpecgen))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("%v: %w", failedToDecodeSpecgen, err))
return
}
err = json.Unmarshal(out, psg.InfraContainerSpec) // unmarhal matching options
if err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, failedToDecodeSpecgen))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("%v: %w", failedToDecodeSpecgen, err))
return
}
// a few extra that do not have the same json tags
@@ -75,10 +75,10 @@ func PodCreate(w http.ResponseWriter, r *http.Request) {
pod, err := generate.MakePod(&podSpecComplete, runtime)
if err != nil {
httpCode := http.StatusInternalServerError
- if errors.Cause(err) == define.ErrPodExists {
+ if errors.Is(err, define.ErrPodExists) {
httpCode = http.StatusConflict
}
- utils.Error(w, httpCode, errors.Wrap(err, "failed to make pod"))
+ utils.Error(w, httpCode, fmt.Errorf("failed to make pod: %w", err))
return
}
utils.WriteResponse(w, http.StatusCreated, entities.IDResponse{ID: pod.ID()})
@@ -89,7 +89,7 @@ func Pods(w http.ResponseWriter, r *http.Request) {
filterMap, err := util.PrepareFilters(r)
if err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -139,7 +139,7 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
name := utils.GetName(r)
@@ -164,7 +164,7 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
} else {
responses, stopError = pod.Stop(r.Context(), false)
}
- if stopError != nil && errors.Cause(stopError) != define.ErrPodPartialFail {
+ if stopError != nil && !errors.Is(stopError, define.ErrPodPartialFail) {
utils.Error(w, http.StatusInternalServerError, err)
return
}
@@ -178,7 +178,7 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
report := entities.PodStopReport{Id: pod.ID()}
for id, err := range responses {
- report.Errs = append(report.Errs, errors.Wrapf(err, "error stopping container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error stopping container %s: %w", id, err))
}
code := http.StatusOK
@@ -207,14 +207,14 @@ func PodStart(w http.ResponseWriter, r *http.Request) {
}
responses, err := pod.Start(r.Context())
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
utils.Error(w, http.StatusConflict, err)
return
}
report := entities.PodStartReport{Id: pod.ID()}
for id, err := range responses {
- report.Errs = append(report.Errs, errors.Wrapf(err, "error starting container "+id))
+		report.Errs = append(report.Errs, fmt.Errorf("error starting container %s: %w", id, err))
}
code := http.StatusOK
@@ -237,7 +237,7 @@ func PodDelete(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
name := utils.GetName(r)
@@ -263,14 +263,14 @@ func PodRestart(w http.ResponseWriter, r *http.Request) {
return
}
responses, err := pod.Restart(r.Context())
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
utils.Error(w, http.StatusInternalServerError, err)
return
}
report := entities.PodRestartReport{Id: pod.ID()}
for id, err := range responses {
- report.Errs = append(report.Errs, errors.Wrapf(err, "error restarting container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error restarting container %s: %w", id, err))
}
code := http.StatusOK
@@ -314,14 +314,14 @@ func PodPause(w http.ResponseWriter, r *http.Request) {
return
}
responses, err := pod.Pause(r.Context())
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
utils.Error(w, http.StatusInternalServerError, err)
return
}
report := entities.PodPauseReport{Id: pod.ID()}
for id, v := range responses {
- report.Errs = append(report.Errs, errors.Wrapf(v, "error pausing container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error pausing container %s: %w", id, v))
}
code := http.StatusOK
@@ -340,14 +340,14 @@ func PodUnpause(w http.ResponseWriter, r *http.Request) {
return
}
responses, err := pod.Unpause(r.Context())
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
utils.Error(w, http.StatusInternalServerError, err)
return
}
report := entities.PodUnpauseReport{Id: pod.ID()}
for id, v := range responses {
- report.Errs = append(report.Errs, errors.Wrapf(v, "error unpausing container %s", id))
+ report.Errs = append(report.Errs, fmt.Errorf("error unpausing container %s: %w", id, v))
}
code := http.StatusOK
@@ -374,7 +374,7 @@ func PodTop(w http.ResponseWriter, r *http.Request) {
PsArgs: psArgs,
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -456,7 +456,7 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
// override any golang type defaults
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
if _, found := r.URL.Query()["signal"]; found {
@@ -465,7 +465,7 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
sig, err := util.ParseSignal(signal)
if err != nil {
- utils.InternalServerError(w, errors.Wrapf(err, "unable to parse signal value"))
+ utils.InternalServerError(w, fmt.Errorf("unable to parse signal value: %w", err))
return
}
name := utils.GetName(r)
@@ -488,12 +488,12 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
}
}
if !hasRunning {
- utils.Error(w, http.StatusConflict, errors.Errorf("cannot kill a pod with no running containers: %s", pod.ID()))
+ utils.Error(w, http.StatusConflict, fmt.Errorf("cannot kill a pod with no running containers: %s", pod.ID()))
return
}
responses, err := pod.Kill(r.Context(), uint(sig))
- if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
+ if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
utils.Error(w, http.StatusInternalServerError, err)
return
}
@@ -534,7 +534,7 @@ func PodStats(w http.ResponseWriter, r *http.Request) {
// default would go here
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
- utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -549,13 +549,12 @@ func PodStats(w http.ResponseWriter, r *http.Request) {
reports, err := containerEngine.PodStats(r.Context(), query.NamesOrIDs, options)
// Error checks as documented in swagger.
- switch errors.Cause(err) {
- case define.ErrNoSuchPod:
- utils.Error(w, http.StatusNotFound, err)
- return
- case nil:
- // Nothing to do.
- default:
+ if err != nil {
+ if errors.Is(err, define.ErrNoSuchPod) {
+ utils.Error(w, http.StatusNotFound, err)
+ return
+ }
+
utils.InternalServerError(w, err)
return
}
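
The PodStats hunk above rewrites a switch on errors.Cause(err) as explicit errors.Is checks: a plain value switch compares the outer, wrapped error, so it would no longer match once the sentinel is wrapped with %w. A minimal illustration, not podman code (the local sentinel stands in for define.ErrNoSuchPod):

    package main

    import (
        "errors"
        "fmt"
    )

    var errNoSuchPod = errors.New("no such pod")

    func main() {
        err := fmt.Errorf("looking up pod: %w", errNoSuchPod)

        // A value switch only sees the outer wrapper, so this case is never taken.
        switch err {
        case errNoSuchPod:
            fmt.Println("matched by value")
        }

        // errors.Is unwraps the %w chain and still matches the sentinel; the same
        // check could also be written as `switch { case errors.Is(...): }`.
        if errors.Is(err, errNoSuchPod) {
            fmt.Println("matched by errors.Is")
        }
    }
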
diff --git a/pkg/api/handlers/libpod/volumes.go b/pkg/api/handlers/libpod/volumes.go
index e792dea35..5eac76f5b 100644
--- a/pkg/api/handlers/libpod/volumes.go
+++ b/pkg/api/handlers/libpod/volumes.go
@@ -2,9 +2,12 @@ package libpod
import (
"encoding/json"
+ "fmt"
"net/http"
"net/url"
+ "errors"
+
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/api/handlers/utils"
@@ -16,7 +19,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/infra/abi/parse"
"github.com/containers/podman/v4/pkg/util"
"github.com/gorilla/schema"
- "github.com/pkg/errors"
)
func CreateVolume(w http.ResponseWriter, r *http.Request) {
@@ -30,14 +32,14 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
input := entities.VolumeCreateOptions{}
// decode params from body
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
- utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
@@ -108,7 +110,7 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
filterMap, err := util.PrepareFilters(r)
if err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
@@ -181,7 +183,7 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusInternalServerError,
- errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
name := utils.GetName(r)
@@ -191,7 +193,7 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {
return
}
if err := runtime.RemoveVolume(r.Context(), vol, query.Force, query.Timeout); err != nil {
- if errors.Cause(err) == define.ErrVolumeBeingUsed {
+ if errors.Is(err, define.ErrVolumeBeingUsed) {
utils.Error(w, http.StatusConflict, err)
return
}
diff --git a/pkg/api/handlers/utils/errors.go b/pkg/api/handlers/utils/errors.go
index bf60b2c84..ab1b6f227 100644
--- a/pkg/api/handlers/utils/errors.go
+++ b/pkg/api/handlers/utils/errors.go
@@ -1,17 +1,18 @@
package utils
import (
+ "errors"
+ "fmt"
"net/http"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/storage"
- "github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
var (
- ErrLinkNotSupport = errors.New("Link is not supported")
+ ErrLinkNotSupport = errors.New("link is not supported")
)
// TODO: document the exported functions in this file and make them more
@@ -25,7 +26,7 @@ func Error(w http.ResponseWriter, code int, err error) {
// Log detailed message of what happened to machine running podman service
log.Infof("Request Failed(%s): %s", http.StatusText(code), err.Error())
em := errorhandling.ErrorModel{
- Because: (errors.Cause(err)).Error(),
+ Because: errorhandling.Cause(err).Error(),
Message: err.Error(),
ResponseCode: code,
}
@@ -33,51 +34,50 @@ func Error(w http.ResponseWriter, code int, err error) {
}
func VolumeNotFound(w http.ResponseWriter, name string, err error) {
- if errors.Cause(err) != define.ErrNoSuchVolume {
+ if !errors.Is(err, define.ErrNoSuchVolume) {
InternalServerError(w, err)
}
Error(w, http.StatusNotFound, err)
}
func ContainerNotFound(w http.ResponseWriter, name string, err error) {
- switch errors.Cause(err) {
- case define.ErrNoSuchCtr, define.ErrCtrExists:
+ if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrExists) {
Error(w, http.StatusNotFound, err)
- default:
+ } else {
InternalServerError(w, err)
}
}
func ImageNotFound(w http.ResponseWriter, name string, err error) {
- if errors.Cause(err) != storage.ErrImageUnknown {
+ if !errors.Is(err, storage.ErrImageUnknown) {
InternalServerError(w, err)
}
Error(w, http.StatusNotFound, err)
}
func NetworkNotFound(w http.ResponseWriter, name string, err error) {
- if errors.Cause(err) != define.ErrNoSuchNetwork {
+ if !errors.Is(err, define.ErrNoSuchNetwork) {
InternalServerError(w, err)
}
Error(w, http.StatusNotFound, err)
}
func PodNotFound(w http.ResponseWriter, name string, err error) {
- if errors.Cause(err) != define.ErrNoSuchPod {
+ if !errors.Is(err, define.ErrNoSuchPod) {
InternalServerError(w, err)
}
Error(w, http.StatusNotFound, err)
}
func SessionNotFound(w http.ResponseWriter, name string, err error) {
- if errors.Cause(err) != define.ErrNoSuchExecSession {
+ if !errors.Is(err, define.ErrNoSuchExecSession) {
InternalServerError(w, err)
}
Error(w, http.StatusNotFound, err)
}
func SecretNotFound(w http.ResponseWriter, nameOrID string, err error) {
- if errors.Cause(err).Error() != "no such secret" {
+ if errorhandling.Cause(err).Error() != "no such secret" {
InternalServerError(w, err)
}
Error(w, http.StatusNotFound, err)
@@ -92,7 +92,7 @@ func InternalServerError(w http.ResponseWriter, err error) {
}
func BadRequest(w http.ResponseWriter, key string, value string, err error) {
- e := errors.Wrapf(err, "failed to parse query parameter '%s': %q", key, value)
+ e := fmt.Errorf("failed to parse query parameter '%s': %q: %w", key, value, err)
Error(w, http.StatusBadRequest, e)
}
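
A side note on the pattern used throughout these handlers: once errors are wrapped with fmt.Errorf and the %w verb, sentinels remain matchable via errors.Is no matter how many layers of wrapping were added. Below is a minimal, self-contained sketch of that pattern; errNoSuchPod and lookupPod are illustrative stand-ins, not Podman APIs.

```go
package main

import (
	"errors"
	"fmt"
)

// errNoSuchPod stands in for a sentinel such as define.ErrNoSuchPod.
var errNoSuchPod = errors.New("no such pod")

func lookupPod(name string) error {
	// Wrapping with %w keeps the sentinel reachable for errors.Is.
	return fmt.Errorf("looking up pod %q: %w", name, errNoSuchPod)
}

func main() {
	err := lookupPod("web")
	if errors.Is(err, errNoSuchPod) {
		fmt.Println("would map to 404:", err) // matched through the wrap
		return
	}
	fmt.Println("would map to 500:", err)
}
```
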
diff --git a/pkg/bindings/manifests/manifests.go b/pkg/bindings/manifests/manifests.go
index aaa26d7e1..a68dd5a4e 100644
--- a/pkg/bindings/manifests/manifests.go
+++ b/pkg/bindings/manifests/manifests.go
@@ -231,7 +231,7 @@ func Modify(ctx context.Context, name string, images []string, options *ModifyOp
err = errorhandling.JoinErrors(report.Errors)
if err != nil {
errModel := errorhandling.ErrorModel{
- Because: (errors.Cause(err)).Error(),
+ Because: errorhandling.Cause(err).Error(),
Message: err.Error(),
ResponseCode: response.StatusCode,
}
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 281e448f6..1688be57e 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -2,6 +2,7 @@ package abi
import (
"context"
+ "errors"
"fmt"
"io/ioutil"
"os"
@@ -32,7 +33,6 @@ import (
"github.com/containers/podman/v4/pkg/specgenutil"
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/storage"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -80,7 +80,7 @@ func getContainersByContext(all, latest bool, names []string, runtime *libpod.Ru
func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrID string, options entities.ContainerExistsOptions) (*entities.BoolReport, error) {
_, err := ic.Libpod.LookupContainer(nameOrID)
if err != nil {
- if errors.Cause(err) != define.ErrNoSuchCtr {
+ if !errors.Is(err, define.ErrNoSuchCtr) {
return nil, err
}
if options.External {
@@ -120,7 +120,7 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri
report := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
for _, c := range ctrs {
err := c.Pause()
- if err != nil && options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
+ if err != nil && options.All && errors.Is(err, define.ErrCtrStateInvalid) {
logrus.Debugf("Container %s is not running", c.ID())
continue
}
@@ -137,7 +137,7 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
report := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
for _, c := range ctrs {
err := c.Unpause()
- if err != nil && options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
+ if err != nil && options.All && errors.Is(err, define.ErrCtrStateInvalid) {
logrus.Debugf("Container %s is not paused", c.ID())
continue
}
@@ -148,7 +148,7 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []string, options entities.StopOptions) ([]*entities.StopReport, error) {
names := namesOrIds
ctrs, rawInputs, err := getContainersAndInputByContext(options.All, options.Latest, names, ic.Libpod)
- if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
+ if err != nil && !(options.Ignore && errors.Is(err, define.ErrNoSuchCtr)) {
return nil, err
}
ctrMap := map[string]string{}
@@ -166,13 +166,13 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
}
if err != nil {
switch {
- case errors.Cause(err) == define.ErrCtrStopped:
+ case errors.Is(err, define.ErrCtrStopped):
logrus.Debugf("Container %s is already stopped", c.ID())
- case options.All && errors.Cause(err) == define.ErrCtrStateInvalid:
+ case options.All && errors.Is(err, define.ErrCtrStateInvalid):
logrus.Debugf("Container %s is not running, could not stop", c.ID())
// container never created in OCI runtime
// docker parity: do nothing just return container id
- case errors.Cause(err) == define.ErrCtrStateInvalid:
+ case errors.Is(err, define.ErrCtrStateInvalid):
logrus.Debugf("Container %s is either not created on runtime or is in a invalid state", c.ID())
default:
return err
@@ -238,7 +238,7 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
reports := make([]*entities.KillReport, 0, len(ctrs))
for _, con := range ctrs {
err := con.Kill(uint(sig))
- if options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
+ if options.All && errors.Is(err, define.ErrCtrStateInvalid) {
logrus.Debugf("Container %s is not running", con.ID())
continue
}
@@ -289,8 +289,7 @@ func (ic *ContainerEngine) removeContainer(ctx context.Context, ctr *libpod.Cont
return nil
}
logrus.Debugf("Failed to remove container %s: %s", ctr.ID(), err.Error())
- switch errors.Cause(err) {
- case define.ErrNoSuchCtr:
+ if errors.Is(err, define.ErrNoSuchCtr) {
// Ignore if the container does not exist (anymore) when either
// it has been requested by the user of if the container is a
// service one. Service containers are removed along with its
@@ -301,7 +300,7 @@ func (ic *ContainerEngine) removeContainer(ctx context.Context, ctr *libpod.Cont
logrus.Debugf("Ignoring error (--allow-missing): %v", err)
return nil
}
- case define.ErrCtrRemoved:
+ } else if errors.Is(err, define.ErrCtrRemoved) {
return nil
}
return err
@@ -317,15 +316,15 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
for _, ctr := range names {
report := reports.RmReport{Id: ctr}
report.Err = ic.Libpod.RemoveStorageContainer(ctr, options.Force)
- switch errors.Cause(report.Err) {
- case nil:
+ //nolint:gocritic
+ if report.Err == nil {
// remove container names that we successfully deleted
rmReports = append(rmReports, &report)
- case define.ErrNoSuchCtr, define.ErrCtrExists:
+ } else if errors.Is(report.Err, define.ErrNoSuchCtr) || errors.Is(report.Err, define.ErrCtrExists) {
// There is still a potential this is a libpod container
tmpNames = append(tmpNames, ctr)
- default:
- if _, err := ic.Libpod.LookupContainer(ctr); errors.Cause(err) == define.ErrNoSuchCtr {
+ } else {
+ if _, err := ic.Libpod.LookupContainer(ctr); errors.Is(err, define.ErrNoSuchCtr) {
// remove container failed, but not a libpod container
rmReports = append(rmReports, &report)
continue
@@ -337,7 +336,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
names = tmpNames
ctrs, err := getContainersByContext(options.All, options.Latest, names, ic.Libpod)
- if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
+ if err != nil && !(options.Ignore && errors.Is(err, define.ErrNoSuchCtr)) {
// Failed to get containers. If force is specified, get the containers ID
// and evict them
if !options.Force {
@@ -349,7 +348,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
report := reports.RmReport{Id: ctr}
_, err := ic.Libpod.EvictContainer(ctx, ctr, options.Volumes)
if err != nil {
- if options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
+ if options.Ignore && errors.Is(err, define.ErrNoSuchCtr) {
logrus.Debugf("Ignoring error (--allow-missing): %v", err)
rmReports = append(rmReports, &report)
continue
@@ -426,7 +425,7 @@ func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []st
ctr, err := ic.Libpod.GetLatestContainer()
if err != nil {
if errors.Is(err, define.ErrNoSuchCtr) {
- return nil, []error{errors.Wrapf(err, "no containers to inspect")}, nil
+ return nil, []error{fmt.Errorf("no containers to inspect: %w", err)}, nil
}
return nil, nil, err
}
@@ -452,7 +451,7 @@ func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []st
// ErrNoSuchCtr is non-fatal, other errors will be
// treated as fatal.
if errors.Is(err, define.ErrNoSuchCtr) {
- errs = append(errs, errors.Errorf("no such container %s", name))
+ errs = append(errs, fmt.Errorf("no such container %s", name))
continue
}
return nil, nil, err
@@ -463,7 +462,7 @@ func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []st
// ErrNoSuchCtr is non-fatal, other errors will be
// treated as fatal.
if errors.Is(err, define.ErrNoSuchCtr) {
- errs = append(errs, errors.Errorf("no such container %s", name))
+ errs = append(errs, fmt.Errorf("no such container %s", name))
continue
}
return nil, nil, err
@@ -487,7 +486,7 @@ func (ic *ContainerEngine) ContainerTop(ctx context.Context, options entities.To
container, err = ic.Libpod.LookupContainer(options.NameOrID)
}
if err != nil {
- return nil, errors.Wrap(err, "unable to look up requested container")
+ return nil, fmt.Errorf("unable to look up requested container: %w", err)
}
// Run Top.
@@ -512,12 +511,12 @@ func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrID string,
case "oci":
mimeType = buildah.OCIv1ImageManifest
if len(options.Message) > 0 {
- return nil, errors.Errorf("messages are only compatible with the docker image format (-f docker)")
+ return nil, fmt.Errorf("messages are only compatible with the docker image format (-f docker)")
}
case "docker":
mimeType = manifest.DockerV2Schema2MediaType
default:
- return nil, errors.Errorf("unrecognized image format %q", options.Format)
+ return nil, fmt.Errorf("unrecognized image format %q", options.Format)
}
sc := ic.Libpod.SystemContext()
@@ -660,7 +659,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
// CRImportCheckpoint is expected to import exactly one container from checkpoint image
checkpointImageImportErrors = append(
checkpointImageImportErrors,
- errors.Errorf("unable to import checkpoint from image: %q: %v", nameOrID, err),
+ fmt.Errorf("unable to import checkpoint from image: %q: %v", nameOrID, err),
)
} else {
containers = append(containers, importedContainers[0])
@@ -720,16 +719,16 @@ func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrID string,
ctr := ctrs[0]
conState, err := ctr.State()
if err != nil {
- return errors.Wrapf(err, "unable to determine state of %s", ctr.ID())
+ return fmt.Errorf("unable to determine state of %s: %w", ctr.ID(), err)
}
if conState != define.ContainerStateRunning {
- return errors.Errorf("you can only attach to running containers")
+ return fmt.Errorf("you can only attach to running containers")
}
// If the container is in a pod, also set to recursively start dependencies
err = terminal.StartAttachCtr(ctx, ctr, options.Stdout, options.Stderr, options.Stdin, options.DetachKeys, options.SigProxy, false)
- if err != nil && errors.Cause(err) != define.ErrDetach {
- return errors.Wrapf(err, "error attaching to container %s", ctr.ID())
+ if err != nil && !errors.Is(err, define.ErrDetach) {
+ return fmt.Errorf("error attaching to container %s: %w", ctr.ID(), err)
}
os.Stdout.WriteString("\n")
return nil
@@ -751,12 +750,12 @@ func makeExecConfig(options entities.ExecOptions, rt *libpod.Runtime) (*libpod.E
storageConfig := rt.StorageConfig()
runtimeConfig, err := rt.GetConfig()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving Libpod configuration to build exec exit command")
+ return nil, fmt.Errorf("error retrieving Libpod configuration to build exec exit command: %w", err)
}
// TODO: Add some ability to toggle syslog
exitCommandArgs, err := specgenutil.CreateExitCommandArgs(storageConfig, runtimeConfig, logrus.IsLevelEnabled(logrus.DebugLevel), false, true)
if err != nil {
- return nil, errors.Wrapf(err, "error constructing exit command for exec session")
+ return nil, fmt.Errorf("error constructing exit command for exec session: %w", err)
}
execConfig.ExitCommand = exitCommandArgs
@@ -774,7 +773,7 @@ func checkExecPreserveFDs(options entities.ExecOptions) error {
for _, e := range entries {
i, err := strconv.Atoi(e.Name())
if err != nil {
- return errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
+ return fmt.Errorf("cannot parse %s in /proc/self/fd: %w", e.Name(), err)
}
m[i] = true
}
@@ -891,7 +890,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
if options.Attach {
err = terminal.StartAttachCtr(ctx, ctr, options.Stdout, options.Stderr, options.Stdin, options.DetachKeys, options.SigProxy, !ctrRunning)
- if errors.Cause(err) == define.ErrDetach {
+ if errors.Is(err, define.ErrDetach) {
// User manually detached
// Exit cleanly immediately
reports = append(reports, &entities.ContainerStartReport{
@@ -903,7 +902,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
return reports, nil
}
- if errors.Cause(err) == define.ErrWillDeadlock {
+ if errors.Is(err, define.ErrWillDeadlock) {
logrus.Debugf("Deadlock error: %v", err)
reports = append(reports, &entities.ContainerStartReport{
Id: ctr.ID(),
@@ -911,7 +910,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
Err: err,
ExitCode: define.ExitCode(err),
})
- return reports, errors.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID())
+ return reports, fmt.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID())
}
if ctrRunning {
@@ -936,7 +935,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
logrus.Errorf("Removing container %s: %v", ctr.ID(), err)
}
}
- return reports, errors.Wrapf(err, "unable to start container %s", ctr.ID())
+ return reports, fmt.Errorf("unable to start container %s: %w", ctr.ID(), err)
}
exitCode = ic.GetContainerExitCode(ctx, ctr)
@@ -960,12 +959,12 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
}
if err := ctr.Start(ctx, true); err != nil {
report.Err = err
- if errors.Cause(err) == define.ErrWillDeadlock {
- report.Err = errors.Wrapf(err, "please run 'podman system renumber' to resolve deadlocks")
+ if errors.Is(err, define.ErrWillDeadlock) {
+ report.Err = fmt.Errorf("please run 'podman system renumber' to resolve deadlocks: %w", err)
reports = append(reports, report)
continue
}
- report.Err = errors.Wrapf(err, "unable to start container %q", ctr.ID())
+ report.Err = fmt.Errorf("unable to start container %q: %w", ctr.ID(), err)
reports = append(reports, report)
if ctr.AutoRemove() {
if err := ic.removeContainer(ctx, ctr, entities.RmOptions{}); err != nil {
@@ -1001,7 +1000,7 @@ func (ic *ContainerEngine) Diff(ctx context.Context, namesOrIDs []string, opts e
if opts.Latest {
ctnr, err := ic.Libpod.GetLatestContainer()
if err != nil {
- return nil, errors.Wrap(err, "unable to get latest container")
+ return nil, fmt.Errorf("unable to get latest container: %w", err)
}
base = ctnr.ID()
}
@@ -1064,7 +1063,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
// We've manually detached from the container
// Do not perform cleanup, or wait for container exit code
// Just exit immediately
- if errors.Cause(err) == define.ErrDetach {
+ if errors.Is(err, define.ErrDetach) {
report.ExitCode = 0
return &report, nil
}
@@ -1074,10 +1073,10 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
logrus.Debugf("unable to remove container %s after failing to start and attach to it", ctr.ID())
}
}
- if errors.Cause(err) == define.ErrWillDeadlock {
+ if errors.Is(err, define.ErrWillDeadlock) {
logrus.Debugf("Deadlock error on %q: %v", ctr.ID(), err)
report.ExitCode = define.ExitCode(err)
- return &report, errors.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID())
+ return &report, fmt.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID())
}
report.ExitCode = define.ExitCode(err)
return &report, err
@@ -1086,8 +1085,8 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
if opts.Rm && !ctr.ShouldRestart(ctx) {
var timeout *uint
if err := ic.Libpod.RemoveContainer(ctx, ctr, false, true, timeout); err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr ||
- errors.Cause(err) == define.ErrCtrRemoved {
+ if errors.Is(err, define.ErrNoSuchCtr) ||
+ errors.Is(err, define.ErrCtrRemoved) {
logrus.Infof("Container %s was already removed, skipping --rm", ctr.ID())
} else {
logrus.Errorf("Removing container %s: %v", ctr.ID(), err)
@@ -1180,12 +1179,12 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st
var timeout *uint
err = ic.Libpod.RemoveContainer(ctx, ctr, false, true, timeout)
if err != nil {
- report.RmErr = errors.Wrapf(err, "failed to clean up and remove container %v", ctr.ID())
+ report.RmErr = fmt.Errorf("failed to clean up and remove container %v: %w", ctr.ID(), err)
}
} else {
err := ctr.Cleanup(ctx)
if err != nil {
- report.CleanErr = errors.Wrapf(err, "failed to clean up container %v", ctr.ID())
+ report.CleanErr = fmt.Errorf("failed to clean up container %v: %w", ctr.ID(), err)
}
}
@@ -1212,7 +1211,7 @@ func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []strin
err := ctr.Init(ctx, ctr.PodID() != "")
// If we're initializing all containers, ignore invalid state errors
- if options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
+ if options.All && errors.Is(err, define.ErrCtrStateInvalid) {
err = nil
}
report.Err = err
@@ -1323,7 +1322,7 @@ func (ic *ContainerEngine) ContainerUnmount(ctx context.Context, nameOrIDs []str
if mounted {
report := entities.ContainerUnmountReport{Id: sctr.ID}
if _, report.Err = ic.Libpod.UnmountStorageContainer(sctr.ID, options.Force); report.Err != nil {
- if errors.Cause(report.Err) != define.ErrCtrExists {
+ if !errors.Is(report.Err, define.ErrCtrExists) {
reports = append(reports, &report)
}
} else {
@@ -1357,11 +1356,11 @@ func (ic *ContainerEngine) ContainerUnmount(ctx context.Context, nameOrIDs []str
report := entities.ContainerUnmountReport{Id: ctr.ID()}
if err := ctr.Unmount(options.Force); err != nil {
- if options.All && errors.Cause(err) == storage.ErrLayerNotMounted {
+ if options.All && errors.Is(err, storage.ErrLayerNotMounted) {
logrus.Debugf("Error umounting container %s, storage.ErrLayerNotMounted", ctr.ID())
continue
}
- report.Err = errors.Wrapf(err, "error unmounting container %s", ctr.ID())
+ report.Err = fmt.Errorf("error unmounting container %s: %w", ctr.ID(), err)
}
reports = append(reports, &report)
}
@@ -1410,7 +1409,7 @@ func (ic *ContainerEngine) Shutdown(_ context.Context) {
func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []string, options entities.ContainerStatsOptions) (statsChan chan entities.ContainerStatsReport, err error) {
if options.Interval < 1 {
- return nil, errors.New("Invalid interval, must be a positive number greater zero")
+		return nil, errors.New("invalid interval, must be a positive number greater than zero")
}
if rootless.IsRootless() {
unified, err := cgroups.IsCgroup2UnifiedMode()
@@ -1465,19 +1464,18 @@ func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []stri
computeStats := func() ([]define.ContainerStats, error) {
containers, err = containerFunc()
if err != nil {
- return nil, errors.Wrapf(err, "unable to get list of containers")
+ return nil, fmt.Errorf("unable to get list of containers: %w", err)
}
reportStats := []define.ContainerStats{}
for _, ctr := range containers {
stats, err := ctr.GetContainerStats(containerStats[ctr.ID()])
if err != nil {
- cause := errors.Cause(err)
- if queryAll && (cause == define.ErrCtrRemoved || cause == define.ErrNoSuchCtr || cause == define.ErrCtrStateInvalid) {
+ if queryAll && (errors.Is(err, define.ErrCtrRemoved) || errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrStateInvalid)) {
continue
}
- if cause == cgroups.ErrCgroupV1Rootless {
- err = cause
+ if errors.Is(err, cgroups.ErrCgroupV1Rootless) {
+ err = cgroups.ErrCgroupV1Rootless
}
return nil, err
}
diff --git a/pkg/errorhandling/errorhandling.go b/pkg/errorhandling/errorhandling.go
index fc6772c08..9b456c9c0 100644
--- a/pkg/errorhandling/errorhandling.go
+++ b/pkg/errorhandling/errorhandling.go
@@ -1,11 +1,11 @@
package errorhandling
import (
+ "errors"
"os"
"strings"
"github.com/hashicorp/go-multierror"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -121,3 +121,22 @@ func (e PodConflictErrorModel) Error() string {
func (e PodConflictErrorModel) Code() int {
return 409
}
+
+// Cause returns the innermost error of the provided one, i.e. its root cause.
+// Unwrapping stops at a maximum depth of 100 to avoid endless loops; an error
+// log message is emitted if this maximum is reached.
+func Cause(err error) (cause error) {
+ cause = err
+
+ const maxDepth = 100
+ for i := 0; i <= maxDepth; i++ {
+ res := errors.Unwrap(cause)
+ if res == nil {
+ return cause
+ }
+ cause = res
+ }
+
+ logrus.Errorf("Max error depth of %d reached, cannot unwrap until root cause: %v", maxDepth, err)
+ return cause
+}
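
For reference, a minimal sketch of how a caller can use the new Cause helper as a drop-in for the retired errors.Cause from github.com/pkg/errors; the wrapped chain below is illustrative, and only the import path already shown in this change is assumed.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/containers/podman/v4/pkg/errorhandling"
)

func main() {
	root := errors.New("no such container")
	err := fmt.Errorf("cleaning up: %w", fmt.Errorf("stopping container: %w", root))

	// Cause unwraps to the innermost error, mirroring errors.Cause from
	// github.com/pkg/errors, while errors.Is remains the preferred check.
	fmt.Println(errorhandling.Cause(err) == root) // true
	fmt.Println(errors.Is(err, root))             // true
}
```
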
diff --git a/pkg/errorhandling/errorhandling_test.go b/pkg/errorhandling/errorhandling_test.go
new file mode 100644
index 000000000..ec720c5e7
--- /dev/null
+++ b/pkg/errorhandling/errorhandling_test.go
@@ -0,0 +1,53 @@
+package errorhandling
+
+import (
+ "errors"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCause(t *testing.T) {
+ t.Parallel()
+
+ for _, tc := range []struct {
+ name string
+ err func() error
+ expectedErr error
+ }{
+ {
+ name: "nil error",
+ err: func() error { return nil },
+ expectedErr: nil,
+ },
+ {
+ name: "equal errors",
+ err: func() error { return errors.New("foo") },
+ expectedErr: errors.New("foo"),
+ },
+ {
+ name: "wrapped error",
+ err: func() error { return fmt.Errorf("baz: %w", fmt.Errorf("bar: %w", errors.New("foo"))) },
+ expectedErr: errors.New("foo"),
+ },
+ {
+ name: "max depth reached",
+ err: func() error {
+ err := errors.New("error")
+ for i := 0; i <= 101; i++ {
+ err = fmt.Errorf("%d: %w", i, err)
+ }
+ return err
+ },
+ expectedErr: fmt.Errorf("0: %w", errors.New("error")),
+ },
+ } {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ err := Cause(tc.err())
+ assert.Equal(t, tc.expectedErr, err)
+ })
+ }
+}
diff --git a/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go b/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go
index d05984dac..69613321f 100644
--- a/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go
+++ b/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go
@@ -48,7 +48,7 @@ const (
var (
Zero = int64Amount{}
- // Used by quantity strings - treat as read only
+ // Used by quantity strings - treat as read-only
zeroBytes = []byte("0")
)
diff --git a/pkg/k8s.io/apimachinery/pkg/api/resource/math.go b/pkg/k8s.io/apimachinery/pkg/api/resource/math.go
index 9d03f5c05..59a4c14de 100644
--- a/pkg/k8s.io/apimachinery/pkg/api/resource/math.go
+++ b/pkg/k8s.io/apimachinery/pkg/api/resource/math.go
@@ -29,13 +29,13 @@ const (
)
var (
- // Commonly needed big.Int values-- treat as read only!
+ // Commonly needed big.Int values-- treat as read-only!
bigTen = big.NewInt(10)
bigZero = big.NewInt(0)
bigOne = big.NewInt(1)
big1024 = big.NewInt(1024)
- // Commonly needed inf.Dec values-- treat as read only!
+ // Commonly needed inf.Dec values-- treat as read-only!
decZero = inf.NewDec(0, 0)
decOne = inf.NewDec(1, 0)
diff --git a/pkg/machine/config_test.go b/pkg/machine/config_test.go
index d9fc5425e..ca08660b9 100644
--- a/pkg/machine/config_test.go
+++ b/pkg/machine/config_test.go
@@ -1,3 +1,6 @@
+//go:build amd64 || arm64
+// +build amd64 arm64
+
package machine
import (
diff --git a/pkg/machine/qemu/config_test.go b/pkg/machine/qemu/config_test.go
index 4d96ec6e7..72cb3ed90 100644
--- a/pkg/machine/qemu/config_test.go
+++ b/pkg/machine/qemu/config_test.go
@@ -1,3 +1,6 @@
+//go:build (amd64 && !windows) || (arm64 && !windows)
+// +build amd64,!windows arm64,!windows
+
package qemu
import (
diff --git a/pkg/machine/qemu/machine.go b/pkg/machine/qemu/machine.go
index 2fe0230cf..7e9c786a9 100644
--- a/pkg/machine/qemu/machine.go
+++ b/pkg/machine/qemu/machine.go
@@ -1013,7 +1013,7 @@ func (v *MachineVM) SSH(_ string, opts machine.SSHOptions) error {
port := strconv.Itoa(v.Port)
args := []string{"-i", v.IdentityPath, "-p", port, sshDestination, "-o", "UserKnownHostsFile=/dev/null",
- "-o", "StrictHostKeyChecking=no", "-o", "LogLevel=ERROR"}
+ "-o", "StrictHostKeyChecking=no", "-o", "LogLevel=ERROR", "-o", "SetEnv=LC_ALL="}
if len(opts.Args) > 0 {
args = append(args, opts.Args...)
} else {
diff --git a/pkg/machine/qemu/machine_test.go b/pkg/machine/qemu/machine_test.go
index 62ca6068a..4c393d0f4 100644
--- a/pkg/machine/qemu/machine_test.go
+++ b/pkg/machine/qemu/machine_test.go
@@ -1,3 +1,6 @@
+//go:build (amd64 && !windows) || (arm64 && !windows)
+// +build amd64,!windows arm64,!windows
+
package qemu
import (
diff --git a/pkg/specgen/generate/container.go b/pkg/specgen/generate/container.go
index 30c759495..8fdd87adf 100644
--- a/pkg/specgen/generate/container.go
+++ b/pkg/specgen/generate/container.go
@@ -38,10 +38,19 @@ func getImageFromSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGen
}
// Need to look up image.
- image, resolvedName, err := r.LibimageRuntime().LookupImage(s.Image, nil)
+ lookupOptions := &libimage.LookupImageOptions{ManifestList: true}
+ image, resolvedName, err := r.LibimageRuntime().LookupImage(s.Image, lookupOptions)
if err != nil {
return nil, "", nil, err
}
+ manifestList, err := image.ToManifestList()
+	// only process if a manifest list is found; otherwise expect it to be a regular image
+ if err == nil {
+ image, err = manifestList.LookupInstance(ctx, s.ImageArch, s.ImageOS, s.ImageVariant)
+ if err != nil {
+ return nil, "", nil, err
+ }
+ }
s.SetImage(image, resolvedName)
inspectData, err := image.Inspect(ctx, nil)
if err != nil {
diff --git a/pkg/specgen/generate/kube/kube.go b/pkg/specgen/generate/kube/kube.go
index 689c740f0..c254b8192 100644
--- a/pkg/specgen/generate/kube/kube.go
+++ b/pkg/specgen/generate/kube/kube.go
@@ -511,12 +511,12 @@ func makeHealthCheck(inCmd string, interval int32, retries int32, timeout int32,
cmd := []string{}
if inCmd == "none" {
- cmd = []string{"NONE"}
+ cmd = []string{define.HealthConfigTestNone}
} else {
err := json.Unmarshal([]byte(inCmd), &cmd)
if err != nil {
// ...otherwise pass it to "/bin/sh -c" inside the container
- cmd = []string{"CMD-SHELL"}
+ cmd = []string{define.HealthConfigTestCmdShell}
cmd = append(cmd, strings.Split(inCmd, " ")...)
}
}
@@ -810,8 +810,8 @@ func envVarValueResourceFieldRef(env v1.EnvVar, opts *CtrSpecGenOptions) (*strin
}
// k8s rounds up the result to the nearest integer
- intValue := int(math.Ceil(value.AsApproximateFloat64() / divisor.AsApproximateFloat64()))
- stringValue := strconv.Itoa(intValue)
+ intValue := int64(math.Ceil(value.AsApproximateFloat64() / divisor.AsApproximateFloat64()))
+ stringValue := strconv.FormatInt(intValue, 10)
return &stringValue, nil
}
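
The int-to-int64 switch guards against truncation on 32-bit platforms, where a large memory limit divided by a small divisor can exceed math.MaxInt32. A self-contained sketch of the same ceil-then-format arithmetic, with plain floats standing in for resource.Quantity values:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	// e.g. limits.memory = 4Gi with the default divisor of 1 -> "4294967296",
	// which does not fit in a 32-bit int.
	value := float64(4 * 1024 * 1024 * 1024)
	divisor := float64(1)

	// Round up to the nearest integer, as Kubernetes does for resourceFieldRef.
	intValue := int64(math.Ceil(value / divisor))
	fmt.Println(strconv.FormatInt(intValue, 10))
}
```
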
diff --git a/pkg/specgen/generate/kube/play_test.go b/pkg/specgen/generate/kube/play_test.go
index e01d62b08..466dab610 100644
--- a/pkg/specgen/generate/kube/play_test.go
+++ b/pkg/specgen/generate/kube/play_test.go
@@ -2,7 +2,6 @@ package kube
import (
"encoding/json"
- "fmt"
"math"
"runtime"
"strconv"
@@ -777,8 +776,7 @@ func TestEnvVarValue(t *testing.T) {
if test.expected == nilString {
assert.Nil(t, result)
} else {
- fmt.Println(*result, test.expected)
- assert.Equal(t, &(test.expected), result)
+ assert.Equal(t, test.expected, *result)
}
})
}
diff --git a/pkg/specgen/specgen.go b/pkg/specgen/specgen.go
index 79e20667b..42b89ece1 100644
--- a/pkg/specgen/specgen.go
+++ b/pkg/specgen/specgen.go
@@ -103,6 +103,12 @@ type ContainerBasicConfig struct {
// RawImageName is the user-specified and unprocessed input referring
// to a local or a remote image.
RawImageName string `json:"raw_image_name,omitempty"`
+ // ImageOS is the user-specified image OS
+ ImageOS string `json:"image_os,omitempty"`
+ // ImageArch is the user-specified image architecture
+ ImageArch string `json:"image_arch,omitempty"`
+ // ImageVariant is the user-specified image variant
+ ImageVariant string `json:"image_variant,omitempty"`
// RestartPolicy is the container's restart policy - an action which
// will be taken when the container exits.
// If not given, the default policy, which does nothing, will be used.
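
Together with the manifest-list lookup added to pkg/specgen/generate/container.go above, these fields let a caller pin a specific platform instance of a multi-arch image. A minimal sketch, assuming specgen.NewSpecGenerator(name, rootfs) as the constructor and an image name that resolves to a manifest list:

```go
package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/specgen"
)

func main() {
	// Ask for the linux/arm64/v8 instance of a multi-arch image; during
	// generation the manifest list is resolved via LookupInstance.
	s := specgen.NewSpecGenerator("quay.io/libpod/alpine:latest", false)
	s.ImageOS = "linux"
	s.ImageArch = "arm64"
	s.ImageVariant = "v8"

	fmt.Println(s.Image, s.ImageOS, s.ImageArch, s.ImageVariant)
}
```
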
diff --git a/pkg/specgen/volumes.go b/pkg/specgen/volumes.go
index 5e1ea9a78..c9f944abf 100644
--- a/pkg/specgen/volumes.go
+++ b/pkg/specgen/volumes.go
@@ -37,7 +37,7 @@ type OverlayVolume struct {
// ImageVolume is a volume based on a container image. The container image is
// first mounted on the host and is then bind-mounted into the container. An
-// ImageVolume is always mounted read only.
+// ImageVolume is always mounted read-only.
type ImageVolume struct {
// Source is the source of the image volume. The image can be referred
// to by name and by ID.
diff --git a/pkg/specgenutil/specgen.go b/pkg/specgenutil/specgen.go
index 8ad0a92e7..34350579d 100644
--- a/pkg/specgenutil/specgen.go
+++ b/pkg/specgenutil/specgen.go
@@ -873,23 +873,23 @@ func makeHealthCheckFromCli(inCmd, interval string, retries uint, timeout, start
}
var concat string
- if cmdArr[0] == "CMD" || cmdArr[0] == "none" { // this is for compat, we are already split properly for most compat cases
+ if strings.ToUpper(cmdArr[0]) == define.HealthConfigTestCmd || strings.ToUpper(cmdArr[0]) == define.HealthConfigTestNone { // this is for compat, we are already split properly for most compat cases
cmdArr = strings.Fields(inCmd)
- } else if cmdArr[0] != "CMD-SHELL" { // this is for podman side of things, won't contain the keywords
+ } else if strings.ToUpper(cmdArr[0]) != define.HealthConfigTestCmdShell { // this is for podman side of things, won't contain the keywords
if isArr && len(cmdArr) > 1 { // an array of consecutive commands
- cmdArr = append([]string{"CMD"}, cmdArr...)
+ cmdArr = append([]string{define.HealthConfigTestCmd}, cmdArr...)
} else { // one singular command
if len(cmdArr) == 1 {
concat = cmdArr[0]
} else {
concat = strings.Join(cmdArr[0:], " ")
}
- cmdArr = append([]string{"CMD-SHELL"}, concat)
+ cmdArr = append([]string{define.HealthConfigTestCmdShell}, concat)
}
}
- if cmdArr[0] == "none" { // if specified to remove healtcheck
- cmdArr = []string{"NONE"}
+	if strings.ToUpper(cmdArr[0]) == define.HealthConfigTestNone { // if specified to remove healthcheck
+ cmdArr = []string{define.HealthConfigTestNone}
}
// healthcheck is by default an array, so we simply pass the user input
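
For orientation, the normalization above yields one of three canonical Test forms. A small sketch of the expected shapes; the constants below are stand-ins for define.HealthConfigTestCmd, define.HealthConfigTestCmdShell and define.HealthConfigTestNone, whose values are assumed to match the string literals they replace.

```go
package main

import "fmt"

// Assumed to equal the literals previously hard-coded in this file.
const (
	testCmd      = "CMD"
	testCmdShell = "CMD-SHELL"
	testNone     = "NONE"
)

func main() {
	// --health-cmd '["curl","-f","http://localhost"]' -> explicit argv form
	fmt.Println([]string{testCmd, "curl", "-f", "http://localhost"})

	// --health-cmd 'curl -f http://localhost' -> run through a shell
	fmt.Println([]string{testCmdShell, "curl -f http://localhost"})

	// --health-cmd none -> disable the healthcheck entirely
	fmt.Println([]string{testNone})
}
```
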
diff --git a/pkg/specgenutil/volumes.go b/pkg/specgenutil/volumes.go
index 50d745380..016166a20 100644
--- a/pkg/specgenutil/volumes.go
+++ b/pkg/specgenutil/volumes.go
@@ -605,7 +605,7 @@ func getNamedVolume(args []string) (*specgen.NamedVolume, error) {
// Parse the arguments into an image volume. An image volume is a volume based
// on a container image. The container image is first mounted on the host and
// is then bind-mounted into the container. An ImageVolume is always mounted
-// read only.
+// read-only.
func getImageVolume(args []string) (*specgen.ImageVolume, error) {
newVolume := new(specgen.ImageVolume)
diff --git a/test/compose/disable_healthcheck/docker-compose.yml b/test/compose/disable_healthcheck/docker-compose.yml
new file mode 100644
index 000000000..1f608c895
--- /dev/null
+++ b/test/compose/disable_healthcheck/docker-compose.yml
@@ -0,0 +1,10 @@
+version: "3.7"
+services:
+ noHc:
+ image: alpine
+ container_name: noHc
+ ports:
+ - "4000:80"
+ restart: unless-stopped
+ healthcheck:
+ disable: true
diff --git a/test/compose/disable_healthcheck/tests.sh b/test/compose/disable_healthcheck/tests.sh
new file mode 100644
index 000000000..2460a687e
--- /dev/null
+++ b/test/compose/disable_healthcheck/tests.sh
@@ -0,0 +1,2 @@
+podman inspect --format='{{.Config.Healthcheck.Test}}' noHc
+like "$output" "[NONE]" "$testname: healthcheck properly disabled"
diff --git a/test/compose/update_network_mtu/docker-compose.yml b/test/compose/update_network_mtu/docker-compose.yml
new file mode 100644
index 000000000..fabd7b4f2
--- /dev/null
+++ b/test/compose/update_network_mtu/docker-compose.yml
@@ -0,0 +1,26 @@
+version: '3.7'
+
+services:
+ nginx:
+ image: alpine
+ ports:
+ - 8000:5000
+ networks:
+ - default
+ - macvlan_net
+
+networks:
+ default:
+ driver: bridge
+ driver_opts:
+ com.docker.network.bridge.name: docker0
+ com.docker.network.driver.mtu: 9000
+ macvlan_net:
+ driver: macvlan
+ driver_opts:
+ mode: bridge
+ ipam:
+ config:
+ -
+ subnet: 192.168.20.0/24
+ gateway: 192.168.20.1
diff --git a/test/compose/update_network_mtu/tests.sh b/test/compose/update_network_mtu/tests.sh
new file mode 100644
index 000000000..57411eb34
--- /dev/null
+++ b/test/compose/update_network_mtu/tests.sh
@@ -0,0 +1,10 @@
+# -*- bash -*-
+
+podman network inspect --format='{{ range . }} {{ .Options.mtu }} {{ end }}' update_network_mtu_default
+like "$output" "9000" "$testname : network mtu is set"
+
+podman network inspect --format='{{ range . }} {{ .NetworkInterface }} {{ end }}' update_network_mtu_default
+like "$output" "docker0" "$testname: network interface is set"
+
+podman network inspect --format='{{ range . }} {{ .Options.mode }} {{ end }}' update_network_mtu_macvlan_net
+like "$output" "bridge" "$testname : network mode is set"
diff --git a/test/e2e/build/Containerfile.with-platform b/test/e2e/build/Containerfile.with-platform
new file mode 100644
index 000000000..3bb585a0a
--- /dev/null
+++ b/test/e2e/build/Containerfile.with-platform
@@ -0,0 +1 @@
+FROM --platform=$TARGETPLATFORM alpine
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index 261db8a9a..2fc967718 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -2,6 +2,7 @@ package integration
import (
"bytes"
+ "errors"
"fmt"
"io/ioutil"
"math/rand"
@@ -30,7 +31,6 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -618,14 +618,14 @@ func (p *PodmanTestIntegration) RunHealthCheck(cid string) error {
restart := p.Podman([]string{"restart", cid})
restart.WaitWithDefaultTimeout()
if restart.ExitCode() != 0 {
- return errors.Errorf("unable to restart %s", cid)
+ return fmt.Errorf("unable to restart %s", cid)
}
}
}
fmt.Printf("Waiting for %s to pass healthcheck\n", cid)
time.Sleep(1 * time.Second)
}
- return errors.Errorf("unable to detect %s as running", cid)
+ return fmt.Errorf("unable to detect %s as running", cid)
}
func (p *PodmanTestIntegration) CreateSeccompJSON(in []byte) (string, error) {
@@ -1042,18 +1042,15 @@ var IPRegex = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01
// digShort execs into the given container and does a dig lookup with a timeout
// backoff. If it gets a response, it ensures that the output is in the correct
// format and iterates a string array for match
-func digShort(container, lookupName string, matchNames []string, p *PodmanTestIntegration) {
+func digShort(container, lookupName, expectedIP string, p *PodmanTestIntegration) {
digInterval := time.Millisecond * 250
for i := 0; i < 6; i++ {
time.Sleep(digInterval * time.Duration(i))
dig := p.Podman([]string{"exec", container, "dig", "+short", lookupName})
dig.WaitWithDefaultTimeout()
- if dig.ExitCode() == 0 {
- output := dig.OutputToString()
- Expect(output).To(MatchRegexp(IPRegex))
- for _, name := range matchNames {
- Expect(output).To(Equal(name))
- }
+ output := dig.OutputToString()
+ if dig.ExitCode() == 0 && output != "" {
+ Expect(output).To(Equal(expectedIP))
// success
return
}
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index 61f2b3a1c..de4e4bfac 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -2507,7 +2507,7 @@ spec:
Expect(kube).To(ExitWithError())
})
- It("podman play kube test with read only HostPath volume", func() {
+ It("podman play kube test with read-only HostPath volume", func() {
hostPathLocation := filepath.Join(tempdir, "file")
f, err := os.Create(hostPathLocation)
Expect(err).To(BeNil())
diff --git a/test/e2e/pod_create_test.go b/test/e2e/pod_create_test.go
index a48193e90..e463862f5 100644
--- a/test/e2e/pod_create_test.go
+++ b/test/e2e/pod_create_test.go
@@ -899,27 +899,6 @@ ENTRYPOINT ["sleep","99999"]
})
- It("podman pod create --device-read-bps", func() {
- SkipIfRootless("Cannot create devices in /dev in rootless mode")
- SkipIfRootlessCgroupsV1("Setting device-read-bps not supported on cgroupv1 for rootless users")
-
- podName := "testPod"
- session := podmanTest.Podman([]string{"pod", "create", "--device-read-bps", "/dev/zero:1mb", "--name", podName})
- session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(0))
-
- if CGROUPSV2 {
- session = podmanTest.Podman([]string{"run", "--rm", "--pod", podName, ALPINE, "sh", "-c", "cat /sys/fs/cgroup/$(sed -e 's|0::||' < /proc/self/cgroup)/io.max"})
- } else {
- session = podmanTest.Podman([]string{"run", "--rm", "--pod", podName, ALPINE, "cat", "/sys/fs/cgroup/blkio/blkio.throttle.read_bps_device"})
- }
- session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(0))
- if !CGROUPSV2 {
- Expect(session.OutputToString()).To(ContainSubstring("1048576"))
- }
- })
-
It("podman pod create --volumes-from", func() {
volName := "testVol"
volCreate := podmanTest.Podman([]string{"volume", "create", volName})
diff --git a/test/e2e/run_aardvark_test.go b/test/e2e/run_aardvark_test.go
index 25eb8b538..4a5800d04 100644
--- a/test/e2e/run_aardvark_test.go
+++ b/test/e2e/run_aardvark_test.go
@@ -53,7 +53,7 @@ var _ = Describe("Podman run networking", func() {
cip := ctrIP.OutputToString()
Expect(cip).To(MatchRegexp(IPRegex))
- digShort(cid, "aone", []string{cip}, podmanTest)
+ digShort(cid, "aone", cip, podmanTest)
reverseLookup := podmanTest.Podman([]string{"exec", cid, "dig", "+short", "-x", cip})
reverseLookup.WaitWithDefaultTimeout()
@@ -94,9 +94,9 @@ var _ = Describe("Podman run networking", func() {
cip2 := ctrIP2.OutputToString()
Expect(cip2).To(MatchRegexp(IPRegex))
- digShort("aone", "atwo", []string{cip2}, podmanTest)
+ digShort("aone", "atwo", cip2, podmanTest)
- digShort("atwo", "aone", []string{cip1}, podmanTest)
+ digShort("atwo", "aone", cip1, podmanTest)
reverseLookup12 := podmanTest.Podman([]string{"exec", cid1, "dig", "+short", "-x", cip2})
reverseLookup12.WaitWithDefaultTimeout()
@@ -143,17 +143,17 @@ var _ = Describe("Podman run networking", func() {
cip2 := ctrIP2.OutputToString()
Expect(cip2).To(MatchRegexp(IPRegex))
- digShort("aone", "atwo", []string{cip2}, podmanTest)
+ digShort("aone", "atwo", cip2, podmanTest)
- digShort("aone", "alias_a2", []string{cip2}, podmanTest)
+ digShort("aone", "alias_a2", cip2, podmanTest)
- digShort("aone", "alias_2a", []string{cip2}, podmanTest)
+ digShort("aone", "alias_2a", cip2, podmanTest)
- digShort("atwo", "aone", []string{cip1}, podmanTest)
+ digShort("atwo", "aone", cip1, podmanTest)
- digShort("atwo", "alias_a1", []string{cip1}, podmanTest)
+ digShort("atwo", "alias_a1", cip1, podmanTest)
- digShort("atwo", "alias_1a", []string{cip1}, podmanTest)
+ digShort("atwo", "alias_1a", cip1, podmanTest)
})
@@ -250,13 +250,13 @@ var _ = Describe("Podman run networking", func() {
cipA2B22 := ctrIPA2B22.OutputToString()
Expect(cipA2B22).To(MatchRegexp(IPRegex))
- digShort("aone", "atwobtwo", []string{cipA2B21}, podmanTest)
+ digShort("aone", "atwobtwo", cipA2B21, podmanTest)
- digShort("bone", "atwobtwo", []string{cipA2B22}, podmanTest)
+ digShort("bone", "atwobtwo", cipA2B22, podmanTest)
- digShort("atwobtwo", "aone", []string{cipA1}, podmanTest)
+ digShort("atwobtwo", "aone", cipA1, podmanTest)
- digShort("atwobtwo", "bone", []string{cipB1}, podmanTest)
+ digShort("atwobtwo", "bone", cipB1, podmanTest)
})
It("Aardvark Test 6: Three subnets, first container on 1/2 and second on 2/3, w/ network aliases", func() {
@@ -304,10 +304,9 @@ var _ = Describe("Podman run networking", func() {
Expect(ctrIPCB2).Should(Exit(0))
cipCB2 := ctrIPCB2.OutputToString()
- digShort("aone", "testB2_nw", []string{cipCB2}, podmanTest)
-
- digShort("cone", "testB1_nw", []string{cipAB1}, podmanTest)
+ digShort("aone", "testB2_nw", cipCB2, podmanTest)
+ digShort("cone", "testB1_nw", cipAB1, podmanTest)
})
})
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 828e92170..2aa5a78db 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -73,6 +73,28 @@ var _ = Describe("Podman run", func() {
Expect(session.OutputToString()).To(ContainSubstring("graphRootMounted=1"))
})
+ It("podman run from manifest list", func() {
+ session := podmanTest.Podman([]string{"manifest", "create", "localhost/test:latest"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"build", "-f", "build/Containerfile.with-platform", "--platform", "linux/amd64,linux/arm64", "--manifest", "localhost/test:latest"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"run", "--platform", "linux/arm64", "localhost/test", "uname", "-a"})
+ session.WaitWithDefaultTimeout()
+ exitCode := session.ExitCode()
+		// CI may or may not support the requested platform. If it does, the output should contain `aarch64`;
+		// if it does not, the run should fail with exactly `Exec format error`. Anything else should be
+		// treated as a test failure.
+ if exitCode == 0 {
+ Expect(session.OutputToString()).To(ContainSubstring("aarch64"))
+ } else {
+ Expect(session.ErrorToString()).To(ContainSubstring("Exec format error"))
+ }
+ })
+
It("podman run a container based on a complex local image name", func() {
imageName := strings.TrimPrefix(nginx, "quay.io/")
session := podmanTest.Podman([]string{"run", imageName, "ls"})
@@ -1084,7 +1106,7 @@ USER mail`, BB)
Expect(session).Should(Exit(0))
ctrID := session.OutputToString()
- // check that the read only option works
+ // check that the read-only option works
session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID + ":ro", ALPINE, "touch", mountpoint + "abc.txt"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(1))
@@ -1108,13 +1130,13 @@ USER mail`, BB)
Expect(session).Should(Exit(125))
Expect(session.ErrorToString()).To(ContainSubstring("cannot set :z more than once in mount options"))
- // create new read only volume
+ // create new read-only volume
session = podmanTest.Podman([]string{"create", "--volume", vol + ":" + mountpoint + ":ro", ALPINE, "cat", mountpoint + filename})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
ctrID = session.OutputToString()
- // check if the original volume was mounted as read only that --volumes-from also mount it as read only
+		// check that if the original volume was mounted as read-only, --volumes-from also mounts it as read-only
session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID, ALPINE, "touch", mountpoint + "abc.txt"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(1))
diff --git a/test/e2e/system_df_test.go b/test/e2e/system_df_test.go
index 712d16a6a..998fa8b59 100644
--- a/test/e2e/system_df_test.go
+++ b/test/e2e/system_df_test.go
@@ -97,4 +97,17 @@ var _ = Describe("podman system df", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
})
+
+ It("podman system df --format \"{{ json . }}\"", func() {
+ session := podmanTest.Podman([]string{"create", ALPINE})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{"system", "df", "--format", "{{ json . }}"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+		Expect(session.LineInOutputContains("Size")).To(BeTrue())
+		Expect(session.LineInOutputContains("Reclaimable")).To(BeTrue())
+		Expect(session.IsJSONOutputValid()).To(BeTrue())
+ })
})
diff --git a/test/system/060-mount.bats b/test/system/060-mount.bats
index 2735d2afd..4498e675f 100644
--- a/test/system/060-mount.bats
+++ b/test/system/060-mount.bats
@@ -87,7 +87,7 @@ load helpers
# Run a container with an image mount
run_podman run --rm --mount type=image,src=$IMAGE,dst=/image-mount $IMAGE diff /etc/os-release /image-mount/etc/os-release
- # Make sure the mount is read only
+ # Make sure the mount is read-only
run_podman 1 run --rm --mount type=image,src=$IMAGE,dst=/image-mount $IMAGE touch /image-mount/read-only
is "$output" "touch: /image-mount/read-only: Read-only file system"
diff --git a/test/system/130-kill.bats b/test/system/130-kill.bats
index a9456e03c..96b633a42 100644
--- a/test/system/130-kill.bats
+++ b/test/system/130-kill.bats
@@ -130,4 +130,14 @@ load helpers
is "$output" $cname
}
+@test "podman kill - concurrent stop" {
+ # 14761 - concurrent kill/stop must record the exit code
+ random_name=$(random_string 10)
+ run_podman run -d --replace --name=$random_name alpine sh -c "trap 'echo Received SIGTERM, ignoring' SIGTERM; echo READY; while :; do sleep 0.2; done"
+ $PODMAN stop -t 1 $random_name &
+ run_podman kill $random_name
+ run_podman wait $random_name
+ run_podman rm -f $random_name
+}
+
# vim: filetype=sh
diff --git a/test/system/200-pod.bats b/test/system/200-pod.bats
index 92d3966be..0e522b34d 100644
--- a/test/system/200-pod.bats
+++ b/test/system/200-pod.bats
@@ -479,21 +479,25 @@ spec:
fi
local name1="resources1"
- run_podman --cgroup-manager=systemd pod create --name=$name1 --cpus=5
- run_podman --cgroup-manager=systemd pod start $name1
+ run_podman --cgroup-manager=systemd pod create --name=$name1 --cpus=5 --memory=10m
+ run_podman --cgroup-manager=systemd pod start $name1
run_podman pod inspect --format '{{.CgroupPath}}' $name1
local path1="$output"
local actual1=$(< /sys/fs/cgroup/$path1/cpu.max)
is "$actual1" "500000 100000" "resource limits set properly"
+    local actual2=$(< /sys/fs/cgroup/$path1/memory.max)
+    is "$actual2" "10485760" "memory limit set properly"
run_podman pod --cgroup-manager=systemd rm -f $name1
local name2="resources2"
- run_podman --cgroup-manager=cgroupfs pod create --cpus=5 --name=$name2
+ run_podman --cgroup-manager=cgroupfs pod create --cpus=5 --memory=10m --name=$name2
run_podman --cgroup-manager=cgroupfs pod start $name2
run_podman pod inspect --format '{{.CgroupPath}}' $name2
local path2="$output"
local actual2=$(< /sys/fs/cgroup/$path2/cpu.max)
is "$actual2" "500000 100000" "resource limits set properly"
+    local actual3=$(< /sys/fs/cgroup/$path2/memory.max)
+    is "$actual3" "10485760" "memory limit set properly"
run_podman --cgroup-manager=cgroupfs pod rm $name2
}
diff --git a/test/testvol/main.go b/test/testvol/main.go
index 99c6fb694..dd4ba642d 100644
--- a/test/testvol/main.go
+++ b/test/testvol/main.go
@@ -1,6 +1,7 @@
package main
import (
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -8,7 +9,6 @@ import (
"time"
"github.com/docker/go-plugins-helpers/volume"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -80,16 +80,16 @@ func startServer(socketPath string) error {
if config.path == "" {
path, err := ioutil.TempDir("", "test_volume_plugin")
if err != nil {
- return errors.Wrapf(err, "error getting directory for plugin")
+ return fmt.Errorf("error getting directory for plugin: %w", err)
}
config.path = path
} else {
pathStat, err := os.Stat(config.path)
if err != nil {
- return errors.Wrapf(err, "unable to access requested plugin state directory")
+ return fmt.Errorf("unable to access requested plugin state directory: %w", err)
}
if !pathStat.IsDir() {
- return errors.Errorf("cannot use %v as plugin state dir as it is not a directory", config.path)
+ return fmt.Errorf("cannot use %v as plugin state dir as it is not a directory", config.path)
}
}
@@ -98,7 +98,7 @@ func startServer(socketPath string) error {
server := volume.NewHandler(handle)
if err := server.ServeUnix(socketPath, 0); err != nil {
- return errors.Wrapf(err, "error starting server")
+ return fmt.Errorf("error starting server: %w", err)
}
return nil
}
@@ -147,7 +147,7 @@ func (d *DirDriver) Create(opts *volume.CreateRequest) error {
logrus.Infof("Hit Create() endpoint")
if _, exists := d.volumes[opts.Name]; exists {
- return errors.Errorf("volume with name %s already exists", opts.Name)
+ return fmt.Errorf("volume with name %s already exists", opts.Name)
}
newVol := new(dirVol)
@@ -161,7 +161,7 @@ func (d *DirDriver) Create(opts *volume.CreateRequest) error {
volPath := filepath.Join(d.volumesPath, opts.Name)
if err := os.Mkdir(volPath, 0755); err != nil {
- return errors.Wrapf(err, "error making volume directory")
+ return fmt.Errorf("error making volume directory: %w", err)
}
newVol.path = volPath
@@ -204,7 +204,7 @@ func (d *DirDriver) Get(req *volume.GetRequest) (*volume.GetResponse, error) {
vol, exists := d.volumes[req.Name]
if !exists {
logrus.Debugf("Did not find volume %s", req.Name)
- return nil, errors.Errorf("no volume with name %s found", req.Name)
+ return nil, fmt.Errorf("no volume with name %s found", req.Name)
}
logrus.Debugf("Found volume %s", req.Name)
@@ -228,19 +228,19 @@ func (d *DirDriver) Remove(req *volume.RemoveRequest) error {
vol, exists := d.volumes[req.Name]
if !exists {
logrus.Debugf("Did not find volume %s", req.Name)
- return errors.Errorf("no volume with name %s found", req.Name)
+ return fmt.Errorf("no volume with name %s found", req.Name)
}
logrus.Debugf("Found volume %s", req.Name)
if len(vol.mounts) > 0 {
logrus.Debugf("Cannot remove %s, is mounted", req.Name)
- return errors.Errorf("volume %s is mounted and cannot be removed", req.Name)
+ return fmt.Errorf("volume %s is mounted and cannot be removed", req.Name)
}
delete(d.volumes, req.Name)
if err := os.RemoveAll(vol.path); err != nil {
- return errors.Wrapf(err, "error removing mountpoint of volume %s", req.Name)
+ return fmt.Errorf("error removing mountpoint of volume %s: %w", req.Name, err)
}
logrus.Debugf("Removed volume %s", req.Name)
@@ -260,7 +260,7 @@ func (d *DirDriver) Path(req *volume.PathRequest) (*volume.PathResponse, error)
vol, exists := d.volumes[req.Name]
if !exists {
logrus.Debugf("Cannot locate volume %s", req.Name)
- return nil, errors.Errorf("no volume with name %s found", req.Name)
+ return nil, fmt.Errorf("no volume with name %s found", req.Name)
}
return &volume.PathResponse{
@@ -278,7 +278,7 @@ func (d *DirDriver) Mount(req *volume.MountRequest) (*volume.MountResponse, erro
vol, exists := d.volumes[req.Name]
if !exists {
logrus.Debugf("Cannot locate volume %s", req.Name)
- return nil, errors.Errorf("no volume with name %s found", req.Name)
+ return nil, fmt.Errorf("no volume with name %s found", req.Name)
}
vol.mounts[req.ID] = true
@@ -298,13 +298,13 @@ func (d *DirDriver) Unmount(req *volume.UnmountRequest) error {
vol, exists := d.volumes[req.Name]
if !exists {
logrus.Debugf("Cannot locate volume %s", req.Name)
- return errors.Errorf("no volume with name %s found", req.Name)
+ return fmt.Errorf("no volume with name %s found", req.Name)
}
mount := vol.mounts[req.ID]
if !mount {
logrus.Debugf("Volume %s is not mounted by %s", req.Name, req.ID)
- return errors.Errorf("volume %s is not mounted by %s", req.Name, req.ID)
+ return fmt.Errorf("volume %s is not mounted by %s", req.Name, req.ID)
}
delete(vol.mounts, req.ID)
diff --git a/troubleshooting.md b/troubleshooting.md
index 05685c906..1fa044fe9 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -663,7 +663,7 @@ $ podman run --rm --rootfs /path/to/rootfs true
The command above will create all the missing directories needed to run the container.
-After that, it can be used in read only mode, by multiple containers at the same time:
+After that, it can be used in read-only mode by multiple containers at the same time:
```console
$ podman run --read-only --rootfs /path/to/rootfs ....
@@ -1231,3 +1231,58 @@ While running podman remote commands with the most updated Podman, issues that w
When upgrading Podman to a particular version for the required fixes, users often make the mistake of only upgrading the Podman client. However, suppose a setup uses `podman-remote` or uses a client that communicates with the Podman server on a remote machine via the REST API. In that case, it is required to upgrade both the Podman client and the Podman server running on the remote machine. Both the Podman client and server must be upgraded to the same version.
Example: If a particular bug was fixed in `v4.1.0`, then both the Podman client and the Podman server must have version `v4.1.0`.
+
+### 37) Unexpected carriage returns are output to the terminal
+
+When using the __--tty__ (__-t__) flag, unexpected carriage returns are output to the terminal.
+
+#### Symptom
+
+The container program prints a newline (`\n`) but the terminal outputs a carriage return and a newline (`\r\n`).
+
+```
+$ podman run --rm -t fedora echo abc | od -c
+0000000 a b c \r \n
+0000005
+```
+
+When run directly on the host, the result is as expected.
+
+```
+$ echo abc | od -c
+0000000 a b c \n
+0000004
+```
+
+Extra carriage returns can also shift the prompt to the right.
+
+```
+$ podman run --rm -t fedora sh -c "echo 1; echo 2; echo 3" | cat -A
+1^M$
+ 2^M$
+ 3^M$
+ $
+```
+
+#### Solution
+
+Run Podman without the __--tty__ (__-t__) flag.
+
+```
+$ podman run --rm fedora echo abc | od -c
+0000000 a b c \n
+0000004
+```
+
+The __--tty__ (__-t__) flag should only be used when the program requires user interaction in the terminal, for instance when it expects
+the user to type an answer to a question.
+
+Where does the extra carriage return `\r` come from?
+
+The extra `\r` is not output by Podman but by the terminal. In fact, reconfiguring the terminal makes the extra `\r` go away.
+
+```
+$ podman run --rm -t fedora /bin/sh -c "stty -onlcr && echo abc" | od -c
+0000000 a b c \n
+0000004
+```
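
For those curious, the `stty -onlcr` trick above works because the terminal's output post-processing (the ONLCR flag) is what rewrites `\n` as `\r\n`. A rough, Linux-only sketch that reports the flag for the current terminal; the program is an illustration and not part of this patch:

```go
//go:build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// Report whether the terminal attached to stdout translates "\n" into
// "\r\n" on output -- the ONLCR flag that `stty -onlcr` clears.
func main() {
	fd := int(os.Stdout.Fd())
	tio, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	if err != nil {
		fmt.Fprintln(os.Stderr, "stdout is not a terminal:", err)
		return
	}
	if tio.Oflag&unix.ONLCR != 0 {
		fmt.Println("ONLCR is set: the terminal rewrites \\n as \\r\\n")
	} else {
		fmt.Println("ONLCR is clear: newlines pass through unchanged")
	}
}
```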
diff --git a/utils/ports.go b/utils/ports.go
index 57a6f8275..eea060433 100644
--- a/utils/ports.go
+++ b/utils/ports.go
@@ -1,26 +1,25 @@
package utils
import (
+ "fmt"
"net"
"strconv"
-
- "github.com/pkg/errors"
)
// Find a random, open port on the host.
func GetRandomPort() (int, error) {
l, err := net.Listen("tcp", ":0")
if err != nil {
- return 0, errors.Wrapf(err, "unable to get free TCP port")
+ return 0, fmt.Errorf("unable to get free TCP port: %w", err)
}
defer l.Close()
_, randomPort, err := net.SplitHostPort(l.Addr().String())
if err != nil {
- return 0, errors.Wrapf(err, "unable to determine free port")
+ return 0, fmt.Errorf("unable to determine free port: %w", err)
}
rp, err := strconv.Atoi(randomPort)
if err != nil {
- return 0, errors.Wrapf(err, "unable to convert random port to int")
+ return 0, fmt.Errorf("unable to convert random port to int: %w", err)
}
return rp, nil
}
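
A possible usage sketch for the free-port helper above. `freePort` is a simplified, hypothetical stand-in (it reads the port straight from the listener address instead of re-parsing the string form), and the port is only guaranteed free at the moment of the check, so callers should be prepared to retry:

```go
package main

import (
	"fmt"
	"net"
)

// freePort binds to port 0, lets the kernel pick a free port, and reads
// it back from the listener's address.
func freePort() (int, error) {
	l, err := net.Listen("tcp", ":0")
	if err != nil {
		return 0, fmt.Errorf("unable to get free TCP port: %w", err)
	}
	defer l.Close()
	// The TCP listener address is a *net.TCPAddr, so the port can be
	// read directly.
	return l.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	p, err := freePort()
	if err != nil {
		panic(err)
	}
	// The listener is closed on return, so another process may claim the
	// port before it is used again.
	fmt.Println("free port:", p)
}
```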
diff --git a/utils/utils.go b/utils/utils.go
index 9239cf907..a20181b0a 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -16,7 +16,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/storage/pkg/archive"
"github.com/godbus/dbus/v5"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -114,7 +113,7 @@ func UntarToFileSystem(dest string, tarball *os.File, options *archive.TarOption
func CreateTarFromSrc(source string, dest string) error {
file, err := os.Create(dest)
if err != nil {
- return errors.Wrapf(err, "Could not create tarball file '%s'", dest)
+ return fmt.Errorf("could not create tarball file '%s': %w", dest, err)
}
defer file.Close()
return TarToFilesystem(source, file)
@@ -154,7 +153,7 @@ func RemoveScientificNotationFromFloat(x float64) (float64, error) {
}
result, err := strconv.ParseFloat(bigNum, 64)
if err != nil {
- return x, errors.Wrapf(err, "unable to remove scientific number from calculations")
+ return x, fmt.Errorf("unable to remove scientific number from calculations: %w", err)
}
return result, nil
}
@@ -181,11 +180,11 @@ func moveProcessPIDFileToScope(pidPath, slice, scope string) error {
if os.IsNotExist(err) {
return nil
}
- return errors.Wrapf(err, "cannot read pid file %s", pidPath)
+ return fmt.Errorf("cannot read pid file %s: %w", pidPath, err)
}
pid, err := strconv.ParseUint(string(data), 10, 0)
if err != nil {
- return errors.Wrapf(err, "cannot parse pid file %s", pidPath)
+ return fmt.Errorf("cannot parse pid file %s: %w", pidPath, err)
}
return moveProcessToScope(int(pid), slice, scope)
diff --git a/utils/utils_supported.go b/utils/utils_supported.go
index 6378212b6..d7d47b2bc 100644
--- a/utils/utils_supported.go
+++ b/utils/utils_supported.go
@@ -17,7 +17,6 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
"github.com/godbus/dbus/v5"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -77,7 +76,7 @@ func getCgroupProcess(procFile string, allowRoot bool) (string, error) {
line := scanner.Text()
parts := strings.SplitN(line, ":", 3)
if len(parts) != 3 {
- return "", errors.Errorf("cannot parse cgroup line %q", line)
+ return "", fmt.Errorf("cannot parse cgroup line %q", line)
}
if strings.HasPrefix(line, "0::") {
cgroup = line[3:]
@@ -88,7 +87,7 @@ func getCgroupProcess(procFile string, allowRoot bool) (string, error) {
}
}
if len(cgroup) == 0 || (!allowRoot && cgroup == "/") {
- return "", errors.Errorf("could not find cgroup mount in %q", procFile)
+ return "", fmt.Errorf("could not find cgroup mount in %q", procFile)
}
return cgroup, nil
}
@@ -133,7 +132,7 @@ func moveUnderCgroup(cgroup, subtree string, processes []uint32) error {
line := scanner.Text()
parts := strings.SplitN(line, ":", 3)
if len(parts) != 3 {
- return errors.Errorf("cannot parse cgroup line %q", line)
+ return fmt.Errorf("cannot parse cgroup line %q", line)
}
// root cgroup, skip it
diff --git a/utils/utils_windows.go b/utils/utils_windows.go
index 1d017f5ae..18f232116 100644
--- a/utils/utils_windows.go
+++ b/utils/utils_windows.go
@@ -3,7 +3,7 @@
package utils
-import "github.com/pkg/errors"
+import "errors"
func RunUnderSystemdScope(pid int, slice string, unitName string) error {
return errors.New("not implemented for windows")
diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go
index a45358f9b..ee9f584de 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go
@@ -152,10 +152,10 @@ func resourcesToProps(res *configs.Resources) (map[string]uint64, map[string]str
// Mem
if res.Memory != 0 {
- iMap["MemoryMax"] = res.Memory
+ uMap["MemoryMax"] = uint64(res.Memory)
}
if res.MemorySwap != 0 {
- iMap["MemorySwapMax"] = res.MemorySwap
+ uMap["MemorySwapMax"] = uint64(res.MemorySwap)
}
// Blkio
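
The hunk above moves the memory limits from the signed-integer property map into the unsigned one; systemd models `MemoryMax`/`MemorySwapMax` as unsigned 64-bit values, and the D-Bus variant carries the Go type it was built from. A rough standalone sketch of the same idea (the `memoryProps` helper is hypothetical, not part of this patch):

```go
package main

import (
	"fmt"

	systemdDbus "github.com/coreos/go-systemd/v22/dbus"
	"github.com/godbus/dbus/v5"
)

// memoryProps builds transient-unit properties with explicitly unsigned
// values, mirroring the move from iMap to uMap in resourcesToProps above.
func memoryProps(memory, swap int64) []systemdDbus.Property {
	var props []systemdDbus.Property
	if memory != 0 {
		props = append(props, systemdDbus.Property{
			Name:  "MemoryMax",
			Value: dbus.MakeVariant(uint64(memory)), // uint64, not int64
		})
	}
	if swap != 0 {
		props = append(props, systemdDbus.Property{
			Name:  "MemorySwapMax",
			Value: dbus.MakeVariant(uint64(swap)),
		})
	}
	return props
}

func main() {
	for _, p := range memoryProps(1<<30, 2<<30) {
		fmt.Printf("%s = %v\n", p.Name, p.Value.Value())
	}
}
```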
diff --git a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
index 3712afc71..0db77879c 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
@@ -221,6 +221,9 @@ func DefaultProfile() *Seccomp {
"ipc",
"keyctl",
"kill",
+ "landlock_add_rule",
+ "landlock_create_ruleset",
+ "landlock_restrict_self",
"lchown",
"lchown32",
"lgetxattr",
diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
index 442632e7d..18674db4d 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
+++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
@@ -228,6 +228,9 @@
"ipc",
"keyctl",
"kill",
+ "landlock_add_rule",
+ "landlock_create_ruleset",
+ "landlock_restrict_self",
"lchown",
"lchown32",
"lgetxattr",
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 3e8657bc4..aaddcaeb0 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -111,7 +111,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
-# github.com/containers/common v0.48.1-0.20220628131511-a8336c1613fe
+# github.com/containers/common v0.48.1-0.20220630172158-178929cf063e
## explicit
github.com/containers/common/libimage
github.com/containers/common/libimage/define