Diffstat (limited to 'libpod')
-rw-r--r-- libpod/boltdb_state.go 632
-rw-r--r-- libpod/boltdb_state_internal.go 200
-rw-r--r-- libpod/boltdb_state_linux.go 5
-rw-r--r-- libpod/container.go 101
-rw-r--r-- libpod/container_api.go 201
-rw-r--r-- libpod/container_commit.go 10
-rw-r--r-- libpod/container_config.go 19
-rw-r--r-- libpod/container_copy_linux.go 21
-rw-r--r-- libpod/container_exec.go 115
-rw-r--r-- libpod/container_graph.go 14
-rw-r--r-- libpod/container_inspect.go 34
-rw-r--r-- libpod/container_internal.go 336
-rw-r--r-- libpod/container_internal_linux.go 513
-rw-r--r-- libpod/container_log.go 22
-rw-r--r-- libpod/container_log_linux.go 33
-rw-r--r-- libpod/container_log_unsupported.go 6
-rw-r--r-- libpod/container_path_resolution.go 4
-rw-r--r-- libpod/container_stat_linux.go 9
-rw-r--r-- libpod/container_top_linux.go 16
-rw-r--r-- libpod/container_validate.go 39
-rw-r--r-- libpod/define/container_inspect.go 4
-rw-r--r-- libpod/define/containerstate.go 5
-rw-r--r-- libpod/define/errors.go 7
-rw-r--r-- libpod/define/exec_codes.go 6
-rw-r--r-- libpod/define/healthchecks.go 10
-rw-r--r-- libpod/define/info.go 12
-rw-r--r-- libpod/define/pod_inspect.go 5
-rw-r--r-- libpod/define/terminal.go 7
-rw-r--r-- libpod/define/volume_inspect.go 8
-rw-r--r-- libpod/diff.go 5
-rw-r--r-- libpod/events.go 18
-rw-r--r-- libpod/events/config.go 7
-rw-r--r-- libpod/events/events.go 14
-rw-r--r-- libpod/events/events_linux.go 6
-rw-r--r-- libpod/events/events_unsupported.go 2
-rw-r--r-- libpod/events/filters.go 10
-rw-r--r-- libpod/events/journal_linux.go 30
-rw-r--r-- libpod/events/logfile.go 6
-rw-r--r-- libpod/healthcheck.go 61
-rw-r--r-- libpod/healthcheck_linux.go 81
-rw-r--r-- libpod/info.go 105
-rw-r--r-- libpod/kube.go 39
-rw-r--r-- libpod/lock/file/file_lock.go 34
-rw-r--r-- libpod/lock/in_memory_locks.go 14
-rw-r--r-- libpod/lock/shm/shm_lock.go 35
-rw-r--r-- libpod/lock/shm_lock_manager_linux.go 10
-rw-r--r-- libpod/logs/log.go 12
-rw-r--r-- libpod/logs/reversereader/reversereader.go 8
-rw-r--r-- libpod/networking_linux.go 176
-rw-r--r-- libpod/networking_slirp4netns.go 104
-rw-r--r-- libpod/oci.go 39
-rw-r--r-- libpod/oci_conmon_attach_linux.go (renamed from libpod/oci_attach_linux.go) 70
-rw-r--r-- libpod/oci_conmon_exec_linux.go 71
-rw-r--r-- libpod/oci_conmon_linux.go 324
-rw-r--r-- libpod/oci_missing.go 19
-rw-r--r-- libpod/oci_util.go 25
-rw-r--r-- libpod/options.go 100
-rw-r--r-- libpod/plugin/volume_api.go 104
-rw-r--r-- libpod/pod.go 61
-rw-r--r-- libpod/pod_api.go 58
-rw-r--r-- libpod/pod_internal.go 9
-rw-r--r-- libpod/pod_top_linux.go 2
-rw-r--r-- libpod/reset.go 86
-rw-r--r-- libpod/runtime.go 199
-rw-r--r-- libpod/runtime_cstorage.go 27
-rw-r--r-- libpod/runtime_ctr.go 121
-rw-r--r-- libpod/runtime_img.go 13
-rw-r--r-- libpod/runtime_migrate.go 15
-rw-r--r-- libpod/runtime_pod.go 9
-rw-r--r-- libpod/runtime_pod_linux.go 77
-rw-r--r-- libpod/runtime_renumber.go 9
-rw-r--r-- libpod/runtime_volume.go 6
-rw-r--r-- libpod/runtime_volume_linux.go 154
-rw-r--r-- libpod/runtime_worker.go 33
-rw-r--r-- libpod/service.go 4
-rw-r--r-- libpod/shutdown/handler.go 4
-rw-r--r-- libpod/state.go 9
-rw-r--r-- libpod/stats.go 35
-rw-r--r-- libpod/storage.go 12
-rw-r--r-- libpod/util.go 21
-rw-r--r-- libpod/util_linux.go 38
-rw-r--r-- libpod/volume.go 2
-rw-r--r-- libpod/volume_inspect.go 8
-rw-r--r-- libpod/volume_internal.go 10
-rw-r--r-- libpod/volume_internal_linux.go 13
85 files changed, 2945 insertions, 2013 deletions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 9745121c7..81f11410b 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -2,16 +2,18 @@ package libpod
import (
"bytes"
+ "errors"
"fmt"
"net"
"os"
+ "strconv"
"strings"
"sync"
+ "time"
"github.com/containers/common/libnetwork/types"
"github.com/containers/podman/v4/libpod/define"
jsoniter "github.com/json-iterator/go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)
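
The import hunk above sets the pattern for the entire patch: github.com/pkg/errors is dropped in favor of the standard library, errors.Wrapf becomes fmt.Errorf with the %w verb, and errors.Cause comparisons become errors.Is. A minimal, runnable sketch of the equivalence; the errNoSuchCtr sentinel and lookup function are hypothetical stand-ins, not code from this patch:

package main

import (
	"errors"
	"fmt"
)

var errNoSuchCtr = errors.New("no such container") // stand-in for define.ErrNoSuchCtr

func lookup(id string) error {
	// Before: errors.Wrapf(errNoSuchCtr, "no container found with ID %s", id)
	// After: fmt.Errorf with %w keeps the sentinel matchable by callers.
	return fmt.Errorf("no container found with ID %s: %w", id, errNoSuchCtr)
}

func main() {
	err := lookup("deadbeef")
	// errors.Is replaces errors.Cause(err) == errNoSuchCtr, and it also
	// sees through multiple layers of wrapping.
	fmt.Println(errors.Is(err, errNoSuchCtr)) // true
}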
@@ -63,6 +65,13 @@ type BoltState struct {
// initially created the database. This must match for any further instances
// that access the database, to ensure that state mismatches with
// containers/storage do not occur.
+// - exitCodeBucket/exitCodeTimeStampBucket: (#14559) exit codes must be part
+// of the database to resolve a previous race condition when one process waits
+// for the exit file to be written and another process removes it along with
+// the container during auto-removal. The same race would happen trying to
+// read the exit code from the containers bucket. Hence, exit codes go into
+// their own bucket. To avoid the rather expensive JSON (un)marshaling, we
+// have two buckets: one for the exit codes, the other for the timestamps.
// NewBoltState creates a new bolt-backed state database
func NewBoltState(path string, runtime *Runtime) (State, error) {
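
The bucket comment above motivates storing exit data as raw bytes instead of JSON. Judging by the functions added later in this diff, the encoding is simply strconv for the code and time.Time's text marshaling (RFC 3339) for the stamp; a condensed, runnable sketch:

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	// Exit codes are stored as decimal ASCII rather than JSON.
	rawExitCode := []byte(strconv.Itoa(137))

	// Timestamps use time.Time's text marshaling (RFC 3339).
	rawTimeStamp, err := time.Now().MarshalText()
	if err != nil {
		panic(err)
	}

	// Decoding reverses both steps, as GetContainerExitCode and
	// GetContainerExitCodeTimeStamp do later in this patch.
	code, err := strconv.Atoi(string(rawExitCode))
	if err != nil {
		panic(err)
	}
	var ts time.Time
	if err := ts.UnmarshalText(rawTimeStamp); err != nil {
		panic(err)
	}
	fmt.Println(code, ts)
}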
@@ -76,7 +85,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
db, err := bolt.Open(path, 0600, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error opening database %s", path)
+ return nil, fmt.Errorf("error opening database %s: %w", path, err)
}
// Everywhere else, we use s.deferredCloseDBCon(db) to ensure the state's DB
// mutex is also unlocked.
@@ -98,6 +107,8 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
allVolsBkt,
execBkt,
runtimeConfigBkt,
+ exitCodeBkt,
+ exitCodeTimeStampBkt,
}
// Does the DB need an update?
@@ -112,7 +123,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
return nil
})
if err != nil {
- return nil, errors.Wrapf(err, "error checking DB schema")
+ return nil, fmt.Errorf("error checking DB schema: %w", err)
}
if !needsUpdate {
@@ -124,13 +135,13 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
err = db.Update(func(tx *bolt.Tx) error {
for _, bkt := range createBuckets {
if _, err := tx.CreateBucketIfNotExists(bkt); err != nil {
- return errors.Wrapf(err, "error creating bucket %s", string(bkt))
+ return fmt.Errorf("error creating bucket %s: %w", string(bkt), err)
}
}
return nil
})
if err != nil {
- return nil, errors.Wrapf(err, "error creating buckets for DB")
+ return nil, fmt.Errorf("error creating buckets for DB: %w", err)
}
state.valid = true
@@ -162,6 +173,11 @@ func (s *BoltState) Refresh() error {
return err
}
+ namesBucket, err := getNamesBucket(tx)
+ if err != nil {
+ return err
+ }
+
ctrsBucket, err := getCtrBucket(tx)
if err != nil {
return err
@@ -187,11 +203,51 @@ func (s *BoltState) Refresh() error {
return err
}
+ exitCodeBucket, err := getExitCodeBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ timeStampBucket, err := getExitCodeTimeStampBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ // Clear all container exit codes
+ toRemoveExitCodes := []string{}
+ err = exitCodeBucket.ForEach(func(id, _ []byte) error {
+ toRemoveExitCodes = append(toRemoveExitCodes, string(id))
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("error reading exit codes bucket: %w", err)
+ }
+ for _, id := range toRemoveExitCodes {
+ if err := exitCodeBucket.Delete([]byte(id)); err != nil {
+ return fmt.Errorf("error removing exit code for ID %s: %w", id, err)
+ }
+ }
+
+ toRemoveTimeStamps := []string{}
+ err = timeStampBucket.ForEach(func(id, _ []byte) error {
+ toRemoveTimeStamps = append(toRemoveTimeStamps, string(id))
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("reading timestamps bucket: %w", err)
+ }
+ for _, id := range toRemoveTimeStamps {
+ if err := timeStampBucket.Delete([]byte(id)); err != nil {
+ return fmt.Errorf("removing timestamp for ID %s: %w", id, err)
+ }
+ }
+
// Iterate through all IDs. Check if they are containers.
// If they are, unmarshal their state, and then clear
// PID, mountpoint, and state for all of them
// Then save the modified state
// Also clear all network namespaces
+ toRemoveIDs := []string{}
err = idBucket.ForEach(func(id, name []byte) error {
ctrBkt := ctrsBucket.Bucket(id)
if ctrBkt == nil {
@@ -199,20 +255,28 @@ func (s *BoltState) Refresh() error {
podBkt := podsBucket.Bucket(id)
if podBkt == nil {
// This is neither a pod nor a container
- // Error out on the dangling ID
- return errors.Wrapf(define.ErrInternal, "id %s is not a pod or a container", string(id))
+ // Something is seriously wrong, but
+ // continue on and try to clean up the
+ // state and become consistent.
+ // Just note what needs to be removed
+ // for now - ForEach says you shouldn't
+ // remove things from the table during
+ // it.
+ logrus.Errorf("Database issue: dangling ID %s found (not a pod or container) - removing", string(id))
+ toRemoveIDs = append(toRemoveIDs, string(id))
+ return nil
}
// Get the state
stateBytes := podBkt.Get(stateKey)
if stateBytes == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s missing state key", string(id))
+ return fmt.Errorf("pod %s missing state key: %w", string(id), define.ErrInternal)
}
state := new(podState)
if err := json.Unmarshal(stateBytes, state); err != nil {
- return errors.Wrapf(err, "error unmarshalling state for pod %s", string(id))
+ return fmt.Errorf("error unmarshalling state for pod %s: %w", string(id), err)
}
// Clear the Cgroup path
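
The dangling-ID cleanup above collects IDs during ForEach and deletes them only after iteration, because bbolt does not allow modifying a bucket while iterating it (the in-code comment notes the same constraint). A self-contained sketch of this collect-then-delete pattern with go.etcd.io/bbolt, using a hypothetical "ids" bucket:

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists([]byte("ids")) // hypothetical bucket
		if err != nil {
			return err
		}
		// ForEach must not modify the bucket, so collect first...
		toRemove := []string{}
		if err := bkt.ForEach(func(k, v []byte) error {
			if len(v) == 0 { // stand-in for the "dangling ID" check
				toRemove = append(toRemove, string(k))
			}
			return nil
		}); err != nil {
			return err
		}
		// ...then delete once iteration has finished.
		for _, k := range toRemove {
			if err := bkt.Delete([]byte(k)); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}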
@@ -220,11 +284,11 @@ func (s *BoltState) Refresh() error {
newStateBytes, err := json.Marshal(state)
if err != nil {
- return errors.Wrapf(err, "error marshalling modified state for pod %s", string(id))
+ return fmt.Errorf("error marshalling modified state for pod %s: %w", string(id), err)
}
if err := podBkt.Put(stateKey, newStateBytes); err != nil {
- return errors.Wrapf(err, "error updating state for pod %s in DB", string(id))
+ return fmt.Errorf("error updating state for pod %s in DB: %w", string(id), err)
}
// It's not a container, nothing to do
@@ -233,30 +297,30 @@ func (s *BoltState) Refresh() error {
// First, delete the network namespace
if err := ctrBkt.Delete(netNSKey); err != nil {
- return errors.Wrapf(err, "error removing network namespace for container %s", string(id))
+ return fmt.Errorf("error removing network namespace for container %s: %w", string(id), err)
}
stateBytes := ctrBkt.Get(stateKey)
if stateBytes == nil {
// Badly formatted container bucket
- return errors.Wrapf(define.ErrInternal, "container %s missing state in DB", string(id))
+ return fmt.Errorf("container %s missing state in DB: %w", string(id), define.ErrInternal)
}
state := new(ContainerState)
if err := json.Unmarshal(stateBytes, state); err != nil {
- return errors.Wrapf(err, "error unmarshalling state for container %s", string(id))
+ return fmt.Errorf("error unmarshalling state for container %s: %w", string(id), err)
}
resetState(state)
newStateBytes, err := json.Marshal(state)
if err != nil {
- return errors.Wrapf(err, "error marshalling modified state for container %s", string(id))
+ return fmt.Errorf("error marshalling modified state for container %s: %w", string(id), err)
}
if err := ctrBkt.Put(stateKey, newStateBytes); err != nil {
- return errors.Wrapf(err, "error updating state for container %s in DB", string(id))
+ return fmt.Errorf("error updating state for container %s in DB: %w", string(id), err)
}
// Delete all exec sessions, if there are any
@@ -274,7 +338,7 @@ func (s *BoltState) Refresh() error {
}
for _, execID := range toRemove {
if err := ctrExecBkt.Delete([]byte(execID)); err != nil {
- return errors.Wrapf(err, "error removing exec session %s from container %s", execID, string(id))
+ return fmt.Errorf("error removing exec session %s from container %s: %w", execID, string(id), err)
}
}
}
@@ -285,11 +349,29 @@ func (s *BoltState) Refresh() error {
return err
}
+ // Remove dangling IDs.
+ for _, id := range toRemoveIDs {
+ // Look up the ID to see if we also have a dangling name
+ // in the DB.
+ name := idBucket.Get([]byte(id))
+ if name != nil {
+ if testID := namesBucket.Get(name); testID != nil {
+ logrus.Infof("Found dangling name %s (ID %s) in database", string(name), id)
+ if err := namesBucket.Delete(name); err != nil {
+ return fmt.Errorf("error removing dangling name %s (ID %s) from database: %w", string(name), id, err)
+ }
+ }
+ }
+ if err := idBucket.Delete([]byte(id)); err != nil {
+ return fmt.Errorf("error removing dangling ID %s from database: %w", id, err)
+ }
+ }
+
// Now refresh volumes
err = allVolsBucket.ForEach(func(id, name []byte) error {
dbVol := volBucket.Bucket(id)
if dbVol == nil {
- return errors.Wrapf(define.ErrInternal, "inconsistency in state - volume %s is in all volumes bucket but volume not found", string(id))
+ return fmt.Errorf("inconsistency in state - volume %s is in all volumes bucket but volume not found: %w", string(id), define.ErrInternal)
}
// Get the state
@@ -302,7 +384,7 @@ func (s *BoltState) Refresh() error {
oldState := new(VolumeState)
if err := json.Unmarshal(volStateBytes, oldState); err != nil {
- return errors.Wrapf(err, "error unmarshalling state for volume %s", string(id))
+ return fmt.Errorf("error unmarshalling state for volume %s: %w", string(id), err)
}
// Reset mount count to 0
@@ -311,11 +393,11 @@ func (s *BoltState) Refresh() error {
newState, err := json.Marshal(oldState)
if err != nil {
- return errors.Wrapf(err, "error marshalling state for volume %s", string(id))
+ return fmt.Errorf("error marshalling state for volume %s: %w", string(id), err)
}
if err := dbVol.Put(stateKey, newState); err != nil {
- return errors.Wrapf(err, "error storing new state for volume %s", string(id))
+ return fmt.Errorf("error storing new state for volume %s: %w", string(id), err)
}
return nil
@@ -339,7 +421,7 @@ func (s *BoltState) Refresh() error {
for _, execSession := range toRemoveExec {
if err := execBucket.Delete([]byte(execSession)); err != nil {
- return errors.Wrapf(err, "error deleting exec session %s registry from database", execSession)
+ return fmt.Errorf("error deleting exec session %s registry from database: %w", execSession, err)
}
}
@@ -561,7 +643,7 @@ func (s *BoltState) LookupContainerID(idOrName string) (string, error) {
if s.namespaceBytes != nil {
ns := nsBucket.Get(fullID)
if !bytes.Equal(ns, s.namespaceBytes) {
- return errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName)
+ return fmt.Errorf("no container found with name or ID %s: %w", idOrName, define.ErrNoSuchCtr)
}
}
id = fullID
@@ -686,7 +768,7 @@ func (s *BoltState) AddContainer(ctr *Container) error {
}
if ctr.config.Pod != "" {
- return errors.Wrapf(define.ErrInvalidArg, "cannot add a container that belongs to a pod with AddContainer - use AddContainerToPod")
+ return fmt.Errorf("cannot add a container that belongs to a pod with AddContainer - use AddContainerToPod: %w", define.ErrInvalidArg)
}
return s.addContainer(ctr, nil)
@@ -701,7 +783,7 @@ func (s *BoltState) RemoveContainer(ctr *Container) error {
}
if ctr.config.Pod != "" {
- return errors.Wrapf(define.ErrPodExists, "container %s is part of a pod, use RemoveContainerFromPod instead", ctr.ID())
+ return fmt.Errorf("container %s is part of a pod, use RemoveContainerFromPod instead: %w", ctr.ID(), define.ErrPodExists)
}
db, err := s.getDBCon()
@@ -727,7 +809,7 @@ func (s *BoltState) UpdateContainer(ctr *Container) error {
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
newState := new(ContainerState)
@@ -750,16 +832,16 @@ func (s *BoltState) UpdateContainer(ctr *Container) error {
ctrToUpdate := ctrBucket.Bucket(ctrID)
if ctrToUpdate == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ return fmt.Errorf("container %s does not exist in database: %w", ctr.ID(), define.ErrNoSuchCtr)
}
newStateBytes := ctrToUpdate.Get(stateKey)
if newStateBytes == nil {
- return errors.Wrapf(define.ErrInternal, "container %s does not have a state key in DB", ctr.ID())
+ return fmt.Errorf("container %s does not have a state key in DB: %w", ctr.ID(), define.ErrInternal)
}
if err := json.Unmarshal(newStateBytes, newState); err != nil {
- return errors.Wrapf(err, "error unmarshalling container %s state", ctr.ID())
+ return fmt.Errorf("error unmarshalling container %s state: %w", ctr.ID(), err)
}
netNSBytes := ctrToUpdate.Get(netNSKey)
@@ -799,12 +881,12 @@ func (s *BoltState) SaveContainer(ctr *Container) error {
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
stateJSON, err := json.Marshal(ctr.state)
if err != nil {
- return errors.Wrapf(err, "error marshalling container %s state to JSON", ctr.ID())
+ return fmt.Errorf("error marshalling container %s state to JSON: %w", ctr.ID(), err)
}
netNSPath := getNetNSPath(ctr)
@@ -825,22 +907,22 @@ func (s *BoltState) SaveContainer(ctr *Container) error {
ctrToSave := ctrBucket.Bucket(ctrID)
if ctrToSave == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in DB", ctr.ID())
+ return fmt.Errorf("container %s does not exist in DB: %w", ctr.ID(), define.ErrNoSuchCtr)
}
// Update the state
if err := ctrToSave.Put(stateKey, stateJSON); err != nil {
- return errors.Wrapf(err, "error updating container %s state in DB", ctr.ID())
+ return fmt.Errorf("error updating container %s state in DB: %w", ctr.ID(), err)
}
if netNSPath != "" {
if err := ctrToSave.Put(netNSKey, []byte(netNSPath)); err != nil {
- return errors.Wrapf(err, "error updating network namespace path for container %s in DB", ctr.ID())
+ return fmt.Errorf("error updating network namespace path for container %s in DB: %w", ctr.ID(), err)
}
} else {
// Delete the existing network namespace
if err := ctrToSave.Delete(netNSKey); err != nil {
- return errors.Wrapf(err, "error removing network namespace path for container %s in DB", ctr.ID())
+ return fmt.Errorf("error removing network namespace path for container %s in DB: %w", ctr.ID(), err)
}
}
@@ -862,7 +944,7 @@ func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) {
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return nil, fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
depCtrs := []string{}
@@ -882,12 +964,12 @@ func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) {
ctrDB := ctrBucket.Bucket([]byte(ctr.ID()))
if ctrDB == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %q found in DB", ctr.ID())
+ return fmt.Errorf("no container with ID %q found in DB: %w", ctr.ID(), define.ErrNoSuchCtr)
}
dependsBkt := ctrDB.Bucket(dependenciesBkt)
if dependsBkt == nil {
- return errors.Wrapf(define.ErrInternal, "container %s has no dependencies bucket", ctr.ID())
+ return fmt.Errorf("container %s has no dependencies bucket: %w", ctr.ID(), define.ErrInternal)
}
// Iterate through and add dependencies
@@ -940,7 +1022,7 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
// be much less helpful.
ctrExists := ctrBucket.Bucket(id)
if ctrExists == nil {
- return errors.Wrapf(define.ErrInternal, "state is inconsistent - container ID %s in all containers, but container not found", string(id))
+ return fmt.Errorf("state is inconsistent - container ID %s in all containers, but container not found: %w", string(id), define.ErrInternal)
}
ctr := new(Container)
@@ -952,7 +1034,7 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
// ignore it safely.
// We just won't include the container in the
// results.
- if errors.Cause(err) != define.ErrNSMismatch {
+ if !errors.Is(err, define.ErrNSMismatch) {
// Even if it's not an NS mismatch, it's
// not worth erroring over.
// If we do, a single bad container JSON
@@ -984,7 +1066,7 @@ func (s *BoltState) GetNetworks(ctr *Container) (map[string]types.PerNetworkOpti
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return nil, fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
// if the network mode is not bridge return no networks
@@ -1013,7 +1095,7 @@ func (s *BoltState) GetNetworks(ctr *Container) (map[string]types.PerNetworkOpti
dbCtr := ctrBucket.Bucket(ctrID)
if dbCtr == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ return fmt.Errorf("container %s does not exist in database: %w", ctr.ID(), define.ErrNoSuchCtr)
}
ctrNetworkBkt := dbCtr.Bucket(networksBkt)
@@ -1051,7 +1133,7 @@ func (s *BoltState) GetNetworks(ctr *Container) (map[string]types.PerNetworkOpti
dbCtr := ctrBucket.Bucket(ctrID)
if dbCtr == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ return fmt.Errorf("container %s does not exist in database: %w", ctr.ID(), define.ErrNoSuchCtr)
}
var networkList []string
@@ -1060,7 +1142,7 @@ func (s *BoltState) GetNetworks(ctr *Container) (map[string]types.PerNetworkOpti
if ctrNetworkBkt == nil {
ctrNetworkBkt, err = dbCtr.CreateBucket(networksBkt)
if err != nil {
- return errors.Wrapf(err, "error creating networks bucket for container %s", ctr.ID())
+ return fmt.Errorf("error creating networks bucket for container %s: %w", ctr.ID(), err)
}
// the container has no networks in the db lookup config and write to the db
networkList = ctr.config.NetworksDeprecated
@@ -1084,7 +1166,7 @@ func (s *BoltState) GetNetworks(ctr *Container) (map[string]types.PerNetworkOpti
if ctr.state.NetInterfaceDescriptions != nil {
eth, exists := ctr.state.NetInterfaceDescriptions.getInterfaceByName(network)
if !exists {
- return errors.Errorf("no network interface name for container %s on network %s", ctr.config.ID, network)
+ return fmt.Errorf("no network interface name for container %s on network %s", ctr.config.ID, network)
}
intName = eth
} else {
@@ -1158,16 +1240,16 @@ func (s *BoltState) NetworkConnect(ctr *Container, network string, opts types.Pe
}
if network == "" {
- return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ return fmt.Errorf("network names must not be empty: %w", define.ErrInvalidArg)
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
optBytes, err := json.Marshal(opts)
if err != nil {
- return errors.Wrapf(err, "error marshalling network options JSON for container %s", ctr.ID())
+ return fmt.Errorf("error marshalling network options JSON for container %s: %w", ctr.ID(), err)
}
ctrID := []byte(ctr.ID())
@@ -1187,21 +1269,21 @@ func (s *BoltState) NetworkConnect(ctr *Container, network string, opts types.Pe
dbCtr := ctrBucket.Bucket(ctrID)
if dbCtr == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ return fmt.Errorf("container %s does not exist in database: %w", ctr.ID(), define.ErrNoSuchCtr)
}
ctrNetworksBkt := dbCtr.Bucket(networksBkt)
if ctrNetworksBkt == nil {
- return errors.Wrapf(define.ErrNoSuchNetwork, "container %s does not have a network bucket", ctr.ID())
+ return fmt.Errorf("container %s does not have a network bucket: %w", ctr.ID(), define.ErrNoSuchNetwork)
}
netConnected := ctrNetworksBkt.Get([]byte(network))
if netConnected != nil {
- return errors.Wrapf(define.ErrNetworkExists, "container %s is already connected to network %q", ctr.ID(), network)
+ return fmt.Errorf("container %s is already connected to network %q: %w", ctr.ID(), network, define.ErrNetworkExists)
}
// Add the network
if err := ctrNetworksBkt.Put([]byte(network), optBytes); err != nil {
- return errors.Wrapf(err, "error adding container %s to network %s in DB", ctr.ID(), network)
+ return fmt.Errorf("error adding container %s to network %s in DB: %w", ctr.ID(), network, err)
}
return nil
@@ -1220,11 +1302,11 @@ func (s *BoltState) NetworkDisconnect(ctr *Container, network string) error {
}
if network == "" {
- return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ return fmt.Errorf("network names must not be empty: %w", define.ErrInvalidArg)
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
ctrID := []byte(ctr.ID())
@@ -1244,21 +1326,21 @@ func (s *BoltState) NetworkDisconnect(ctr *Container, network string) error {
dbCtr := ctrBucket.Bucket(ctrID)
if dbCtr == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ return fmt.Errorf("container %s does not exist in database: %w", ctr.ID(), define.ErrNoSuchCtr)
}
ctrAliasesBkt := dbCtr.Bucket(aliasesBkt)
ctrNetworksBkt := dbCtr.Bucket(networksBkt)
if ctrNetworksBkt == nil {
- return errors.Wrapf(define.ErrNoSuchNetwork, "container %s is not connected to any CNI networks, so cannot disconnect", ctr.ID())
+ return fmt.Errorf("container %s is not connected to any CNI networks, so cannot disconnect: %w", ctr.ID(), define.ErrNoSuchNetwork)
}
netConnected := ctrNetworksBkt.Get([]byte(network))
if netConnected == nil {
- return errors.Wrapf(define.ErrNoSuchNetwork, "container %s is not connected to CNI network %q", ctr.ID(), network)
+ return fmt.Errorf("container %s is not connected to CNI network %q: %w", ctr.ID(), network, define.ErrNoSuchNetwork)
}
if err := ctrNetworksBkt.Delete([]byte(network)); err != nil {
- return errors.Wrapf(err, "error removing container %s from network %s", ctr.ID(), network)
+ return fmt.Errorf("error removing container %s from network %s: %w", ctr.ID(), network, err)
}
if ctrAliasesBkt != nil {
@@ -1268,7 +1350,7 @@ func (s *BoltState) NetworkDisconnect(ctr *Container, network string) error {
}
if err := ctrAliasesBkt.DeleteBucket([]byte(network)); err != nil {
- return errors.Wrapf(err, "error removing container %s network aliases for network %s", ctr.ID(), network)
+ return fmt.Errorf("error removing container %s network aliases for network %s: %w", ctr.ID(), network, err)
}
}
@@ -1309,6 +1391,204 @@ func (s *BoltState) GetContainerConfig(id string) (*ContainerConfig, error) {
return config, nil
}
+// AddContainerExitCode adds the exit code for the specified container to the database.
+func (s *BoltState) AddContainerExitCode(id string, exitCode int32) error {
+ if len(id) == 0 {
+ return define.ErrEmptyID
+ }
+
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ rawID := []byte(id)
+ rawExitCode := []byte(strconv.Itoa(int(exitCode)))
+ rawTimeStamp, err := time.Now().MarshalText()
+ if err != nil {
+ return fmt.Errorf("marshaling exit-code time stamp: %w", err)
+ }
+
+ return db.Update(func(tx *bolt.Tx) error {
+ exitCodeBucket, err := getExitCodeBucket(tx)
+ if err != nil {
+ return err
+ }
+ timeStampBucket, err := getExitCodeTimeStampBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ if err := exitCodeBucket.Put(rawID, rawExitCode); err != nil {
+ return fmt.Errorf("adding exit code of container %s to DB: %w", id, err)
+ }
+ if err := timeStampBucket.Put(rawID, rawTimeStamp); err != nil {
+ if rmErr := exitCodeBucket.Delete(rawID); rmErr != nil {
+ logrus.Errorf("Removing exit code of container %s from DB: %v", id, rmErr)
+ }
+ return fmt.Errorf("adding exit-code time stamp of container %s to DB: %w", id, err)
+ }
+
+ return nil
+ })
+}
+
+// GetContainerExitCode returns the exit code for the specified container.
+func (s *BoltState) GetContainerExitCode(id string) (int32, error) {
+ if len(id) == 0 {
+ return -1, define.ErrEmptyID
+ }
+
+ if !s.valid {
+ return -1, define.ErrDBClosed
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return -1, err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ rawID := []byte(id)
+ result := int32(-1)
+ return result, db.View(func(tx *bolt.Tx) error {
+ exitCodeBucket, err := getExitCodeBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ rawExitCode := exitCodeBucket.Get(rawID)
+ if rawExitCode == nil {
+ return fmt.Errorf("getting exit code of container %s from DB: %w", id, define.ErrNoSuchExitCode)
+ }
+
+ exitCode, err := strconv.Atoi(string(rawExitCode))
+ if err != nil {
+ return fmt.Errorf("converting raw exit code %v of container %s: %w", rawExitCode, id, err)
+ }
+
+ result = int32(exitCode)
+ return nil
+ })
+}
+
+// GetContainerExitCodeTimeStamp returns the time stamp when the exit code of
+// the specified container was added to the database.
+func (s *BoltState) GetContainerExitCodeTimeStamp(id string) (*time.Time, error) {
+ if len(id) == 0 {
+ return nil, define.ErrEmptyID
+ }
+
+ if !s.valid {
+ return nil, define.ErrDBClosed
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ rawID := []byte(id)
+ var result time.Time
+ return &result, db.View(func(tx *bolt.Tx) error {
+ timeStampBucket, err := getExitCodeTimeStampBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ rawTimeStamp := timeStampBucket.Get(rawID)
+ if rawTimeStamp == nil {
+ return fmt.Errorf("getting exit-code time stamp of container %s from DB: %w", id, define.ErrNoSuchExitCode)
+ }
+
+ if err := result.UnmarshalText(rawTimeStamp); err != nil {
+ return fmt.Errorf("converting raw time stamp %v of container %s from DB: %w", rawTimeStamp, id, err)
+ }
+
+ return nil
+ })
+}
+
+// PruneContainerExitCodes removes exit codes older than 5 minutes.
+func (s *BoltState) PruneContainerExitCodes() error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ toRemoveIDs := []string{}
+
+ threshold := time.Minute * 5
+ err = db.View(func(tx *bolt.Tx) error {
+ timeStampBucket, err := getExitCodeTimeStampBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ return timeStampBucket.ForEach(func(rawID, rawTimeStamp []byte) error {
+ var timeStamp time.Time
+ if err := timeStamp.UnmarshalText(rawTimeStamp); err != nil {
+ return fmt.Errorf("converting raw time stamp %v of container %s from DB: %w", rawTimeStamp, string(rawID), err)
+ }
+ if time.Since(timeStamp) > threshold {
+ toRemoveIDs = append(toRemoveIDs, string(rawID))
+ }
+ return nil
+ })
+ })
+ if err != nil {
+ return fmt.Errorf("reading exit codes to prune: %w", err)
+ }
+
+ if len(toRemoveIDs) > 0 {
+ err = db.Update(func(tx *bolt.Tx) error {
+ exitCodeBucket, err := getExitCodeBucket(tx)
+ if err != nil {
+ return err
+ }
+ timeStampBucket, err := getExitCodeTimeStampBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ var finalErr error
+ for _, id := range toRemoveIDs {
+ rawID := []byte(id)
+ if err := exitCodeBucket.Delete(rawID); err != nil {
+ if finalErr != nil {
+ logrus.Error(finalErr)
+ }
+ finalErr = fmt.Errorf("removing exit code of container %s from DB: %w", id, err)
+ }
+ if err := timeStampBucket.Delete(rawID); err != nil {
+ if finalErr != nil {
+ logrus.Error(finalErr)
+ }
+ finalErr = fmt.Errorf("removing exit code timestamp of container %s from DB: %w", id, err)
+ }
+ }
+
+ return finalErr
+ })
+ if err != nil {
+ return fmt.Errorf("pruning exit codes: %w", err)
+ }
+ }
+
+ return nil
+}
+
// AddExecSession adds an exec session to the state.
func (s *BoltState) AddExecSession(ctr *Container, session *ExecSession) error {
if !s.valid {
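
Taken together, the four methods above give container exit codes a small lifecycle: record on container death, read by waiters such as podman wait, prune once past the five-minute threshold. A hedged caller-side sketch; the exitCodeStore interface and reportExit helper are illustrative only, not part of this patch:

package sketch

import (
	"fmt"
	"time"
)

// exitCodeStore captures just the four methods this patch adds;
// BoltState is one implementation.
type exitCodeStore interface {
	AddContainerExitCode(id string, exitCode int32) error
	GetContainerExitCode(id string) (int32, error)
	GetContainerExitCodeTimeStamp(id string) (*time.Time, error)
	PruneContainerExitCodes() error
}

// reportExit is a hypothetical caller walking the full lifecycle.
func reportExit(s exitCodeStore, id string) error {
	if err := s.AddContainerExitCode(id, 137); err != nil { // recorded when the container dies
		return err
	}
	code, err := s.GetContainerExitCode(id) // read later, e.g. by podman wait
	if err != nil {
		return err
	}
	ts, err := s.GetContainerExitCodeTimeStamp(id)
	if err != nil {
		return err
	}
	fmt.Printf("container %s exited %d at %s\n", id, code, ts)
	// Entries older than the five-minute threshold are dropped here.
	return s.PruneContainerExitCodes()
}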
@@ -1341,25 +1621,25 @@ func (s *BoltState) AddExecSession(ctr *Container, session *ExecSession) error {
dbCtr := ctrBucket.Bucket(ctrID)
if dbCtr == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not present in the database", ctr.ID())
+ return fmt.Errorf("container %s is not present in the database: %w", ctr.ID(), define.ErrNoSuchCtr)
}
ctrExecSessionBucket, err := dbCtr.CreateBucketIfNotExists(execBkt)
if err != nil {
- return errors.Wrapf(err, "error creating exec sessions bucket for container %s", ctr.ID())
+ return fmt.Errorf("error creating exec sessions bucket for container %s: %w", ctr.ID(), err)
}
execExists := execBucket.Get(sessionID)
if execExists != nil {
- return errors.Wrapf(define.ErrExecSessionExists, "an exec session with ID %s already exists", session.ID())
+ return fmt.Errorf("an exec session with ID %s already exists: %w", session.ID(), define.ErrExecSessionExists)
}
if err := execBucket.Put(sessionID, ctrID); err != nil {
- return errors.Wrapf(err, "error adding exec session %s to DB", session.ID())
+ return fmt.Errorf("error adding exec session %s to DB: %w", session.ID(), err)
}
if err := ctrExecSessionBucket.Put(sessionID, ctrID); err != nil {
- return errors.Wrapf(err, "error adding exec session %s to container %s in DB", session.ID(), ctr.ID())
+ return fmt.Errorf("error adding exec session %s to container %s in DB: %w", session.ID(), ctr.ID(), err)
}
return nil
@@ -1393,7 +1673,7 @@ func (s *BoltState) GetExecSession(id string) (string, error) {
ctr := execBucket.Get([]byte(id))
if ctr == nil {
- return errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found", id)
+ return fmt.Errorf("no exec session with ID %s found: %w", id, define.ErrNoSuchExecSession)
}
ctrID = string(ctr)
return nil
@@ -1432,11 +1712,11 @@ func (s *BoltState) RemoveExecSession(session *ExecSession) error {
}
// Check that container ID matches
if string(sessionExists) != session.ContainerID() {
- return errors.Wrapf(define.ErrInternal, "database inconsistency: exec session %s points to container %s in state but %s in database", session.ID(), session.ContainerID(), string(sessionExists))
+ return fmt.Errorf("database inconsistency: exec session %s points to container %s in state but %s in database: %w", session.ID(), session.ContainerID(), string(sessionExists), define.ErrInternal)
}
if err := execBucket.Delete(sessionID); err != nil {
- return errors.Wrapf(err, "error removing exec session %s from database", session.ID())
+ return fmt.Errorf("error removing exec session %s from database: %w", session.ID(), err)
}
dbCtr := ctrBucket.Bucket(containerID)
@@ -1459,7 +1739,7 @@ func (s *BoltState) RemoveExecSession(session *ExecSession) error {
ctrSessionExists := ctrExecBucket.Get(sessionID)
if ctrSessionExists != nil {
if err := ctrExecBucket.Delete(sessionID); err != nil {
- return errors.Wrapf(err, "error removing exec session %s from container %s in database", session.ID(), session.ContainerID())
+ return fmt.Errorf("error removing exec session %s from container %s in database: %w", session.ID(), session.ContainerID(), err)
}
}
@@ -1567,7 +1847,7 @@ func (s *BoltState) RemoveContainerExecSessions(ctr *Container) error {
for _, session := range sessions {
if err := ctrExecSessions.Delete([]byte(session)); err != nil {
- return errors.Wrapf(err, "error removing container %s exec session %s from database", ctr.ID(), session)
+ return fmt.Errorf("error removing container %s exec session %s from database: %w", ctr.ID(), session, err)
}
// Check if the session exists in the global table
// before removing. It should, but in cases where the DB
@@ -1578,10 +1858,10 @@ func (s *BoltState) RemoveContainerExecSessions(ctr *Container) error {
continue
}
if string(sessionExists) != ctr.ID() {
- return errors.Wrapf(define.ErrInternal, "database mismatch: exec session %s is associated with containers %s and %s", session, ctr.ID(), string(sessionExists))
+ return fmt.Errorf("database mismatch: exec session %s is associated with containers %s and %s: %w", session, ctr.ID(), string(sessionExists), define.ErrInternal)
}
if err := execBucket.Delete([]byte(session)); err != nil {
- return errors.Wrapf(err, "error removing container %s exec session %s from exec sessions", ctr.ID(), session)
+ return fmt.Errorf("error removing container %s exec session %s from exec sessions: %w", ctr.ID(), session, err)
}
}
@@ -1604,7 +1884,7 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf
newCfgJSON, err := json.Marshal(newCfg)
if err != nil {
- return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID())
+ return fmt.Errorf("error marshalling new configuration JSON for container %s: %w", ctr.ID(), err)
}
db, err := s.getDBCon()
@@ -1622,11 +1902,11 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf
ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
if ctrDB == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %q found in DB", ctr.ID())
+ return fmt.Errorf("no container with ID %q found in DB: %w", ctr.ID(), define.ErrNoSuchCtr)
}
if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
- return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID())
+ return fmt.Errorf("error updating container %s config JSON: %w", ctr.ID(), err)
}
return nil
@@ -1649,15 +1929,15 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName
}
if newName != "" && newCfg.Name != newName {
- return errors.Wrapf(define.ErrInvalidArg, "new name %s for container %s must match name in given container config", newName, ctr.ID())
+ return fmt.Errorf("new name %s for container %s must match name in given container config: %w", newName, ctr.ID(), define.ErrInvalidArg)
}
if newName != "" && oldName == "" {
- return errors.Wrapf(define.ErrInvalidArg, "must provide old name for container if a new name is given")
+ return fmt.Errorf("must provide old name for container if a new name is given: %w", define.ErrInvalidArg)
}
newCfgJSON, err := json.Marshal(newCfg)
if err != nil {
- return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID())
+ return fmt.Errorf("error marshalling new configuration JSON for container %s: %w", ctr.ID(), err)
}
db, err := s.getDBCon()
@@ -1689,7 +1969,7 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName
// rename.
needsRename = false
} else {
- return errors.Wrapf(define.ErrCtrExists, "name %s already in use, cannot rename container %s", newName, ctr.ID())
+ return fmt.Errorf("name %s already in use, cannot rename container %s: %w", newName, ctr.ID(), define.ErrCtrExists)
}
}
@@ -1698,16 +1978,16 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName
// buckets are ID-indexed so we just need to
// overwrite the values there.
if err := namesBkt.Delete([]byte(oldName)); err != nil {
- return errors.Wrapf(err, "error deleting container %s old name from DB for rename", ctr.ID())
+ return fmt.Errorf("error deleting container %s old name from DB for rename: %w", ctr.ID(), err)
}
if err := idBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
- return errors.Wrapf(err, "error renaming container %s in ID bucket in DB", ctr.ID())
+ return fmt.Errorf("error renaming container %s in ID bucket in DB: %w", ctr.ID(), err)
}
if err := namesBkt.Put([]byte(newName), []byte(ctr.ID())); err != nil {
- return errors.Wrapf(err, "error adding new name %s for container %s in DB", newName, ctr.ID())
+ return fmt.Errorf("error adding new name %s for container %s in DB: %w", newName, ctr.ID(), err)
}
if err := allCtrsBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
- return errors.Wrapf(err, "error renaming container %s in all containers bucket in DB", ctr.ID())
+ return fmt.Errorf("error renaming container %s in all containers bucket in DB: %w", ctr.ID(), err)
}
if ctr.config.Pod != "" {
podsBkt, err := getPodBucket(tx)
@@ -1716,14 +1996,14 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName
}
podBkt := podsBkt.Bucket([]byte(ctr.config.Pod))
if podBkt == nil {
- return errors.Wrapf(define.ErrInternal, "bucket for pod %s does not exist", ctr.config.Pod)
+ return fmt.Errorf("bucket for pod %s does not exist: %w", ctr.config.Pod, define.ErrInternal)
}
podCtrBkt := podBkt.Bucket(containersBkt)
if podCtrBkt == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", ctr.config.Pod)
+ return fmt.Errorf("pod %s does not have a containers bucket: %w", ctr.config.Pod, define.ErrInternal)
}
if err := podCtrBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
- return errors.Wrapf(err, "error renaming container %s in pod %s members bucket", ctr.ID(), ctr.config.Pod)
+ return fmt.Errorf("error renaming container %s in pod %s members bucket: %w", ctr.ID(), ctr.config.Pod, err)
}
}
}
@@ -1737,11 +2017,11 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName
ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
if ctrDB == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %q found in DB", ctr.ID())
+ return fmt.Errorf("no container with ID %q found in DB: %w", ctr.ID(), define.ErrNoSuchCtr)
}
if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
- return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID())
+ return fmt.Errorf("error updating container %s config JSON: %w", ctr.ID(), err)
}
return nil
@@ -1763,7 +2043,7 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
newCfgJSON, err := json.Marshal(newCfg)
if err != nil {
- return errors.Wrapf(err, "error marshalling new configuration JSON for pod %s", pod.ID())
+ return fmt.Errorf("error marshalling new configuration JSON for pod %s: %w", pod.ID(), err)
}
db, err := s.getDBCon()
@@ -1781,11 +2061,11 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
podDB := podBkt.Bucket([]byte(pod.ID()))
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
+ return fmt.Errorf("no pod with ID %s found in DB: %w", pod.ID(), define.ErrNoSuchPod)
}
if err := podDB.Put(configKey, newCfgJSON); err != nil {
- return errors.Wrapf(err, "error updating pod %s config JSON", pod.ID())
+ return fmt.Errorf("error updating pod %s config JSON: %w", pod.ID(), err)
}
return nil
@@ -1807,7 +2087,7 @@ func (s *BoltState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) er
newCfgJSON, err := json.Marshal(newCfg)
if err != nil {
- return errors.Wrapf(err, "error marshalling new configuration JSON for volume %q", volume.Name())
+ return fmt.Errorf("error marshalling new configuration JSON for volume %q: %w", volume.Name(), err)
}
db, err := s.getDBCon()
@@ -1825,11 +2105,11 @@ func (s *BoltState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) er
volDB := volBkt.Bucket([]byte(volume.Name()))
if volDB == nil {
volume.valid = false
- return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %q found in DB", volume.Name())
+ return fmt.Errorf("no volume with name %q found in DB: %w", volume.Name(), define.ErrNoSuchVolume)
}
if err := volDB.Put(configKey, newCfgJSON); err != nil {
- return errors.Wrapf(err, "error updating volume %q config JSON", volume.Name())
+ return fmt.Errorf("error updating volume %q config JSON: %w", volume.Name(), err)
}
return nil
@@ -1953,7 +2233,7 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
}
if strings.HasPrefix(string(checkID), idOrName) {
if exists {
- return errors.Wrapf(define.ErrPodExists, "more than one result for ID or name %s", idOrName)
+ return fmt.Errorf("more than one result for ID or name %s: %w", idOrName, define.ErrPodExists)
}
id = checkID
exists = true
@@ -1965,9 +2245,9 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
return err
} else if !exists {
if isCtr {
- return errors.Wrapf(define.ErrNoSuchPod, "%s is a container, not a pod", idOrName)
+ return fmt.Errorf("%s is a container, not a pod: %w", idOrName, define.ErrNoSuchPod)
}
- return errors.Wrapf(define.ErrNoSuchPod, "no pod with name or ID %s found", idOrName)
+ return fmt.Errorf("no pod with name or ID %s found: %w", idOrName, define.ErrNoSuchPod)
}
// We might have found a container ID, but it's OK
@@ -2043,7 +2323,7 @@ func (s *BoltState) PodHasContainer(pod *Pod, id string) (bool, error) {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return false, errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return false, fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
ctrID := []byte(id)
@@ -2067,13 +2347,13 @@ func (s *BoltState) PodHasContainer(pod *Pod, id string) (bool, error) {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "pod %s not found in database", pod.ID())
+ return fmt.Errorf("pod %s not found in database: %w", pod.ID(), define.ErrNoSuchPod)
}
// Get pod containers bucket
podCtrs := podDB.Bucket(containersBkt)
if podCtrs == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
+ return fmt.Errorf("pod %s missing containers bucket in DB: %w", pod.ID(), define.ErrInternal)
}
// Don't bother with a namespace check on the container -
@@ -2106,7 +2386,7 @@ func (s *BoltState) PodContainersByID(pod *Pod) ([]string, error) {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return nil, errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return nil, fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
podID := []byte(pod.ID())
@@ -2129,13 +2409,13 @@ func (s *BoltState) PodContainersByID(pod *Pod) ([]string, error) {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "pod %s not found in database", pod.ID())
+ return fmt.Errorf("pod %s not found in database: %w", pod.ID(), define.ErrNoSuchPod)
}
// Get pod containers bucket
podCtrs := podDB.Bucket(containersBkt)
if podCtrs == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
+ return fmt.Errorf("pod %s missing containers bucket in DB: %w", pod.ID(), define.ErrInternal)
}
// Iterate through all containers in the pod
@@ -2168,7 +2448,7 @@ func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return nil, errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return nil, fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
podID := []byte(pod.ID())
@@ -2196,13 +2476,13 @@ func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "pod %s not found in database", pod.ID())
+ return fmt.Errorf("pod %s not found in database: %w", pod.ID(), define.ErrNoSuchPod)
}
// Get pod containers bucket
podCtrs := podDB.Bucket(containersBkt)
if podCtrs == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
+ return fmt.Errorf("pod %s missing containers bucket in DB: %w", pod.ID(), define.ErrInternal)
}
// Iterate through all containers in the pod
@@ -2242,7 +2522,7 @@ func (s *BoltState) AddVolume(volume *Volume) error {
volConfigJSON, err := json.Marshal(volume.config)
if err != nil {
- return errors.Wrapf(err, "error marshalling volume %s config to JSON", volume.Name())
+ return fmt.Errorf("error marshalling volume %s config to JSON: %w", volume.Name(), err)
}
// Volume state is allowed to not exist
@@ -2250,7 +2530,7 @@ func (s *BoltState) AddVolume(volume *Volume) error {
if volume.state != nil {
volStateJSON, err = json.Marshal(volume.state)
if err != nil {
- return errors.Wrapf(err, "error marshalling volume %s state to JSON", volume.Name())
+ return fmt.Errorf("error marshalling volume %s state to JSON: %w", volume.Name(), err)
}
}
@@ -2274,34 +2554,34 @@ func (s *BoltState) AddVolume(volume *Volume) error {
// Check if we already have a volume with the given name
volExists := allVolsBkt.Get(volName)
if volExists != nil {
- return errors.Wrapf(define.ErrVolumeExists, "name %s is in use", volume.Name())
+ return fmt.Errorf("name %s is in use: %w", volume.Name(), define.ErrVolumeExists)
}
// We are good to add the volume
// Make a bucket for it
newVol, err := volBkt.CreateBucket(volName)
if err != nil {
- return errors.Wrapf(err, "error creating bucket for volume %s", volume.Name())
+ return fmt.Errorf("error creating bucket for volume %s: %w", volume.Name(), err)
}
// Make a subbucket for the containers using the volume. Dependent container IDs will be added to and removed from
// this bucket in addContainer/removeContainer
if _, err := newVol.CreateBucket(volDependenciesBkt); err != nil {
- return errors.Wrapf(err, "error creating bucket for containers using volume %s", volume.Name())
+ return fmt.Errorf("error creating bucket for containers using volume %s: %w", volume.Name(), err)
}
if err := newVol.Put(configKey, volConfigJSON); err != nil {
- return errors.Wrapf(err, "error storing volume %s configuration in DB", volume.Name())
+ return fmt.Errorf("error storing volume %s configuration in DB: %w", volume.Name(), err)
}
if volStateJSON != nil {
if err := newVol.Put(stateKey, volStateJSON); err != nil {
- return errors.Wrapf(err, "error storing volume %s state in DB", volume.Name())
+ return fmt.Errorf("error storing volume %s state in DB: %w", volume.Name(), err)
}
}
if err := allVolsBkt.Put(volName, volName); err != nil {
- return errors.Wrapf(err, "error storing volume %s in all volumes bucket in DB", volume.Name())
+ return fmt.Errorf("error storing volume %s in all volumes bucket in DB: %w", volume.Name(), err)
}
return nil
@@ -2343,7 +2623,7 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
volDB := volBkt.Bucket(volName)
if volDB == nil {
volume.valid = false
- return errors.Wrapf(define.ErrNoSuchVolume, "volume %s does not exist in DB", volume.Name())
+ return fmt.Errorf("volume %s does not exist in DB: %w", volume.Name(), define.ErrNoSuchVolume)
}
// Check if volume is not being used by any container
@@ -2370,20 +2650,20 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
return nil
})
if err != nil {
- return errors.Wrapf(err, "error getting list of dependencies from dependencies bucket for volumes %q", volume.Name())
+ return fmt.Errorf("error getting list of dependencies from dependencies bucket for volumes %q: %w", volume.Name(), err)
}
if len(deps) > 0 {
- return errors.Wrapf(define.ErrVolumeBeingUsed, "volume %s is being used by container(s) %s", volume.Name(), strings.Join(deps, ","))
+ return fmt.Errorf("volume %s is being used by container(s) %s: %w", volume.Name(), strings.Join(deps, ","), define.ErrVolumeBeingUsed)
}
}
// volume is ready for removal
// Let's kick it out
if err := allVolsBkt.Delete(volName); err != nil {
- return errors.Wrapf(err, "error removing volume %s from all volumes bucket in DB", volume.Name())
+ return fmt.Errorf("error removing volume %s from all volumes bucket in DB: %w", volume.Name(), err)
}
if err := volBkt.DeleteBucket(volName); err != nil {
- return errors.Wrapf(err, "error removing volume %s from DB", volume.Name())
+ return fmt.Errorf("error removing volume %s from DB: %w", volume.Name(), err)
}
return nil
@@ -2419,7 +2699,7 @@ func (s *BoltState) UpdateVolume(volume *Volume) error {
volToUpdate := volBucket.Bucket(volumeName)
if volToUpdate == nil {
volume.valid = false
- return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database", volume.Name())
+ return fmt.Errorf("no volume with name %s found in database: %w", volume.Name(), define.ErrNoSuchVolume)
}
stateBytes := volToUpdate.Get(stateKey)
@@ -2430,7 +2710,7 @@ func (s *BoltState) UpdateVolume(volume *Volume) error {
}
if err := json.Unmarshal(stateBytes, newState); err != nil {
- return errors.Wrapf(err, "error unmarshalling volume %s state", volume.Name())
+ return fmt.Errorf("error unmarshalling volume %s state: %w", volume.Name(), err)
}
return nil
@@ -2460,7 +2740,7 @@ func (s *BoltState) SaveVolume(volume *Volume) error {
if volume.state != nil {
stateJSON, err := json.Marshal(volume.state)
if err != nil {
- return errors.Wrapf(err, "error marshalling volume %s state to JSON", volume.Name())
+ return fmt.Errorf("error marshalling volume %s state to JSON: %w", volume.Name(), err)
}
newStateJSON = stateJSON
}
@@ -2480,7 +2760,7 @@ func (s *BoltState) SaveVolume(volume *Volume) error {
volToUpdate := volBucket.Bucket(volumeName)
if volToUpdate == nil {
volume.valid = false
- return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database", volume.Name())
+ return fmt.Errorf("no volume with name %s found in database: %w", volume.Name(), define.ErrNoSuchVolume)
}
return volToUpdate.Put(stateKey, newStateJSON)
@@ -2517,7 +2797,7 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) {
// This check can be removed if performance becomes an
// issue, but much less helpful errors will be produced
if volExists == nil {
- return errors.Wrapf(define.ErrInternal, "inconsistency in state - volume %s is in all volumes bucket but volume not found", string(id))
+ return fmt.Errorf("inconsistency in state - volume %s is in all volumes bucket but volume not found: %w", string(id), define.ErrInternal)
}
volume := new(Volume)
@@ -2525,7 +2805,7 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) {
volume.state = new(VolumeState)
if err := s.getVolumeFromDB(id, volume, volBucket); err != nil {
- if errors.Cause(err) != define.ErrNSMismatch {
+ if !errors.Is(err, define.ErrNSMismatch) {
logrus.Errorf("Retrieving volume %s from the database: %v", string(id), err)
}
} else {
@@ -2624,7 +2904,7 @@ func (s *BoltState) LookupVolume(name string) (*Volume, error) {
err = allVolsBkt.ForEach(func(checkName, checkName2 []byte) error {
if strings.HasPrefix(string(checkName), name) {
if foundMatch {
- return errors.Wrapf(define.ErrVolumeExists, "more than one result for volume name %q", name)
+ return fmt.Errorf("more than one result for volume name %q: %w", name, define.ErrVolumeExists)
}
foundMatch = true
volName = checkName
@@ -2636,7 +2916,7 @@ func (s *BoltState) LookupVolume(name string) (*Volume, error) {
}
if !foundMatch {
- return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %q found", name)
+ return fmt.Errorf("no volume with name %q found: %w", name, define.ErrNoSuchVolume)
}
return s.getVolumeFromDB(volName, volume, volBkt)
@@ -2722,12 +3002,12 @@ func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
volDB := volBucket.Bucket([]byte(volume.Name()))
if volDB == nil {
volume.valid = false
- return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in DB", volume.Name())
+ return fmt.Errorf("no volume with name %s found in DB: %w", volume.Name(), define.ErrNoSuchVolume)
}
dependsBkt := volDB.Bucket(volDependenciesBkt)
if dependsBkt == nil {
- return errors.Wrapf(define.ErrInternal, "volume %s has no dependencies bucket", volume.Name())
+ return fmt.Errorf("volume %s has no dependencies bucket: %w", volume.Name(), define.ErrInternal)
}
// Iterate through and add dependencies
@@ -2767,7 +3047,7 @@ func (s *BoltState) AddPod(pod *Pod) error {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
podID := []byte(pod.ID())
@@ -2780,12 +3060,12 @@ func (s *BoltState) AddPod(pod *Pod) error {
podConfigJSON, err := json.Marshal(pod.config)
if err != nil {
- return errors.Wrapf(err, "error marshalling pod %s config to JSON", pod.ID())
+ return fmt.Errorf("error marshalling pod %s config to JSON: %w", pod.ID(), err)
}
podStateJSON, err := json.Marshal(pod.state)
if err != nil {
- return errors.Wrapf(err, "error marshalling pod %s state to JSON", pod.ID())
+ return fmt.Errorf("error marshalling pod %s state to JSON: %w", pod.ID(), err)
}
db, err := s.getDBCon()
@@ -2827,7 +3107,7 @@ func (s *BoltState) AddPod(pod *Pod) error {
if allPodsBkt.Get(idExist) == nil {
err = define.ErrCtrExists
}
- return errors.Wrapf(err, "ID \"%s\" is in use", pod.ID())
+ return fmt.Errorf("ID \"%s\" is in use: %w", pod.ID(), err)
}
nameExist := namesBkt.Get(podName)
if nameExist != nil {
@@ -2835,47 +3115,47 @@ func (s *BoltState) AddPod(pod *Pod) error {
if allPodsBkt.Get(nameExist) == nil {
err = define.ErrCtrExists
}
- return errors.Wrapf(err, "name \"%s\" is in use", pod.Name())
+ return fmt.Errorf("name \"%s\" is in use: %w", pod.Name(), err)
}
// We are good to add the pod
// Make a bucket for it
newPod, err := podBkt.CreateBucket(podID)
if err != nil {
- return errors.Wrapf(err, "error creating bucket for pod %s", pod.ID())
+ return fmt.Errorf("error creating bucket for pod %s: %w", pod.ID(), err)
}
// Make a subbucket for pod containers
if _, err := newPod.CreateBucket(containersBkt); err != nil {
- return errors.Wrapf(err, "error creating bucket for pod %s containers", pod.ID())
+ return fmt.Errorf("error creating bucket for pod %s containers: %w", pod.ID(), err)
}
if err := newPod.Put(configKey, podConfigJSON); err != nil {
- return errors.Wrapf(err, "error storing pod %s configuration in DB", pod.ID())
+ return fmt.Errorf("error storing pod %s configuration in DB: %w", pod.ID(), err)
}
if err := newPod.Put(stateKey, podStateJSON); err != nil {
- return errors.Wrapf(err, "error storing pod %s state JSON in DB", pod.ID())
+ return fmt.Errorf("error storing pod %s state JSON in DB: %w", pod.ID(), err)
}
if podNamespace != nil {
if err := newPod.Put(namespaceKey, podNamespace); err != nil {
- return errors.Wrapf(err, "error storing pod %s namespace in DB", pod.ID())
+ return fmt.Errorf("error storing pod %s namespace in DB: %w", pod.ID(), err)
}
if err := nsBkt.Put(podID, podNamespace); err != nil {
- return errors.Wrapf(err, "error storing pod %s namespace in DB", pod.ID())
+ return fmt.Errorf("error storing pod %s namespace in DB: %w", pod.ID(), err)
}
}
// Add us to the ID and names buckets
if err := idsBkt.Put(podID, podName); err != nil {
- return errors.Wrapf(err, "error storing pod %s ID in DB", pod.ID())
+ return fmt.Errorf("error storing pod %s ID in DB: %w", pod.ID(), err)
}
if err := namesBkt.Put(podName, podID); err != nil {
- return errors.Wrapf(err, "error storing pod %s name in DB", pod.Name())
+ return fmt.Errorf("error storing pod %s name in DB: %w", pod.Name(), err)
}
if err := allPodsBkt.Put(podID, podName); err != nil {
- return errors.Wrapf(err, "error storing pod %s in all pods bucket in DB", pod.ID())
+ return fmt.Errorf("error storing pod %s in all pods bucket in DB: %w", pod.ID(), err)
}
return nil
@@ -2899,7 +3179,7 @@ func (s *BoltState) RemovePod(pod *Pod) error {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
podID := []byte(pod.ID())
@@ -2941,7 +3221,7 @@ func (s *BoltState) RemovePod(pod *Pod) error {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "pod %s does not exist in DB", pod.ID())
+ return fmt.Errorf("pod %s does not exist in DB: %w", pod.ID(), define.ErrNoSuchPod)
}
// Check if pod is empty
@@ -2953,26 +3233,26 @@ func (s *BoltState) RemovePod(pod *Pod) error {
if podCtrsBkt != nil {
cursor := podCtrsBkt.Cursor()
if id, _ := cursor.First(); id != nil {
- return errors.Wrapf(define.ErrCtrExists, "pod %s is not empty", pod.ID())
+ return fmt.Errorf("pod %s is not empty: %w", pod.ID(), define.ErrCtrExists)
}
}
// Pod is empty, and ready for removal
// Let's kick it out
if err := idsBkt.Delete(podID); err != nil {
- return errors.Wrapf(err, "error removing pod %s ID from DB", pod.ID())
+ return fmt.Errorf("error removing pod %s ID from DB: %w", pod.ID(), err)
}
if err := namesBkt.Delete(podName); err != nil {
- return errors.Wrapf(err, "error removing pod %s name (%s) from DB", pod.ID(), pod.Name())
+ return fmt.Errorf("error removing pod %s name (%s) from DB: %w", pod.ID(), pod.Name(), err)
}
if err := nsBkt.Delete(podID); err != nil {
- return errors.Wrapf(err, "error removing pod %s namespace from DB", pod.ID())
+ return fmt.Errorf("error removing pod %s namespace from DB: %w", pod.ID(), err)
}
if err := allPodsBkt.Delete(podID); err != nil {
- return errors.Wrapf(err, "error removing pod %s ID from all pods bucket in DB", pod.ID())
+ return fmt.Errorf("error removing pod %s ID from all pods bucket in DB: %w", pod.ID(), err)
}
if err := podBkt.DeleteBucket(podID); err != nil {
- return errors.Wrapf(err, "error removing pod %s from DB", pod.ID())
+ return fmt.Errorf("error removing pod %s from DB: %w", pod.ID(), err)
}
return nil
@@ -2995,7 +3275,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
podID := []byte(pod.ID())
@@ -3036,12 +3316,12 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "pod %s does not exist in DB", pod.ID())
+ return fmt.Errorf("pod %s does not exist in DB: %w", pod.ID(), define.ErrNoSuchPod)
}
podCtrsBkt := podDB.Bucket(containersBkt)
if podCtrsBkt == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", pod.ID())
+ return fmt.Errorf("pod %s does not have a containers bucket: %w", pod.ID(), define.ErrInternal)
}
// Traverse all containers in the pod with a cursor
@@ -3052,7 +3332,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
if ctr == nil {
// This should never happen
// State is inconsistent
- return errors.Wrapf(define.ErrNoSuchCtr, "pod %s referenced nonexistent container %s", pod.ID(), string(id))
+ return fmt.Errorf("pod %s referenced nonexistent container %s: %w", pod.ID(), string(id), define.ErrNoSuchCtr)
}
ctrDeps := ctr.Bucket(dependenciesBkt)
// This should never be nil, but if it is, we're
@@ -3061,7 +3341,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
err = ctrDeps.ForEach(func(depID, name []byte) error {
exists := podCtrsBkt.Get(depID)
if exists == nil {
- return errors.Wrapf(define.ErrCtrExists, "container %s has dependency %s outside of pod %s", string(id), string(depID), pod.ID())
+ return fmt.Errorf("container %s has dependency %s outside of pod %s: %w", string(id), string(depID), pod.ID(), define.ErrCtrExists)
}
return nil
})
@@ -3073,19 +3353,19 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
// Dependencies are set, we're clear to remove
if err := ctrBkt.DeleteBucket(id); err != nil {
- return errors.Wrapf(define.ErrInternal, "error deleting container %s from DB", string(id))
+ return fmt.Errorf("error deleting container %s from DB: %w", string(id), define.ErrInternal)
}
if err := idsBkt.Delete(id); err != nil {
- return errors.Wrapf(err, "error deleting container %s ID in DB", string(id))
+ return fmt.Errorf("error deleting container %s ID in DB: %w", string(id), err)
}
if err := namesBkt.Delete(name); err != nil {
- return errors.Wrapf(err, "error deleting container %s name in DB", string(id))
+ return fmt.Errorf("error deleting container %s name in DB: %w", string(id), err)
}
if err := allCtrsBkt.Delete(id); err != nil {
- return errors.Wrapf(err, "error deleting container %s ID from all containers bucket in DB", string(id))
+ return fmt.Errorf("error deleting container %s ID from all containers bucket in DB: %w", string(id), err)
}
return nil
@@ -3096,10 +3376,10 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
// Delete and recreate the bucket to empty it
if err := podDB.DeleteBucket(containersBkt); err != nil {
- return errors.Wrapf(err, "error removing pod %s containers bucket", pod.ID())
+ return fmt.Errorf("error removing pod %s containers bucket: %w", pod.ID(), err)
}
if _, err := podDB.CreateBucket(containersBkt); err != nil {
- return errors.Wrapf(err, "error recreating pod %s containers bucket", pod.ID())
+ return fmt.Errorf("error recreating pod %s containers bucket: %w", pod.ID(), err)
}
return nil
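
Delete-and-recreate, as in the hunk above, is the idiomatic way to empty a bbolt bucket. A generic sketch under assumed names (emptyBucket is not part of this patch; nested buckets expose the same DeleteBucket/CreateBucket methods used on podDB above):

package libpod

import bolt "go.etcd.io/bbolt"

// emptyBucket drops and recreates a top-level bucket in one write
// transaction, discarding every key it held.
func emptyBucket(db *bolt.DB, name []byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		if err := tx.DeleteBucket(name); err != nil {
			return err
		}
		_, err := tx.CreateBucket(name)
		return err
	})
}
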
@@ -3127,7 +3407,7 @@ func (s *BoltState) AddContainerToPod(pod *Pod, ctr *Container) error {
}
if ctr.config.Pod != pod.ID() {
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not part of pod %s", ctr.ID(), pod.ID())
+ return fmt.Errorf("container %s is not part of pod %s: %w", ctr.ID(), pod.ID(), define.ErrNoSuchCtr)
}
return s.addContainer(ctr, pod)
@@ -3146,19 +3426,19 @@ func (s *BoltState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
if s.namespace != "" {
if s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
if s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "container %s in in namespace %q but we are in namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return fmt.Errorf("container %s in in namespace %q but we are in namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
}
if ctr.config.Pod == "" {
- return errors.Wrapf(define.ErrNoSuchPod, "container %s is not part of a pod, use RemoveContainer instead", ctr.ID())
+ return fmt.Errorf("container %s is not part of a pod, use RemoveContainer instead: %w", ctr.ID(), define.ErrNoSuchPod)
}
if ctr.config.Pod != pod.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "container %s is not part of pod %s", ctr.ID(), pod.ID())
+ return fmt.Errorf("container %s is not part of pod %s: %w", ctr.ID(), pod.ID(), define.ErrInvalidArg)
}
db, err := s.getDBCon()
@@ -3184,7 +3464,7 @@ func (s *BoltState) UpdatePod(pod *Pod) error {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
newState := new(podState)
@@ -3206,17 +3486,17 @@ func (s *BoltState) UpdatePod(pod *Pod) error {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in database", pod.ID())
+ return fmt.Errorf("no pod with ID %s found in database: %w", pod.ID(), define.ErrNoSuchPod)
}
// Get the pod state JSON
podStateBytes := podDB.Get(stateKey)
if podStateBytes == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s is missing state key in DB", pod.ID())
+ return fmt.Errorf("pod %s is missing state key in DB: %w", pod.ID(), define.ErrInternal)
}
if err := json.Unmarshal(podStateBytes, newState); err != nil {
- return errors.Wrapf(err, "error unmarshalling pod %s state JSON", pod.ID())
+ return fmt.Errorf("error unmarshalling pod %s state JSON: %w", pod.ID(), err)
}
return nil
@@ -3241,12 +3521,12 @@ func (s *BoltState) SavePod(pod *Pod) error {
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q but we are in namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
stateJSON, err := json.Marshal(pod.state)
if err != nil {
- return errors.Wrapf(err, "error marshalling pod %s state to JSON", pod.ID())
+ return fmt.Errorf("error marshalling pod %s state to JSON: %w", pod.ID(), err)
}
db, err := s.getDBCon()
@@ -3266,12 +3546,12 @@ func (s *BoltState) SavePod(pod *Pod) error {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in database", pod.ID())
+ return fmt.Errorf("no pod with ID %s found in database: %w", pod.ID(), define.ErrNoSuchPod)
}
// Set the pod state JSON
if err := podDB.Put(stateKey, stateJSON); err != nil {
- return errors.Wrapf(err, "error updating pod %s state in database", pod.ID())
+ return fmt.Errorf("error updating pod %s state in database: %w", pod.ID(), err)
}
return nil
@@ -3313,7 +3593,7 @@ func (s *BoltState) AllPods() ([]*Pod, error) {
// This check can be removed if performance becomes an
// issue, but much less helpful errors will be produced
if podExists == nil {
- return errors.Wrapf(define.ErrInternal, "inconsistency in state - pod %s is in all pods bucket but pod not found", string(id))
+ return fmt.Errorf("inconsistency in state - pod %s is in all pods bucket but pod not found: %w", string(id), define.ErrInternal)
}
pod := new(Pod)
@@ -3321,7 +3601,7 @@ func (s *BoltState) AllPods() ([]*Pod, error) {
pod.state = new(podState)
if err := s.getPodFromDB(id, pod, podBucket); err != nil {
- if errors.Cause(err) != define.ErrNSMismatch {
+ if !errors.Is(err, define.ErrNSMismatch) {
logrus.Errorf("Retrieving pod %s from the database: %v", string(id), err)
}
} else {
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index d6f035af9..f28fadfa9 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -2,6 +2,7 @@ package libpod
import (
"bytes"
+ "fmt"
"os"
"path/filepath"
"runtime"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/storage"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)
@@ -29,6 +29,9 @@ const (
aliasesName = "aliases"
runtimeConfigName = "runtime-config"
+ exitCodeName = "exit-code"
+ exitCodeTimeStampName = "exit-code-time-stamp"
+
configName = "config"
stateName = "state"
dependenciesName = "dependencies"
@@ -65,6 +68,9 @@ var (
volDependenciesBkt = []byte(volCtrDependencies)
networksBkt = []byte(networksName)
+ exitCodeBkt = []byte(exitCodeName)
+ exitCodeTimeStampBkt = []byte(exitCodeTimeStampName)
+
configKey = []byte(configName)
stateKey = []byte(stateName)
netNSKey = []byte(netNSName)
@@ -189,7 +195,7 @@ func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error {
}
if err := configBkt.Put(missing.key, dbValue); err != nil {
- return errors.Wrapf(err, "error updating %s in DB runtime config", missing.name)
+ return fmt.Errorf("error updating %s in DB runtime config: %w", missing.name, err)
}
}
@@ -230,8 +236,8 @@ func readOnlyValidateConfig(bucket *bolt.Bucket, toCheck dbConfigValidation) (bo
return true, nil
}
- return true, errors.Wrapf(define.ErrDBBadConfig, "database %s %q does not match our %s %q",
- toCheck.name, dbValue, toCheck.name, toCheck.runtimeValue)
+ return true, fmt.Errorf("database %s %q does not match our %s %q: %w",
+ toCheck.name, dbValue, toCheck.name, toCheck.runtimeValue, define.ErrDBBadConfig)
}
return true, nil
@@ -248,7 +254,7 @@ func (s *BoltState) getDBCon() (*bolt.DB, error) {
db, err := bolt.Open(s.dbPath, 0600, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error opening database %s", s.dbPath)
+ return nil, fmt.Errorf("error opening database %s: %w", s.dbPath, err)
}
return db, nil
@@ -277,7 +283,7 @@ func (s *BoltState) closeDBCon(db *bolt.DB) error {
func getIDBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(idRegistryBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "id registry bucket not found in DB")
+ return nil, fmt.Errorf("id registry bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -285,7 +291,7 @@ func getIDBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getNamesBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(nameRegistryBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "name registry bucket not found in DB")
+ return nil, fmt.Errorf("name registry bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -293,7 +299,7 @@ func getNamesBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getNSBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(nsRegistryBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "namespace registry bucket not found in DB")
+ return nil, fmt.Errorf("namespace registry bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -301,7 +307,7 @@ func getNSBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getCtrBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(ctrBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "containers bucket not found in DB")
+ return nil, fmt.Errorf("containers bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -309,7 +315,7 @@ func getCtrBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getAllCtrsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(allCtrsBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "all containers bucket not found in DB")
+ return nil, fmt.Errorf("all containers bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -317,7 +323,7 @@ func getAllCtrsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getPodBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(podBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "pods bucket not found in DB")
+ return nil, fmt.Errorf("pods bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -325,7 +331,7 @@ func getPodBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getAllPodsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(allPodsBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "all pods bucket not found in DB")
+ return nil, fmt.Errorf("all pods bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -333,7 +339,7 @@ func getAllPodsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getVolBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(volBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "volumes bucket not found in DB")
+ return nil, fmt.Errorf("volumes bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -341,7 +347,7 @@ func getVolBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(allVolsBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "all volumes bucket not found in DB")
+ return nil, fmt.Errorf("all volumes bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -349,7 +355,7 @@ func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getExecBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(execBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "exec bucket not found in DB")
+ return nil, fmt.Errorf("exec bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
@@ -357,7 +363,23 @@ func getExecBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(runtimeConfigBkt)
if bkt == nil {
- return nil, errors.Wrapf(define.ErrDBBadConfig, "runtime configuration bucket not found in DB")
+ return nil, fmt.Errorf("runtime configuration bucket not found in DB: %w", define.ErrDBBadConfig)
+ }
+ return bkt, nil
+}
+
+func getExitCodeBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
+ bkt := tx.Bucket(exitCodeBkt)
+ if bkt == nil {
+ return nil, fmt.Errorf("exit-code container bucket not found in DB: %w", define.ErrDBBadConfig)
+ }
+ return bkt, nil
+}
+
+func getExitCodeTimeStampBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
+ bkt := tx.Bucket(exitCodeTimeStampBkt)
+ if bkt == nil {
+ return nil, fmt.Errorf("exit-code time stamp bucket not found in DB: %w", define.ErrDBBadConfig)
}
return bkt, nil
}
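
These getters back the new per-container exit-code records. A hedged sketch of their intended use in a write transaction; recordExitCode and the raw-bytes encoding are assumptions, not code from this patch:

package libpod

import bolt "go.etcd.io/bbolt"

// recordExitCode is a hypothetical helper pairing the two new buckets:
// the exit code and the time it was recorded, both keyed by container ID.
func (s *BoltState) recordExitCode(id, rawCode, rawTimeStamp []byte) error {
	db, err := s.getDBCon()
	if err != nil {
		return err
	}
	defer func() { _ = s.closeDBCon(db) }()

	return db.Update(func(tx *bolt.Tx) error {
		codeBkt, err := getExitCodeBucket(tx)
		if err != nil {
			return err
		}
		if err := codeBkt.Put(id, rawCode); err != nil {
			return err
		}
		timeBkt, err := getExitCodeTimeStampBucket(tx)
		if err != nil {
			return err
		}
		return timeBkt.Put(id, rawTimeStamp)
	})
}
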
@@ -365,23 +387,23 @@ func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func (s *BoltState) getContainerConfigFromDB(id []byte, config *ContainerConfig, ctrsBkt *bolt.Bucket) error {
ctrBkt := ctrsBkt.Bucket(id)
if ctrBkt == nil {
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s not found in DB", string(id))
+ return fmt.Errorf("container %s not found in DB: %w", string(id), define.ErrNoSuchCtr)
}
if s.namespaceBytes != nil {
ctrNamespaceBytes := ctrBkt.Get(namespaceKey)
if !bytes.Equal(s.namespaceBytes, ctrNamespaceBytes) {
- return errors.Wrapf(define.ErrNSMismatch, "cannot retrieve container %s as it is part of namespace %q and we are in namespace %q", string(id), string(ctrNamespaceBytes), s.namespace)
+ return fmt.Errorf("cannot retrieve container %s as it is part of namespace %q and we are in namespace %q: %w", string(id), string(ctrNamespaceBytes), s.namespace, define.ErrNSMismatch)
}
}
configBytes := ctrBkt.Get(configKey)
if configBytes == nil {
- return errors.Wrapf(define.ErrInternal, "container %s missing config key in DB", string(id))
+ return fmt.Errorf("container %s missing config key in DB: %w", string(id), define.ErrInternal)
}
if err := json.Unmarshal(configBytes, config); err != nil {
- return errors.Wrapf(err, "error unmarshalling container %s config", string(id))
+ return fmt.Errorf("error unmarshalling container %s config: %w", string(id), err)
}
// convert ports to the new format if needed
@@ -404,7 +426,7 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
// Get the lock
lock, err := s.runtime.lockManager.RetrieveLock(ctr.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error retrieving lock for container %s", string(id))
+ return fmt.Errorf("error retrieving lock for container %s: %w", string(id), err)
}
ctr.lock = lock
@@ -451,29 +473,29 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error {
podDB := podBkt.Bucket(id)
if podDB == nil {
- return errors.Wrapf(define.ErrNoSuchPod, "pod with ID %s not found", string(id))
+ return fmt.Errorf("pod with ID %s not found: %w", string(id), define.ErrNoSuchPod)
}
if s.namespaceBytes != nil {
podNamespaceBytes := podDB.Get(namespaceKey)
if !bytes.Equal(s.namespaceBytes, podNamespaceBytes) {
- return errors.Wrapf(define.ErrNSMismatch, "cannot retrieve pod %s as it is part of namespace %q and we are in namespace %q", string(id), string(podNamespaceBytes), s.namespace)
+ return fmt.Errorf("cannot retrieve pod %s as it is part of namespace %q and we are in namespace %q: %w", string(id), string(podNamespaceBytes), s.namespace, define.ErrNSMismatch)
}
}
podConfigBytes := podDB.Get(configKey)
if podConfigBytes == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s is missing configuration key in DB", string(id))
+ return fmt.Errorf("pod %s is missing configuration key in DB: %w", string(id), define.ErrInternal)
}
if err := json.Unmarshal(podConfigBytes, pod.config); err != nil {
- return errors.Wrapf(err, "error unmarshalling pod %s config from DB", string(id))
+ return fmt.Errorf("error unmarshalling pod %s config from DB: %w", string(id), err)
}
// Get the lock
lock, err := s.runtime.lockManager.RetrieveLock(pod.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error retrieving lock for pod %s", string(id))
+ return fmt.Errorf("error retrieving lock for pod %s: %w", string(id), err)
}
pod.lock = lock
@@ -486,29 +508,29 @@ func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error
func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bucket) error {
volDB := volBkt.Bucket(name)
if volDB == nil {
- return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %s not found", string(name))
+ return fmt.Errorf("volume with name %s not found: %w", string(name), define.ErrNoSuchVolume)
}
volConfigBytes := volDB.Get(configKey)
if volConfigBytes == nil {
- return errors.Wrapf(define.ErrInternal, "volume %s is missing configuration key in DB", string(name))
+ return fmt.Errorf("volume %s is missing configuration key in DB: %w", string(name), define.ErrInternal)
}
if err := json.Unmarshal(volConfigBytes, volume.config); err != nil {
- return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
+ return fmt.Errorf("error unmarshalling volume %s config from DB: %w", string(name), err)
}
// Volume state is allowed to be nil for legacy compatibility
volStateBytes := volDB.Get(stateKey)
if volStateBytes != nil {
if err := json.Unmarshal(volStateBytes, volume.state); err != nil {
- return errors.Wrapf(err, "error unmarshalling volume %s state from DB", string(name))
+ return fmt.Errorf("error unmarshalling volume %s state from DB: %w", string(name), err)
}
}
// Retrieve volume driver
if volume.UsesVolumeDriver() {
- plugin, err := s.runtime.getVolumePlugin(volume.config.Driver)
+ plugin, err := s.runtime.getVolumePlugin(volume.config)
if err != nil {
// We want to fail gracefully here, to ensure that we
// can still remove volumes even if their plugin is
@@ -524,7 +546,7 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
// Get the lock
lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error retrieving lock for volume %q", string(name))
+ return fmt.Errorf("error retrieving lock for volume %q: %w", string(name), err)
}
volume.lock = lock
@@ -538,8 +560,8 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
// If pod is not nil, the container is added to the pod as well
func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "cannot add container %s as it is in namespace %q and we are in namespace %q",
- ctr.ID(), s.namespace, ctr.config.Namespace)
+ return fmt.Errorf("cannot add container %s as it is in namespace %q and we are in namespace %q: %w",
+ ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
// Set the original networks to nil. We can save some space by not storing it in the config
@@ -550,11 +572,11 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
// JSON container structs to insert into DB
configJSON, err := json.Marshal(ctr.config)
if err != nil {
- return errors.Wrapf(err, "error marshalling container %s config to JSON", ctr.ID())
+ return fmt.Errorf("error marshalling container %s config to JSON: %w", ctr.ID(), err)
}
stateJSON, err := json.Marshal(ctr.state)
if err != nil {
- return errors.Wrapf(err, "error marshalling container %s state to JSON", ctr.ID())
+ return fmt.Errorf("error marshalling container %s state to JSON: %w", ctr.ID(), err)
}
netNSPath := getNetNSPath(ctr)
dependsCtrs := ctr.Dependencies()
@@ -572,16 +594,16 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
for net, opts := range configNetworks {
// Check that we don't have any empty network names
if net == "" {
- return errors.Wrapf(define.ErrInvalidArg, "network names cannot be an empty string")
+ return fmt.Errorf("network names cannot be an empty string: %w", define.ErrInvalidArg)
}
if opts.InterfaceName == "" {
- return errors.Wrapf(define.ErrInvalidArg, "network interface name cannot be an empty string")
+ return fmt.Errorf("network interface name cannot be an empty string: %w", define.ErrInvalidArg)
}
// always add the short id as alias for docker compat
opts.Aliases = append(opts.Aliases, ctr.config.ID[:12])
optBytes, err := json.Marshal(opts)
if err != nil {
- return errors.Wrapf(err, "error marshalling network options JSON for container %s", ctr.ID())
+ return fmt.Errorf("error marshalling network options JSON for container %s: %w", ctr.ID(), err)
}
networks[net] = optBytes
}
@@ -637,17 +659,17 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
podDB = podBucket.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "pod %s does not exist in database", pod.ID())
+ return fmt.Errorf("pod %s does not exist in database: %w", pod.ID(), define.ErrNoSuchPod)
}
podCtrs = podDB.Bucket(containersBkt)
if podCtrs == nil {
- return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", pod.ID())
+ return fmt.Errorf("pod %s does not have a containers bucket: %w", pod.ID(), define.ErrInternal)
}
podNS := podDB.Get(namespaceKey)
if !bytes.Equal(podNS, ctrNamespace) {
- return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %s and pod %s is in namespace %s",
- ctr.ID(), ctr.config.Namespace, pod.ID(), pod.config.Namespace)
+ return fmt.Errorf("container %s is in namespace %s and pod %s is in namespace %s: %w",
+ ctr.ID(), ctr.config.Namespace, pod.ID(), pod.config.Namespace, define.ErrNSMismatch)
}
}
@@ -658,7 +680,7 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
if allCtrsBucket.Get(idExist) == nil {
err = define.ErrPodExists
}
- return errors.Wrapf(err, "ID \"%s\" is in use", ctr.ID())
+ return fmt.Errorf("ID \"%s\" is in use: %w", ctr.ID(), err)
}
nameExist := namesBucket.Get(ctrName)
if nameExist != nil {
@@ -666,66 +688,66 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
if allCtrsBucket.Get(nameExist) == nil {
err = define.ErrPodExists
}
- return errors.Wrapf(err, "name \"%s\" is in use", ctr.Name())
+ return fmt.Errorf("name \"%s\" is in use: %w", ctr.Name(), err)
}
// No overlapping containers
// Add the new container to the DB
if err := idsBucket.Put(ctrID, ctrName); err != nil {
- return errors.Wrapf(err, "error adding container %s ID to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s ID to DB: %w", ctr.ID(), err)
}
if err := namesBucket.Put(ctrName, ctrID); err != nil {
- return errors.Wrapf(err, "error adding container %s name (%s) to DB", ctr.ID(), ctr.Name())
+ return fmt.Errorf("error adding container %s name (%s) to DB: %w", ctr.ID(), ctr.Name(), err)
}
if ctrNamespace != nil {
if err := nsBucket.Put(ctrID, ctrNamespace); err != nil {
- return errors.Wrapf(err, "error adding container %s namespace (%q) to DB", ctr.ID(), ctr.Namespace())
+ return fmt.Errorf("error adding container %s namespace (%q) to DB: %w", ctr.ID(), ctr.Namespace(), err)
}
}
if err := allCtrsBucket.Put(ctrID, ctrName); err != nil {
- return errors.Wrapf(err, "error adding container %s to all containers bucket in DB", ctr.ID())
+ return fmt.Errorf("error adding container %s to all containers bucket in DB: %w", ctr.ID(), err)
}
newCtrBkt, err := ctrBucket.CreateBucket(ctrID)
if err != nil {
- return errors.Wrapf(err, "error adding container %s bucket to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s bucket to DB: %w", ctr.ID(), err)
}
if err := newCtrBkt.Put(configKey, configJSON); err != nil {
- return errors.Wrapf(err, "error adding container %s config to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s config to DB: %w", ctr.ID(), err)
}
if err := newCtrBkt.Put(stateKey, stateJSON); err != nil {
- return errors.Wrapf(err, "error adding container %s state to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s state to DB: %w", ctr.ID(), err)
}
if ctrNamespace != nil {
if err := newCtrBkt.Put(namespaceKey, ctrNamespace); err != nil {
- return errors.Wrapf(err, "error adding container %s namespace to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s namespace to DB: %w", ctr.ID(), err)
}
}
if pod != nil {
if err := newCtrBkt.Put(podIDKey, []byte(pod.ID())); err != nil {
- return errors.Wrapf(err, "error adding container %s pod to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s pod to DB: %w", ctr.ID(), err)
}
}
if netNSPath != "" {
if err := newCtrBkt.Put(netNSKey, []byte(netNSPath)); err != nil {
- return errors.Wrapf(err, "error adding container %s netns path to DB", ctr.ID())
+ return fmt.Errorf("error adding container %s netns path to DB: %w", ctr.ID(), err)
}
}
if len(networks) > 0 {
ctrNetworksBkt, err := newCtrBkt.CreateBucket(networksBkt)
if err != nil {
- return errors.Wrapf(err, "error creating networks bucket for container %s", ctr.ID())
+ return fmt.Errorf("error creating networks bucket for container %s: %w", ctr.ID(), err)
}
for network, opts := range networks {
if err := ctrNetworksBkt.Put([]byte(network), opts); err != nil {
- return errors.Wrapf(err, "error adding network %q to networks bucket for container %s", network, ctr.ID())
+ return fmt.Errorf("error adding network %q to networks bucket for container %s: %w", network, ctr.ID(), err)
}
}
}
if _, err := newCtrBkt.CreateBucket(dependenciesBkt); err != nil {
- return errors.Wrapf(err, "error creating dependencies bucket for container %s", ctr.ID())
+ return fmt.Errorf("error creating dependencies bucket for container %s: %w", ctr.ID(), err)
}
// Add dependencies for the container
@@ -734,42 +756,42 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
depCtrBkt := ctrBucket.Bucket(depCtrID)
if depCtrBkt == nil {
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s depends on container %s, but it does not exist in the DB", ctr.ID(), dependsCtr)
+ return fmt.Errorf("container %s depends on container %s, but it does not exist in the DB: %w", ctr.ID(), dependsCtr, define.ErrNoSuchCtr)
}
depCtrPod := depCtrBkt.Get(podIDKey)
if pod != nil {
// If we're part of a pod, make sure the dependency is part of the same pod
if depCtrPod == nil {
- return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is not in pod %s", ctr.ID(), dependsCtr, pod.ID())
+ return fmt.Errorf("container %s depends on container %s which is not in pod %s: %w", ctr.ID(), dependsCtr, pod.ID(), define.ErrInvalidArg)
}
if string(depCtrPod) != pod.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is in a different pod (%s)", ctr.ID(), dependsCtr, string(depCtrPod))
+ return fmt.Errorf("container %s depends on container %s which is in a different pod (%s): %w", ctr.ID(), dependsCtr, string(depCtrPod), define.ErrInvalidArg)
}
} else if depCtrPod != nil {
// If we're not part of a pod, we cannot depend on containers in a pod
- return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is in a pod - containers not in pods cannot depend on containers in pods", ctr.ID(), dependsCtr)
+ return fmt.Errorf("container %s depends on container %s which is in a pod - containers not in pods cannot depend on containers in pods: %w", ctr.ID(), dependsCtr, define.ErrInvalidArg)
}
depNamespace := depCtrBkt.Get(namespaceKey)
if !bytes.Equal(ctrNamespace, depNamespace) {
- return errors.Wrapf(define.ErrNSMismatch, "container %s in namespace %q depends on container %s in namespace %q - namespaces must match", ctr.ID(), ctr.config.Namespace, dependsCtr, string(depNamespace))
+ return fmt.Errorf("container %s in namespace %q depends on container %s in namespace %q - namespaces must match: %w", ctr.ID(), ctr.config.Namespace, dependsCtr, string(depNamespace), define.ErrNSMismatch)
}
depCtrDependsBkt := depCtrBkt.Bucket(dependenciesBkt)
if depCtrDependsBkt == nil {
- return errors.Wrapf(define.ErrInternal, "container %s does not have a dependencies bucket", dependsCtr)
+ return fmt.Errorf("container %s does not have a dependencies bucket: %w", dependsCtr, define.ErrInternal)
}
if err := depCtrDependsBkt.Put(ctrID, ctrName); err != nil {
- return errors.Wrapf(err, "error adding ctr %s as dependency of container %s", ctr.ID(), dependsCtr)
+ return fmt.Errorf("error adding ctr %s as dependency of container %s: %w", ctr.ID(), dependsCtr, err)
}
}
// Add ctr to pod
if pod != nil && podCtrs != nil {
if err := podCtrs.Put(ctrID, ctrName); err != nil {
- return errors.Wrapf(err, "error adding container %s to pod %s", ctr.ID(), pod.ID())
+ return fmt.Errorf("error adding container %s to pod %s: %w", ctr.ID(), pod.ID(), err)
}
}
@@ -777,16 +799,16 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
for _, vol := range ctr.config.NamedVolumes {
volDB := volBkt.Bucket([]byte(vol.Name))
if volDB == nil {
- return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database when adding container %s", vol.Name, ctr.ID())
+ return fmt.Errorf("no volume with name %s found in database when adding container %s: %w", vol.Name, ctr.ID(), define.ErrNoSuchVolume)
}
ctrDepsBkt, err := volDB.CreateBucketIfNotExists(volDependenciesBkt)
if err != nil {
- return errors.Wrapf(err, "error creating volume %s dependencies bucket to add container %s", vol.Name, ctr.ID())
+ return fmt.Errorf("error creating volume %s dependencies bucket to add container %s: %w", vol.Name, ctr.ID(), err)
}
if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
if err := ctrDepsBkt.Put(ctrID, ctrID); err != nil {
- return errors.Wrapf(err, "error adding container %s to volume %s dependencies", ctr.ID(), vol.Name)
+ return fmt.Errorf("error adding container %s to volume %s dependencies: %w", ctr.ID(), vol.Name, err)
}
}
}
@@ -846,7 +868,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
podDB = podBucket.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
+ return fmt.Errorf("no pod with ID %s found in DB: %w", pod.ID(), define.ErrNoSuchPod)
}
}
@@ -854,17 +876,17 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
ctrExists := ctrBucket.Bucket(ctrID)
if ctrExists == nil {
ctr.valid = false
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+ return fmt.Errorf("no container with ID %s found in DB: %w", ctr.ID(), define.ErrNoSuchCtr)
}
// Compare namespace
// We can't remove containers not in our namespace
if s.namespace != "" {
if s.namespace != ctr.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
}
if pod != nil && s.namespace != pod.config.Namespace {
- return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q, does not match out namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return fmt.Errorf("pod %s is in namespace %q, does not match out namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
}
}
@@ -877,10 +899,10 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
} else {
ctrInPod := podCtrs.Get(ctrID)
if ctrInPod == nil {
- return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not in pod %s", ctr.ID(), pod.ID())
+ return fmt.Errorf("container %s is not in pod %s: %w", ctr.ID(), pod.ID(), define.ErrNoSuchCtr)
}
if err := podCtrs.Delete(ctrID); err != nil {
- return errors.Wrapf(err, "error removing container %s from pod %s", ctr.ID(), pod.ID())
+ return fmt.Errorf("error removing container %s from pod %s: %w", ctr.ID(), pod.ID(), err)
}
}
}
@@ -898,14 +920,14 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
return err
}
if len(sessions) > 0 {
- return errors.Wrapf(define.ErrExecSessionExists, "container %s has active exec sessions: %s", ctr.ID(), strings.Join(sessions, ", "))
+ return fmt.Errorf("container %s has active exec sessions: %s: %w", ctr.ID(), strings.Join(sessions, ", "), define.ErrExecSessionExists)
}
}
// Does the container have dependencies?
ctrDepsBkt := ctrExists.Bucket(dependenciesBkt)
if ctrDepsBkt == nil {
- return errors.Wrapf(define.ErrInternal, "container %s does not have a dependencies bucket", ctr.ID())
+ return fmt.Errorf("container %s does not have a dependencies bucket: %w", ctr.ID(), define.ErrInternal)
}
deps := []string{}
err = ctrDepsBkt.ForEach(func(id, value []byte) error {
@@ -917,25 +939,25 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
return err
}
if len(deps) != 0 {
- return errors.Wrapf(define.ErrDepExists, "container %s is a dependency of the following containers: %s", ctr.ID(), strings.Join(deps, ", "))
+ return fmt.Errorf("container %s is a dependency of the following containers: %s: %w", ctr.ID(), strings.Join(deps, ", "), define.ErrDepExists)
}
if err := ctrBucket.DeleteBucket(ctrID); err != nil {
- return errors.Wrapf(define.ErrInternal, "error deleting container %s from DB", ctr.ID())
+ return fmt.Errorf("error deleting container %s from DB: %w", ctr.ID(), define.ErrInternal)
}
if err := idsBucket.Delete(ctrID); err != nil {
- return errors.Wrapf(err, "error deleting container %s ID in DB", ctr.ID())
+ return fmt.Errorf("error deleting container %s ID in DB: %w", ctr.ID(), err)
}
if err := namesBucket.Delete(ctrName); err != nil {
- return errors.Wrapf(err, "error deleting container %s name in DB", ctr.ID())
+ return fmt.Errorf("error deleting container %s name in DB: %w", ctr.ID(), err)
}
if err := nsBucket.Delete(ctrID); err != nil {
- return errors.Wrapf(err, "error deleting container %s namespace in DB", ctr.ID())
+ return fmt.Errorf("error deleting container %s namespace in DB: %w", ctr.ID(), err)
}
if err := allCtrsBucket.Delete(ctrID); err != nil {
- return errors.Wrapf(err, "error deleting container %s from all containers bucket in DB", ctr.ID())
+ return fmt.Errorf("error deleting container %s from all containers bucket in DB: %w", ctr.ID(), err)
}
depCtrs := ctr.Dependencies()
@@ -964,7 +986,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
}
if err := depCtrDependsBkt.Delete(ctrID); err != nil {
- return errors.Wrapf(err, "error removing container %s as a dependency of container %s", ctr.ID(), depCtr)
+ return fmt.Errorf("error removing container %s as a dependency of container %s: %w", ctr.ID(), depCtr, err)
}
}
@@ -979,11 +1001,11 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
if ctrDepsBkt == nil {
- return errors.Wrapf(define.ErrInternal, "volume %s is missing container dependencies bucket, cannot remove container %s from dependencies", vol.Name, ctr.ID())
+ return fmt.Errorf("volume %s is missing container dependencies bucket, cannot remove container %s from dependencies: %w", vol.Name, ctr.ID(), define.ErrInternal)
}
if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
if err := ctrDepsBkt.Delete(ctrID); err != nil {
- return errors.Wrapf(err, "error deleting container %s dependency on volume %s", ctr.ID(), vol.Name)
+ return fmt.Errorf("error deleting container %s dependency on volume %s: %w", ctr.ID(), vol.Name, err)
}
}
}
@@ -1040,7 +1062,7 @@ func (s *BoltState) lookupContainerID(idOrName string, ctrBucket, namesBucket, n
}
if strings.HasPrefix(string(checkID), idOrName) {
if exists {
- return errors.Wrapf(define.ErrCtrExists, "more than one result for container ID %s", idOrName)
+ return fmt.Errorf("more than one result for container ID %s: %w", idOrName, define.ErrCtrExists)
}
id = checkID
exists = true
@@ -1053,9 +1075,9 @@ func (s *BoltState) lookupContainerID(idOrName string, ctrBucket, namesBucket, n
return nil, err
} else if !exists {
if isPod {
- return nil, errors.Wrapf(define.ErrNoSuchCtr, "%q is a pod, not a container", idOrName)
+ return nil, fmt.Errorf("%q is a pod, not a container: %w", idOrName, define.ErrNoSuchCtr)
}
- return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container with name or ID %q found", idOrName)
+ return nil, fmt.Errorf("no container with name or ID %q found: %w", idOrName, define.ErrNoSuchCtr)
}
return id, nil
}
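
Callers of the lookup above separate the two failure modes by sentinel. An illustrative sketch after the fmt.Errorf rewrite; resolveKind is invented, and LookupContainer is assumed to be the state's public wrapper around this helper:

package libpod

import (
	"errors"

	"github.com/containers/podman/v4/libpod/define"
)

// resolveKind shows how the wrapped sentinels from lookupContainerID are
// distinguished: an ambiguous short ID versus a missing container.
func resolveKind(s *BoltState, idOrName string) string {
	_, err := s.LookupContainer(idOrName)
	switch {
	case err == nil:
		return "found"
	case errors.Is(err, define.ErrCtrExists):
		return "ambiguous ID prefix"
	case errors.Is(err, define.ErrNoSuchCtr):
		return "no such container"
	default:
		return "other error"
	}
}
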
diff --git a/libpod/boltdb_state_linux.go b/libpod/boltdb_state_linux.go
index 8bb10fb63..813afd8bf 100644
--- a/libpod/boltdb_state_linux.go
+++ b/libpod/boltdb_state_linux.go
@@ -4,8 +4,9 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -29,7 +30,7 @@ func replaceNetNS(netNSPath string, ctr *Container, newState *ContainerState) er
newState.NetNS = ns
} else {
if ctr.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
- return errors.Wrapf(err, "error joining network namespace of container %s", ctr.ID())
+ return fmt.Errorf("error joining network namespace of container %s: %w", ctr.ID(), err)
}
logrus.Errorf("Joining network namespace for container %s: %v", ctr.ID(), err)
diff --git a/libpod/container.go b/libpod/container.go
index 64b4453fb..4e2d93860 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -19,7 +19,6 @@ import (
"github.com/containers/podman/v4/libpod/lock"
"github.com/containers/storage"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -355,14 +354,14 @@ func (c *Container) specFromState() (*spec.Spec, error) {
returnSpec = new(spec.Spec)
content, err := ioutil.ReadAll(f)
if err != nil {
- return nil, errors.Wrapf(err, "error reading container config")
+ return nil, fmt.Errorf("error reading container config: %w", err)
}
if err := json.Unmarshal(content, &returnSpec); err != nil {
- return nil, errors.Wrapf(err, "error unmarshalling container config")
+ return nil, fmt.Errorf("error unmarshalling container config: %w", err)
}
} else if !os.IsNotExist(err) {
// ignore when the file does not exist
- return nil, errors.Wrapf(err, "error opening container config")
+ return nil, fmt.Errorf("error opening container config: %w", err)
}
return returnSpec, nil
@@ -518,7 +517,7 @@ func (c *Container) PortMappings() ([]types.PortMapping, error) {
if len(c.config.NetNsCtr) > 0 {
netNsCtr, err := c.runtime.GetContainer(c.config.NetNsCtr)
if err != nil {
- return nil, errors.Wrapf(err, "unable to lookup network namespace for container %s", c.ID())
+ return nil, fmt.Errorf("unable to look up network namespace for container %s: %w", c.ID(), err)
}
return netNsCtr.PortMappings()
}
@@ -657,7 +656,7 @@ func (c *Container) Hostname() string {
utsNsCtr, err := c.runtime.GetContainer(c.config.UTSNsCtr)
if err != nil {
// should we return an error here?
- logrus.Errorf("unable to lookup uts namespace for container %s: %v", c.ID(), err)
+ logrus.Errorf("unable to look up uts namespace for container %s: %v", c.ID(), err)
return ""
}
return utsNsCtr.Hostname()
@@ -705,7 +704,7 @@ func (c *Container) Mounted() (bool, string, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return false, "", errors.Wrapf(err, "error updating container %s state", c.ID())
+ return false, "", fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
// We cannot directly return c.state.Mountpoint as it is not guaranteed
@@ -735,7 +734,7 @@ func (c *Container) StartedTime() (time.Time, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return time.Time{}, errors.Wrapf(err, "error updating container %s state", c.ID())
+ return time.Time{}, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.state.StartedTime, nil
@@ -747,7 +746,7 @@ func (c *Container) FinishedTime() (time.Time, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return time.Time{}, errors.Wrapf(err, "error updating container %s state", c.ID())
+ return time.Time{}, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.state.FinishedTime, nil
@@ -762,7 +761,7 @@ func (c *Container) ExitCode() (int32, bool, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return 0, false, errors.Wrapf(err, "error updating container %s state", c.ID())
+ return 0, false, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.state.ExitCode, c.state.Exited, nil
@@ -774,7 +773,7 @@ func (c *Container) OOMKilled() (bool, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return false, errors.Wrapf(err, "error updating container %s state", c.ID())
+ return false, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.state.OOMKilled, nil
@@ -845,7 +844,7 @@ func (c *Container) execSessionNoCopy(id string) (*ExecSession, error) {
session, ok := c.state.ExecSessions[id]
if !ok {
- return nil, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", id, c.ID())
+ return nil, fmt.Errorf("no exec session with ID %s found in container %s: %w", id, c.ID(), define.ErrNoSuchExecSession)
}
return session, nil
@@ -861,7 +860,7 @@ func (c *Container) ExecSession(id string) (*ExecSession, error) {
returnSession := new(ExecSession)
if err := JSONDeepCopy(session, returnSession); err != nil {
- return nil, errors.Wrapf(err, "error copying contents of container %s exec session %s", c.ID(), session.ID())
+ return nil, fmt.Errorf("error copying contents of container %s exec session %s: %w", c.ID(), session.ID(), err)
}
return returnSession, nil
@@ -921,7 +920,7 @@ func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:in
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return "", errors.Wrapf(err, "error updating container %s state", c.ID())
+ return "", fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
@@ -932,11 +931,11 @@ func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:in
// If the container is not running, an error will be returned
func (c *Container) namespacePath(linuxNS LinuxNS) (string, error) { //nolint:interfacer
if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
- return "", errors.Wrapf(define.ErrCtrStopped, "cannot get namespace path unless container %s is running", c.ID())
+ return "", fmt.Errorf("cannot get namespace path unless container %s is running: %w", c.ID(), define.ErrCtrStopped)
}
if linuxNS == InvalidNS {
- return "", errors.Wrapf(define.ErrInvalidArg, "invalid namespace requested from container %s", c.ID())
+ return "", fmt.Errorf("invalid namespace requested from container %s: %w", c.ID(), define.ErrInvalidArg)
}
return fmt.Sprintf("/proc/%d/ns/%s", c.state.PID, linuxNS.String()), nil
@@ -959,7 +958,7 @@ func (c *Container) CgroupPath() (string, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return "", errors.Wrapf(err, "error updating container %s state", c.ID())
+ return "", fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.cGroupPath()
@@ -971,10 +970,10 @@ func (c *Container) CgroupPath() (string, error) {
// NOTE: only call this when owning the container's lock.
func (c *Container) cGroupPath() (string, error) {
if c.config.NoCgroups || c.config.CgroupsMode == "disabled" {
- return "", errors.Wrapf(define.ErrNoCgroups, "this container is not creating cgroups")
+ return "", fmt.Errorf("this container is not creating cgroups: %w", define.ErrNoCgroups)
}
if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
- return "", errors.Wrapf(define.ErrCtrStopped, "cannot get cgroup path unless container %s is running", c.ID())
+ return "", fmt.Errorf("cannot get cgroup path unless container %s is running: %w", c.ID(), define.ErrCtrStopped)
}
// Read /proc/{PID}/cgroup and find the *longest* cgroup entry. That's
@@ -995,7 +994,7 @@ func (c *Container) cGroupPath() (string, error) {
// If the file doesn't exist, it means the container could have been terminated
// so report it.
if os.IsNotExist(err) {
- return "", errors.Wrapf(define.ErrCtrStopped, "cannot get cgroup path unless container %s is running", c.ID())
+ return "", fmt.Errorf("cannot get cgroup path unless container %s is running: %w", c.ID(), define.ErrCtrStopped)
}
return "", err
}
@@ -1024,7 +1023,7 @@ func (c *Container) cGroupPath() (string, error) {
}
if len(cgroupPath) == 0 {
- return "", errors.Errorf("could not find any cgroup in %q", procPath)
+ return "", fmt.Errorf("could not find any cgroup in %q", procPath)
}
cgroupManager := c.CgroupManager()
@@ -1059,7 +1058,7 @@ func (c *Container) RootFsSize() (int64, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return -1, errors.Wrapf(err, "error updating container %s state", c.ID())
+ return -1, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.rootFsSize()
@@ -1071,7 +1070,7 @@ func (c *Container) RWSize() (int64, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return -1, errors.Wrapf(err, "error updating container %s state", c.ID())
+ return -1, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
}
}
return c.rwSize()
@@ -1118,7 +1117,7 @@ func (c *Container) IsInitCtr() bool {
return len(c.config.InitContainerType) > 0
}
-// IsReadOnly returns whether the container is running in read only mode
+// IsReadOnly returns whether the container is running in read-only mode
func (c *Container) IsReadOnly() bool {
return c.config.Spec.Root.Readonly
}
@@ -1173,7 +1172,7 @@ func (c *Container) ContainerState() (*ContainerState, error) {
}
returnConfig := new(ContainerState)
if err := JSONDeepCopy(c.state, returnConfig); err != nil {
- return nil, errors.Wrapf(err, "error copying container %s state", c.ID())
+ return nil, fmt.Errorf("error copying container %s state: %w", c.ID(), err)
}
return c.state, nil
}
@@ -1331,9 +1330,57 @@ func (c *Container) getNetworkStatus() map[string]types.StatusBlock {
}
c.state.NetworkStatus = result
_ = c.save()
- // TODO remove debug for final version
- logrus.Debugf("converted old network result to new result %v", result)
+
return result
}
return nil
}
+
+func (c *Container) NamespaceMode(ns spec.LinuxNamespaceType, ctrSpec *spec.Spec) string {
+ switch ns {
+ case spec.UTSNamespace:
+ if c.config.UTSNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.UTSNsCtr)
+ }
+ case spec.CgroupNamespace:
+ if c.config.CgroupNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.CgroupNsCtr)
+ }
+ case spec.IPCNamespace:
+ if c.config.IPCNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.IPCNsCtr)
+ }
+ case spec.PIDNamespace:
+ if c.config.PIDNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.PIDNsCtr)
+ }
+ case spec.UserNamespace:
+ if c.config.UserNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.UserNsCtr)
+ }
+ case spec.NetworkNamespace:
+ if c.config.NetNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.NetNsCtr)
+ }
+ case spec.MountNamespace:
+ if c.config.MountNsCtr != "" {
+ return fmt.Sprintf("container:%s", c.config.MountNsCtr)
+ }
+ }
+
+ if ctrSpec.Linux != nil {
+ // Locate the spec's given namespace.
+ // If there is none, it's namespace=host.
+ // If there is one and it has a path, it's "ns:<path>".
+ // If there is one with no path, it's a private namespace.
+ for _, availableNS := range ctrSpec.Linux.Namespaces {
+ if availableNS.Type == ns {
+ if availableNS.Path != "" {
+ return fmt.Sprintf("ns:%s", availableNS.Path)
+ }
+ return "private"
+ }
+ }
+ }
+ return "host"
+}
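
A hedged usage sketch for the new NamespaceMode helper, for example when assembling inspect output; printNamespaceModes and its inputs are illustrative, not part of this patch:

package libpod

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// printNamespaceModes reports how each namespace of a container is
// configured: "container:<id>", "ns:<path>", "private", or "host".
func printNamespaceModes(ctr *Container, ctrSpec *spec.Spec) {
	for _, ns := range []spec.LinuxNamespaceType{
		spec.PIDNamespace,
		spec.NetworkNamespace,
		spec.UTSNamespace,
	} {
		fmt.Printf("%s: %s\n", ns, ctr.NamespaceMode(ns, ctrSpec))
	}
}
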
diff --git a/libpod/container_api.go b/libpod/container_api.go
index a6fcf709d..dbd5fc1fb 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -2,6 +2,8 @@ package libpod
import (
"context"
+ "errors"
+ "fmt"
"io"
"io/ioutil"
"net/http"
@@ -9,11 +11,11 @@ import (
"sync"
"time"
+ "github.com/containers/common/pkg/resize"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/signal"
"github.com/containers/storage/pkg/archive"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -38,7 +40,7 @@ func (c *Container) Init(ctx context.Context, recursive bool) error {
}
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateStopped, define.ContainerStateExited) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID())
+ return fmt.Errorf("container %s has already been created in runtime: %w", c.ID(), define.ErrCtrStateInvalid)
}
if !recursive {
@@ -102,7 +104,7 @@ func (c *Container) Start(ctx context.Context, recursive bool) error {
// Attach call occurs before Start).
// In overall functionality, it is identical to the Start call, with the added
// side effect that an attach session will also be started.
-func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachStreams, keys string, resize <-chan define.TerminalSize, recursive bool) (<-chan error, error) {
+func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachStreams, keys string, resize <-chan resize.TerminalSize, recursive bool) (<-chan error, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -123,7 +125,18 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachSt
// Attach to the container before starting it
go func() {
- if err := c.attach(streams, keys, resize, true, startedChan, nil); err != nil {
+ // Start resizing
+ if c.LogDriver() != define.PassthroughLogging {
+ registerResizeFunc(resize, c.bundlePath())
+ }
+
+ opts := new(AttachOptions)
+ opts.Streams = streams
+ opts.DetachKeys = &keys
+ opts.Start = true
+ opts.Started = startedChan
+
+ if err := c.ociRuntime.Attach(c, opts); err != nil {
attachChan <- err
}
close(attachChan)
@@ -185,7 +198,7 @@ func (c *Container) StopWithTimeout(timeout uint) error {
}
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopping) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created or running containers. %s is in state %s", c.ID(), c.state.State.String())
+ return fmt.Errorf("can only stop created or running containers. %s is in state %s: %w", c.ID(), c.state.State.String(), define.ErrCtrStateInvalid)
}
return c.stop(timeout)
@@ -202,15 +215,14 @@ func (c *Container) Kill(signal uint) error {
}
}
- // TODO: Is killing a paused container OK?
switch c.state.State {
- case define.ContainerStateRunning, define.ContainerStateStopping:
+ case define.ContainerStateRunning, define.ContainerStateStopping, define.ContainerStatePaused:
// Note that killing containers in "stopping" state is okay.
// In that state, Podman is waiting for the runtime to
// stop the container and if that is taking too long, a user
// may have decided to kill the container after all.
default:
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
+ return fmt.Errorf("can only kill running containers. %s is in state %s: %w", c.ID(), c.state.State.String(), define.ErrCtrStateInvalid)
}
// Hardcode all = false, we only use all when removing.
@@ -228,9 +240,9 @@ func (c *Container) Kill(signal uint) error {
// Attach attaches to a container.
// This function returns when the attach finishes. It does not hold the lock for
// the duration of its runtime, only using it at the beginning to verify state.
-func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-chan define.TerminalSize) error {
+func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-chan resize.TerminalSize) error {
if c.LogDriver() == define.PassthroughLogging {
- return errors.Wrapf(define.ErrNoLogs, "this container is using the 'passthrough' log driver, cannot attach")
+ return fmt.Errorf("this container is using the 'passthrough' log driver, cannot attach: %w", define.ErrNoLogs)
}
if !c.batched {
c.lock.Lock()
@@ -243,7 +255,7 @@ func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-
}
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
+ return fmt.Errorf("can only attach to created or running containers: %w", define.ErrCtrStateInvalid)
}
// HACK: This is really gross, but there isn't a better way without
@@ -261,8 +273,18 @@ func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-
}()
}
+ // Start resizing
+ if c.LogDriver() != define.PassthroughLogging {
+ registerResizeFunc(resize, c.bundlePath())
+ }
+
+ opts := new(AttachOptions)
+ opts.Streams = streams
+ opts.DetachKeys = &keys
+ opts.AttachReady = attachRdy
+
c.newContainerEvent(events.Attach)
- return c.attach(streams, keys, resize, false, nil, attachRdy)
+ return c.ociRuntime.Attach(c, opts)
}
// HTTPAttach forwards an attach session over a hijacked HTTP session.
@@ -299,11 +321,11 @@ func (c *Container) HTTPAttach(r *http.Request, w http.ResponseWriter, streams *
}
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
+ return fmt.Errorf("can only attach to created or running containers: %w", define.ErrCtrStateInvalid)
}
if !streamAttach && !streamLogs {
- return errors.Wrapf(define.ErrInvalidArg, "must specify at least one of stream or logs")
+ return fmt.Errorf("must specify at least one of stream or logs: %w", define.ErrInvalidArg)
}
logrus.Infof("Performing HTTP Hijack attach to container %s", c.ID())
@@ -314,7 +336,7 @@ func (c *Container) HTTPAttach(r *http.Request, w http.ResponseWriter, streams *
// AttachResize resizes the container's terminal, which is displayed by Attach
// and HTTPAttach.
-func (c *Container) AttachResize(newSize define.TerminalSize) error {
+func (c *Container) AttachResize(newSize resize.TerminalSize) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -325,7 +347,7 @@ func (c *Container) AttachResize(newSize define.TerminalSize) error {
}
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only resize created or running containers")
+ return fmt.Errorf("can only resize created or running containers: %w", define.ErrCtrStateInvalid)
}
logrus.Infof("Resizing TTY of container %s", c.ID())
@@ -362,20 +384,20 @@ func (c *Container) Unmount(force bool) error {
if c.state.Mounted {
mounted, err := c.runtime.storageService.MountedContainerImage(c.ID())
if err != nil {
- return errors.Wrapf(err, "can't determine how many times %s is mounted, refusing to unmount", c.ID())
+ return fmt.Errorf("can't determine how many times %s is mounted, refusing to unmount: %w", c.ID(), err)
}
if mounted == 1 {
if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
+ return fmt.Errorf("cannot unmount storage for container %s as it is running or paused: %w", c.ID(), define.ErrCtrStateInvalid)
}
execSessions, err := c.getActiveExecSessions()
if err != nil {
return err
}
if len(execSessions) != 0 {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID())
+ return fmt.Errorf("container %s has active exec sessions, refusing to unmount: %w", c.ID(), define.ErrCtrStateInvalid)
}
- return errors.Wrapf(define.ErrInternal, "can't unmount %s last mount, it is still in use", c.ID())
+ return fmt.Errorf("can't unmount %s last mount, it is still in use: %w", c.ID(), define.ErrInternal)
}
}
defer c.newContainerEvent(events.Unmount)
@@ -394,10 +416,10 @@ func (c *Container) Pause() error {
}
if c.state.State == define.ContainerStatePaused {
- return errors.Wrapf(define.ErrCtrStateInvalid, "%q is already paused", c.ID())
+ return fmt.Errorf("%q is already paused: %w", c.ID(), define.ErrCtrStateInvalid)
}
if c.state.State != define.ContainerStateRunning {
- return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State)
+ return fmt.Errorf("%q is not running, can't pause: %w", c.state.State, define.ErrCtrStateInvalid)
}
defer c.newContainerEvent(events.Pause)
return c.pause()
@@ -415,7 +437,7 @@ func (c *Container) Unpause() error {
}
if c.state.State != define.ContainerStatePaused {
- return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID())
+ return fmt.Errorf("%q is not paused, can't unpause: %w", c.ID(), define.ErrCtrStateInvalid)
}
defer c.newContainerEvent(events.Unpause)
return c.unpause()
@@ -434,7 +456,7 @@ func (c *Container) Export(path string) error {
}
if c.state.State == define.ContainerStateRemoving {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID())
+ return fmt.Errorf("cannot mount container %s as it is being removed: %w", c.ID(), define.ErrCtrStateInvalid)
}
defer c.newContainerEvent(events.Mount)
@@ -447,7 +469,7 @@ func (c *Container) AddArtifact(name string, data []byte) error {
return define.ErrCtrRemoved
}
- return ioutil.WriteFile(c.getArtifactPath(name), data, 0740)
+ return ioutil.WriteFile(c.getArtifactPath(name), data, 0o740)
}
// GetArtifact reads the specified artifact file from the container
@@ -470,41 +492,84 @@ func (c *Container) RemoveArtifact(name string) error {
// Wait blocks until the container exits and returns its exit code.
func (c *Container) Wait(ctx context.Context) (int32, error) {
- return c.WaitWithInterval(ctx, DefaultWaitInterval)
+ return c.WaitForExit(ctx, DefaultWaitInterval)
}
-// WaitWithInterval blocks until the container to exit and returns its exit
-// code. The argument is the interval at which checks the container's status.
-func (c *Container) WaitWithInterval(ctx context.Context, waitTimeout time.Duration) (int32, error) {
+// WaitForExit blocks until the container exits and returns its exit code. The
+// argument is the interval at which it checks the container's status.
+func (c *Container) WaitForExit(ctx context.Context, pollInterval time.Duration) (int32, error) {
if !c.valid {
return -1, define.ErrCtrRemoved
}
- exitFile, err := c.exitFilePath()
- if err != nil {
- return -1, err
- }
- chWait := make(chan error, 1)
+ id := c.ID()
+ var conmonTimer time.Timer
+ conmonTimerSet := false
- go func() {
- <-ctx.Done()
- chWait <- define.ErrCanceled
- }()
+ getExitCode := func() (bool, int32, error) {
+ containerRemoved := false
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ }
- for {
- // ignore errors here (with exception of cancellation), it is only used to avoid waiting
- // too long.
- _, e := WaitForFile(exitFile, chWait, waitTimeout)
- if e == define.ErrCanceled {
- return -1, define.ErrCanceled
+ if err := c.syncContainer(); err != nil {
+ if !errors.Is(err, define.ErrNoSuchCtr) {
+ return false, -1, err
+ }
+ containerRemoved = true
+ }
+
+ // If conmon is not alive anymore set a timer to make sure
+ // we're returning even if conmon has forcefully been killed.
+ if !conmonTimerSet && !containerRemoved {
+ conmonAlive, err := c.ociRuntime.CheckConmonRunning(c)
+ switch {
+ case errors.Is(err, define.ErrNoSuchCtr):
+ containerRemoved = true
+ case err != nil:
+ return false, -1, err
+ case !conmonAlive:
+ timerDuration := time.Second * 20
+ conmonTimer = *time.NewTimer(timerDuration)
+ conmonTimerSet = true
+ }
+ }
+
+ if !containerRemoved {
+ // If conmon is dead for more than $timerDuration or if the
+ // container has exited properly, try to look up the exit code.
+ select {
+ case <-conmonTimer.C:
+ logrus.Debugf("Exceeded conmon timeout waiting for container %s to exit", id)
+ default:
+ if !c.ensureState(define.ContainerStateExited, define.ContainerStateConfigured) {
+ return false, -1, nil
+ }
+ }
}
- stopped, code, err := c.isStopped()
+ exitCode, err := c.runtime.state.GetContainerExitCode(id)
+ if err != nil {
+ return true, -1, err
+ }
+
+ return true, exitCode, nil
+ }
+
+ for {
+ hasExited, exitCode, err := getExitCode()
+ if hasExited {
+ return exitCode, err
+ }
if err != nil {
return -1, err
}
- if stopped {
- return code, nil
+ select {
+ case <-ctx.Done():
+ return -1, fmt.Errorf("waiting for exit code of container %s canceled", id)
+ default:
+ time.Sleep(pollInterval)
}
}
}
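The loop above polls getExitCode until the container has exited (or conmon has been dead past the timer), returning early when the caller's context is canceled. A minimal sketch of the same poll-until-done control flow, with illustrative names that are not part of libpod:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // pollUntil repeatedly invokes check at the given interval until it
    // reports completion or ctx is canceled, mirroring WaitForExit's loop.
    func pollUntil(ctx context.Context, interval time.Duration, check func() (done bool, code int32, err error)) (int32, error) {
        for {
            done, code, err := check()
            if done {
                return code, err
            }
            if err != nil {
                return -1, err
            }
            select {
            case <-ctx.Done():
                return -1, errors.New("wait canceled")
            default:
                time.Sleep(interval)
            }
        }
    }

    func main() {
        deadline := time.Now().Add(250 * time.Millisecond)
        code, err := pollUntil(context.Background(), 50*time.Millisecond, func() (bool, int32, error) {
            return time.Now().After(deadline), 0, nil
        })
        fmt.Println(code, err) // 0 <nil>
    }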
@@ -531,11 +596,12 @@ func (c *Container) WaitForConditionWithInterval(ctx context.Context, waitTimeou
wantedStates := make(map[define.ContainerStatus]bool, len(conditions))
for _, condition := range conditions {
- if condition == define.ContainerStateStopped || condition == define.ContainerStateExited {
+ switch condition {
+ case define.ContainerStateExited, define.ContainerStateStopped:
waitForExit = true
- continue
+ default:
+ wantedStates[condition] = true
}
- wantedStates[condition] = true
}
trySend := func(code int32, err error) {
@@ -552,7 +618,7 @@ func (c *Container) WaitForConditionWithInterval(ctx context.Context, waitTimeou
go func() {
defer wg.Done()
- code, err := c.WaitWithInterval(ctx, waitTimeout)
+ code, err := c.WaitForExit(ctx, waitTimeout)
trySend(code, err)
}()
}
@@ -601,13 +667,21 @@ func (c *Container) Cleanup(ctx context.Context) error {
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
+ // When the container has already been removed, the OCI runtime directory remains.
+ if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
+ if err := c.cleanupRuntime(ctx); err != nil {
+ return fmt.Errorf("error cleaning up container %s from OCI runtime: %w", c.ID(), err)
+ }
+ return nil
+ }
+ logrus.Errorf("Syncing container %s status: %v", c.ID(), err)
return err
}
}
// Check if state is good
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateStopping, define.ContainerStateExited) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
+ return fmt.Errorf("container %s is running or paused, refusing to clean up: %w", c.ID(), define.ErrCtrStateInvalid)
}
// Handle restart policy.
@@ -629,7 +703,7 @@ func (c *Container) Cleanup(ctx context.Context) error {
return err
}
if len(sessions) > 0 {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID())
+ return fmt.Errorf("container %s has active exec sessions, refusing to clean up: %w", c.ID(), define.ErrCtrStateInvalid)
}
defer c.newContainerEvent(events.Cleanup)
@@ -687,19 +761,8 @@ func (c *Container) Sync() error {
defer c.lock.Unlock()
}
- // If runtime knows about the container, update its status in runtime
- // And then save back to disk
- if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopped, define.ContainerStateStopping) {
- oldState := c.state.State
- if err := c.ociRuntime.UpdateContainerStatus(c); err != nil {
- return err
- }
- // Only save back to DB if state changed
- if c.state.State != oldState {
- if err := c.save(); err != nil {
- return err
- }
- }
+ if err := c.syncContainer(); err != nil {
+ return err
}
defer c.newContainerEvent(events.Sync)
@@ -726,7 +789,7 @@ func (c *Container) ReloadNetwork() error {
}
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot reload network unless container network has been configured")
+ return fmt.Errorf("cannot reload network unless container network has been configured: %w", define.ErrCtrStateInvalid)
}
return c.reloadNetwork()
@@ -878,7 +941,7 @@ func (c *Container) ShouldRestart(ctx context.Context) bool {
// CopyFromArchive copies the contents from the specified tarStream to path
// *inside* the container.
-func (c *Container) CopyFromArchive(ctx context.Context, containerPath string, chown bool, rename map[string]string, tarStream io.Reader) (func() error, error) {
+func (c *Container) CopyFromArchive(_ context.Context, containerPath string, chown, noOverwriteDirNonDir bool, rename map[string]string, tarStream io.Reader) (func() error, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -888,7 +951,7 @@ func (c *Container) CopyFromArchive(ctx context.Context, containerPath string, c
}
}
- return c.copyFromArchive(containerPath, chown, rename, tarStream)
+ return c.copyFromArchive(containerPath, chown, noOverwriteDirNonDir, rename, tarStream)
}
// CopyToArchive copies the contents from the specified path *inside* the
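The recurring change in this file, and in the rest of the commit, swaps github.com/pkg/errors wrapping for stdlib fmt.Errorf with the %w verb. A minimal sketch of why sentinel checks keep working after the swap; errCtrStateInvalid here stands in for define.ErrCtrStateInvalid:

    package main

    import (
        "errors"
        "fmt"
    )

    // errCtrStateInvalid stands in for define.ErrCtrStateInvalid.
    var errCtrStateInvalid = errors.New("container state improper")

    func main() {
        // New style: message first, sentinel wrapped at the end via %w.
        err := fmt.Errorf("can only attach to created or running containers: %w", errCtrStateInvalid)

        // errors.Is unwraps through %w, so sentinel checks keep working.
        fmt.Println(errors.Is(err, errCtrStateInvalid)) // true
    }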
diff --git a/libpod/container_commit.go b/libpod/container_commit.go
index 7018ee7d8..c93c9c7bb 100644
--- a/libpod/container_commit.go
+++ b/libpod/container_commit.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"strings"
@@ -12,7 +13,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
libpodutil "github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -34,7 +34,7 @@ type ContainerCommitOptions struct {
// image
func (c *Container) Commit(ctx context.Context, destImage string, options ContainerCommitOptions) (*libimage.Image, error) {
if c.config.Rootfs != "" {
- return nil, errors.Errorf("cannot commit a container that uses an exploded rootfs")
+ return nil, errors.New("cannot commit a container that uses an exploded rootfs")
}
if !c.batched {
@@ -48,7 +48,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
if c.state.State == define.ContainerStateRunning && options.Pause {
if err := c.pause(); err != nil {
- return nil, errors.Wrapf(err, "error pausing container %q to commit", c.ID())
+ return nil, fmt.Errorf("error pausing container %q to commit: %w", c.ID(), err)
}
defer func() {
if err := c.unpause(); err != nil {
@@ -136,7 +136,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
if include {
vol, err := c.runtime.GetVolume(v.Name)
if err != nil {
- return nil, errors.Wrapf(err, "volume %s used in container %s has been removed", v.Name, c.ID())
+ return nil, fmt.Errorf("volume %s used in container %s has been removed: %w", v.Name, c.ID(), err)
}
if vol.Anonymous() {
importBuilder.AddVolume(v.Dest)
@@ -202,7 +202,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
imageRef, err := is.Transport.ParseStoreReference(c.runtime.store, resolvedImageName)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing target image name %q", destImage)
+ return nil, fmt.Errorf("error parsing target image name %q: %w", destImage, err)
}
commitRef = imageRef
}
diff --git a/libpod/container_config.go b/libpod/container_config.go
index 3e85ad4d5..544c45a8c 100644
--- a/libpod/container_config.go
+++ b/libpod/container_config.go
@@ -194,7 +194,7 @@ type ContainerSecurityConfig struct {
// If not explicitly set, an unused random MLS label will be assigned by
// containers/storage (but only if SELinux is enabled).
MountLabel string `json:"MountLabel,omitempty"`
- // LabelOpts are options passed in by the user to setup SELinux labels.
+ // LabelOpts are options passed in by the user to set up SELinux labels.
// These are used by the containers/storage library.
LabelOpts []string `json:"labelopts,omitempty"`
// User and group to use in the container. Can be specified as only user
@@ -243,12 +243,12 @@ type ContainerNetworkConfig struct {
// This cannot be set unless CreateNetNS is set.
// If not set, the container will be dynamically assigned an IP by CNI.
// Deprecated: Do not use this anymore; this is only for DB backwards compat.
- StaticIP net.IP `json:"staticIP"`
+ StaticIP net.IP `json:"staticIP,omitempty"`
// StaticMAC is a static MAC to request for the container.
// This cannot be set unless CreateNetNS is set.
// If not set, the container will be dynamically assigned a MAC by CNI.
// Deprecated: Do not use this anymore; this is only for DB backwards compat.
- StaticMAC types.HardwareAddr `json:"staticMAC"`
+ StaticMAC types.HardwareAddr `json:"staticMAC,omitempty"`
// PortMappings are the ports forwarded to the container's network
// namespace
// These are not used unless CreateNetNS is true
@@ -372,7 +372,6 @@ type ContainerMiscConfig struct {
// restart the container. Used only if RestartPolicy is set to
// "on-failure".
RestartRetries uint `json:"restart_retries,omitempty"`
- // TODO log options for log drivers
// PostConfigureNetNS needed when a user namespace is created by an OCI runtime
// if the network namespace is created before the user namespace it will be
// owned by the wrong user namespace.
@@ -387,7 +386,7 @@ type ContainerMiscConfig struct {
IsService bool `json:"isService"`
// SdNotifyMode tells libpod what to do with a NOTIFY_SOCKET if passed
SdNotifyMode string `json:"sdnotifyMode,omitempty"`
- // Systemd tells libpod to setup the container in systemd mode, a value of nil denotes false
+ // Systemd tells libpod to set up the container in systemd mode, a value of nil denotes false
Systemd *bool `json:"systemd,omitempty"`
// HealthCheckConfig has the health check command and related timings
HealthCheckConfig *manifest.Schema2HealthConfig `json:"healthcheck"`
@@ -413,6 +412,9 @@ type ContainerMiscConfig struct {
InitContainerType string `json:"init_container_type,omitempty"`
// PasswdEntry specifies arbitrary data to append to a file.
PasswdEntry string `json:"passwd_entry,omitempty"`
+ // MountAllDevices is an option to indicate whether a privileged container
+ // will mount all the host's devices
+ MountAllDevices bool `json:"mountAllDevices"`
}
// InfraInherit contains the compatible options inheritable from the infra container
@@ -422,7 +424,6 @@ type InfraInherit struct {
CapDrop []string `json:"cap_drop,omitempty"`
HostDeviceList []spec.LinuxDevice `json:"host_device_list,omitempty"`
ImageVolumes []*specgen.ImageVolume `json:"image_volumes,omitempty"`
- InfraResources *spec.LinuxResources `json:"resource_limits,omitempty"`
Mounts []spec.Mount `json:"mounts,omitempty"`
NoNewPrivileges bool `json:"no_new_privileges,omitempty"`
OverlayVolumes []*specgen.OverlayVolume `json:"overlay_volumes,omitempty"`
@@ -430,4 +431,10 @@ type InfraInherit struct {
SeccompProfilePath string `json:"seccomp_profile_path,omitempty"`
SelinuxOpts []string `json:"selinux_opts,omitempty"`
Volumes []*specgen.NamedVolume `json:"volumes,omitempty"`
+ ShmSize *int64 `json:"shm_size"`
+}
+
+// IsDefaultShmSize determines whether the user actually set the shm size in the parent ctr or left it at the default size
+func (inherit *InfraInherit) IsDefaultShmSize() bool {
+ return inherit.ShmSize == nil || *inherit.ShmSize == 65536000
}
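A minimal sketch of the nil-or-default check above; the 65536000-byte literal is the default shm size the method compares against:

    package main

    import "fmt"

    // isDefaultShmSize mirrors InfraInherit.IsDefaultShmSize: a nil pointer
    // (never set) and the 65536000-byte literal are both treated as default.
    func isDefaultShmSize(shm *int64) bool {
        return shm == nil || *shm == 65536000
    }

    func main() {
        custom := int64(1 << 20)
        fmt.Println(isDefaultShmSize(nil))     // true
        fmt.Println(isDefaultShmSize(&custom)) // false
    }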
diff --git a/libpod/container_copy_linux.go b/libpod/container_copy_linux.go
index 7566fbb12..557fead1e 100644
--- a/libpod/container_copy_linux.go
+++ b/libpod/container_copy_linux.go
@@ -4,6 +4,8 @@
package libpod
import (
+ "errors"
+ "fmt"
"io"
"os"
"path/filepath"
@@ -18,12 +20,11 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
-func (c *Container) copyFromArchive(path string, chown bool, rename map[string]string, reader io.Reader) (func() error, error) {
+func (c *Container) copyFromArchive(path string, chown, noOverwriteDirNonDir bool, rename map[string]string, reader io.Reader) (func() error, error) {
var (
mountPoint string
resolvedRoot string
@@ -89,11 +90,13 @@ func (c *Container) copyFromArchive(path string, chown bool, rename map[string]s
defer unmount()
defer decompressed.Close()
putOptions := buildahCopiah.PutOptions{
- UIDMap: c.config.IDMappings.UIDMap,
- GIDMap: c.config.IDMappings.GIDMap,
- ChownDirs: idPair,
- ChownFiles: idPair,
- Rename: rename,
+ UIDMap: c.config.IDMappings.UIDMap,
+ GIDMap: c.config.IDMappings.GIDMap,
+ ChownDirs: idPair,
+ ChownFiles: idPair,
+ NoOverwriteDirNonDir: noOverwriteDirNonDir,
+ NoOverwriteNonDirDir: noOverwriteDirNonDir,
+ Rename: rename,
}
return c.joinMountAndExec(
@@ -194,7 +197,7 @@ func getContainerUser(container *Container, mountPoint string) (specs.User, erro
if !strings.Contains(userspec, ":") {
groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
if err2 != nil {
- if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
+ if !errors.Is(err2, chrootuser.ErrNoSuchUser) && err == nil {
err = err2
}
} else {
@@ -251,7 +254,7 @@ func (c *Container) joinMountAndExec(f func() error) error {
inHostPidNS, err := c.inHostPidNS()
if err != nil {
- errChan <- errors.Wrap(err, "checking inHostPidNS")
+ errChan <- fmt.Errorf("checking inHostPidNS: %w", err)
return
}
var pidFD *os.File
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index c05e7fd94..d3c80e896 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -2,6 +2,8 @@ package libpod
import (
"context"
+ "errors"
+ "fmt"
"io/ioutil"
"net/http"
"os"
@@ -9,10 +11,10 @@ import (
"strconv"
"time"
+ "github.com/containers/common/pkg/resize"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/storage/pkg/stringid"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -79,11 +81,11 @@ type ExecConfig struct {
type ExecSession struct {
// Id is the ID of the exec session.
// Named somewhat strangely to not conflict with ID().
- // nolint:stylecheck,revive
+ //nolint:stylecheck,revive
Id string `json:"id"`
// ContainerId is the ID of the container this exec session belongs to.
// Named somewhat strangely to not conflict with ContainerID().
- // nolint:stylecheck,revive
+ //nolint:stylecheck,revive
ContainerId string `json:"containerId"`
// State is the state of the exec session.
@@ -112,7 +114,7 @@ func (e *ExecSession) ContainerID() string {
// configuration and current state.
func (e *ExecSession) Inspect() (*define.InspectExecSession, error) {
if e.Config == nil {
- return nil, errors.Wrapf(define.ErrInternal, "given exec session does not have a configuration block")
+ return nil, fmt.Errorf("given exec session does not have a configuration block: %w", define.ErrInternal)
}
output := new(define.InspectExecSession)
@@ -165,18 +167,18 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
// Verify our config
if config == nil {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a configuration to ExecCreate")
+ return "", fmt.Errorf("must provide a configuration to ExecCreate: %w", define.ErrInvalidArg)
}
if len(config.Command) == 0 {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty command to start an exec session")
+ return "", fmt.Errorf("must provide a non-empty command to start an exec session: %w", define.ErrInvalidArg)
}
if config.ExitCommandDelay > 0 && len(config.ExitCommand) == 0 {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty exit command if giving an exit command delay")
+ return "", fmt.Errorf("must provide a non-empty exit command if giving an exit command delay: %w", define.ErrInvalidArg)
}
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
- return "", errors.Wrapf(define.ErrCtrStateInvalid, "can only create exec sessions on running containers")
+ return "", fmt.Errorf("can only create exec sessions on running containers: %w", define.ErrCtrStateInvalid)
}
// Generate an ID for our new exec session
@@ -203,7 +205,7 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
session.State = define.ExecStateCreated
session.Config = new(ExecConfig)
if err := JSONDeepCopy(config, session.Config); err != nil {
- return "", errors.Wrapf(err, "error copying exec configuration into exec session")
+ return "", fmt.Errorf("error copying exec configuration into exec session: %w", err)
}
if len(session.Config.ExitCommand) > 0 {
@@ -243,16 +245,16 @@ func (c *Container) ExecStart(sessionID string) error {
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
+ return fmt.Errorf("can only start exec sessions when their container is running: %w", define.ErrCtrStateInvalid)
}
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
if session.State != define.ExecStateCreated {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
+ return fmt.Errorf("can only start created exec sessions, while container %s session %s state is %q: %w", c.ID(), session.ID(), session.State.String(), define.ErrExecSessionStateInvalid)
}
logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
@@ -277,11 +279,13 @@ func (c *Container) ExecStart(sessionID string) error {
return c.save()
}
+func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachStreams, newSize *resize.TerminalSize) error {
+ return c.execStartAndAttach(sessionID, streams, newSize, false)
+}
+
// ExecStartAndAttach starts and attaches to an exec session in a container.
// newSize resizes the tty to this size before the process is started; it must be nil if the exec session has no tty
-// TODO: Should we include detach keys in the signature to allow override?
-// TODO: How do we handle AttachStdin/AttachStdout/AttachStderr?
-func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachStreams, newSize *define.TerminalSize) error {
+func (c *Container) execStartAndAttach(sessionID string, streams *define.AttachStreams, newSize *resize.TerminalSize, isHealthcheck bool) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -293,16 +297,16 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
+ return fmt.Errorf("can only start exec sessions when their container is running: %w", define.ErrCtrStateInvalid)
}
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
if session.State != define.ExecStateCreated {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
+ return fmt.Errorf("can only start created exec sessions, while container %s session %s state is %q: %w", c.ID(), session.ID(), session.State.String(), define.ErrExecSessionStateInvalid)
}
logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
@@ -317,7 +321,12 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
return err
}
- c.newContainerEvent(events.Exec)
+ if isHealthcheck {
+ c.newContainerEvent(events.HealthStatus)
+ } else {
+ c.newContainerEvent(events.Exec)
+ }
+
logrus.Debugf("Successfully started exec session %s in container %s", session.ID(), c.ID())
var lastErr error
@@ -363,7 +372,7 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
- return errors.Wrapf(err, "error syncing container %s state to update exec session %s", c.ID(), sessionID)
+ return fmt.Errorf("error syncing container %s state to update exec session %s: %w", c.ID(), sessionID, err)
}
// Now handle the error from readExecExitCode above.
@@ -415,7 +424,7 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
// ExecHTTPStartAndAttach starts and performs an HTTP attach to an exec session.
// newSize resizes the tty to this size before the process is started; it must be nil if the exec session has no tty
func (c *Container) ExecHTTPStartAndAttach(sessionID string, r *http.Request, w http.ResponseWriter,
- streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, newSize *define.TerminalSize) error {
+ streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, newSize *resize.TerminalSize) error {
// TODO: How do we combine streams with the default streams set in the exec session?
// Ensure that we don't leak a goroutine here
@@ -434,16 +443,16 @@ func (c *Container) ExecHTTPStartAndAttach(sessionID string, r *http.Request, w
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
+ return fmt.Errorf("can only start exec sessions when their container is running: %w", define.ErrCtrStateInvalid)
}
if session.State != define.ExecStateCreated {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
+ return fmt.Errorf("can only start created exec sessions, while container %s session %s state is %q: %w", c.ID(), session.ID(), session.State.String(), define.ErrExecSessionStateInvalid)
}
logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
@@ -560,11 +569,11 @@ func (c *Container) ExecStop(sessionID string, timeout *uint) error {
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
if session.State != define.ExecStateRunning {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is %q, can only stop running sessions", c.ID(), session.ID(), session.State.String())
+ return fmt.Errorf("container %s exec session %s is %q, can only stop running sessions: %w", c.ID(), session.ID(), session.State.String(), define.ErrExecSessionStateInvalid)
}
logrus.Infof("Stopping container %s exec session %s", c.ID(), session.ID())
@@ -610,7 +619,7 @@ func (c *Container) ExecCleanup(sessionID string) error {
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
if session.State == define.ExecStateRunning {
@@ -621,7 +630,7 @@ func (c *Container) ExecCleanup(sessionID string) error {
}
if alive {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID())
+ return fmt.Errorf("cannot clean up container %s exec session %s as it is running: %w", c.ID(), session.ID(), define.ErrExecSessionStateInvalid)
}
if err := retrieveAndWriteExecExitCode(c, session.ID()); err != nil {
@@ -648,7 +657,7 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
logrus.Infof("Removing container %s exec session %s", c.ID(), session.ID())
@@ -669,7 +678,7 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
if session.State == define.ExecStateRunning {
if !force {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is still running, cannot remove", c.ID(), session.ID())
+ return fmt.Errorf("container %s exec session %s is still running, cannot remove: %w", c.ID(), session.ID(), define.ErrExecSessionStateInvalid)
}
// Stop the session
@@ -703,7 +712,7 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
// ExecResize resizes the TTY of the given exec session. Only available if the
// exec session created a TTY.
-func (c *Container) ExecResize(sessionID string, newSize define.TerminalSize) error {
+func (c *Container) ExecResize(sessionID string, newSize resize.TerminalSize) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -715,13 +724,13 @@ func (c *Container) ExecResize(sessionID string, newSize define.TerminalSize) er
session, ok := c.state.ExecSessions[sessionID]
if !ok {
- return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+ return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
logrus.Infof("Resizing container %s exec session %s to %+v", c.ID(), session.ID(), newSize)
if session.State != define.ExecStateRunning {
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot resize container %s exec session %s as it is not running", c.ID(), session.ID())
+ return fmt.Errorf("cannot resize container %s exec session %s as it is not running: %w", c.ID(), session.ID(), define.ErrExecSessionStateInvalid)
}
// The exec session may have exited since we last updated.
@@ -737,7 +746,7 @@ func (c *Container) ExecResize(sessionID string, newSize define.TerminalSize) er
logrus.Errorf("Saving state of container %s: %v", c.ID(), err)
}
- return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot resize container %s exec session %s as it has stopped", c.ID(), session.ID())
+ return fmt.Errorf("cannot resize container %s exec session %s as it has stopped: %w", c.ID(), session.ID(), define.ErrExecSessionStateInvalid)
}
// Make sure the exec session is still running.
@@ -745,10 +754,14 @@ func (c *Container) ExecResize(sessionID string, newSize define.TerminalSize) er
return c.ociRuntime.ExecAttachResize(c, sessionID, newSize)
}
+func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resize <-chan resize.TerminalSize) (int, error) {
+ return c.exec(config, streams, resize, false)
+}
+
// Exec emulates the old Libpod exec API, providing a single call to create,
// run, and remove an exec session. Returns exit code and error. Exit code is
// not guaranteed to be set sanely if error is not nil.
-func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resize <-chan define.TerminalSize) (int, error) {
+func (c *Container) exec(config *ExecConfig, streams *define.AttachStreams, resizeChan <-chan resize.TerminalSize, isHealthcheck bool) (int, error) {
sessionID, err := c.ExecCreate(config)
if err != nil {
return -1, err
@@ -761,15 +774,15 @@ func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resi
// API there.
// TODO: Refactor so this is closed here, before we remove the exec
// session.
- var size *define.TerminalSize
- if resize != nil {
- s := <-resize
+ var size *resize.TerminalSize
+ if resizeChan != nil {
+ s := <-resizeChan
size = &s
go func() {
logrus.Debugf("Sending resize events to exec session %s", sessionID)
- for resizeRequest := range resize {
+ for resizeRequest := range resizeChan {
if err := c.ExecResize(sessionID, resizeRequest); err != nil {
- if errors.Cause(err) == define.ErrExecSessionStateInvalid {
+ if errors.Is(err, define.ErrExecSessionStateInvalid) {
// The exec session stopped
// before we could resize.
logrus.Infof("Missed resize on exec session %s, already stopped", sessionID)
@@ -782,13 +795,13 @@ func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resi
}()
}
- if err := c.ExecStartAndAttach(sessionID, streams, size); err != nil {
+ if err := c.execStartAndAttach(sessionID, streams, size, isHealthcheck); err != nil {
return -1, err
}
session, err := c.execSessionNoCopy(sessionID)
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchExecSession {
+ if errors.Is(err, define.ErrNoSuchExecSession) {
// TODO: If a proper Context is ever plumbed in here, we
// should use it.
// As things stand, though, it's not worth it - this
@@ -796,7 +809,7 @@ func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resi
// streaming.
diedEvent, err := c.runtime.GetExecDiedEvent(context.Background(), c.ID(), sessionID)
if err != nil {
- return -1, errors.Wrapf(err, "error retrieving exec session %s exit code", sessionID)
+ return -1, fmt.Errorf("error retrieving exec session %s exit code: %w", sessionID, err)
}
return diedEvent.ContainerExitCode, nil
}
@@ -804,7 +817,7 @@ func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resi
}
exitCode := session.ExitCode
if err := c.ExecRemove(sessionID, false); err != nil {
- if errors.Cause(err) == define.ErrNoSuchExecSession {
+ if errors.Is(err, define.ErrNoSuchExecSession) {
return exitCode, nil
}
return -1, err
@@ -826,7 +839,7 @@ func (c *Container) cleanupExecBundle(sessionID string) (err error) {
}
if pathErr, ok := err.(*os.PathError); ok {
err = pathErr.Err
- if errors.Cause(err) == unix.ENOTEMPTY || errors.Cause(err) == unix.EBUSY {
+ if errors.Is(err, unix.ENOTEMPTY) || errors.Is(err, unix.EBUSY) {
// give other processes a chance to use the container
if !c.batched {
if err := c.save(); err != nil {
@@ -898,7 +911,7 @@ func (c *Container) createExecBundle(sessionID string) (retErr error) {
if err := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
- return errors.Wrapf(err, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID))
+ return fmt.Errorf("error creating OCI runtime exit file path %s: %w", c.execExitFileDir(sessionID), err)
}
}
return nil
@@ -937,7 +950,7 @@ func (c *Container) getExecSessionPID(sessionID string) (int, error) {
return oldSession.PID, nil
}
- return -1, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", sessionID, c.ID())
+ return -1, fmt.Errorf("no exec session with ID %s found in container %s: %w", sessionID, c.ID(), define.ErrNoSuchExecSession)
}
// getKnownExecSessions gets a list of all exec sessions we think are running,
@@ -1051,7 +1064,7 @@ func (c *Container) removeAllExecSessions() error {
}
// Delete all exec sessions
if err := c.runtime.state.RemoveContainerExecSessions(c); err != nil {
- if errors.Cause(err) != define.ErrCtrRemoved {
+ if !errors.Is(err, define.ErrCtrRemoved) {
if lastErr != nil {
logrus.Errorf("Stopping container %s exec sessions: %v", c.ID(), lastErr)
}
@@ -1061,7 +1074,7 @@ func (c *Container) removeAllExecSessions() error {
c.state.ExecSessions = nil
c.state.LegacyExecSessions = nil
if err := c.save(); err != nil {
- if errors.Cause(err) != define.ErrCtrRemoved {
+ if !errors.Is(err, define.ErrCtrRemoved) {
if lastErr != nil {
logrus.Errorf("Stopping container %s exec sessions: %v", c.ID(), lastErr)
}
@@ -1102,13 +1115,13 @@ func writeExecExitCode(c *Container, sessionID string, exitCode int) error {
// If we can't do this, no point in continuing, any attempt to save
// would write garbage to the DB.
if err := c.syncContainer(); err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrRemoved {
+ if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
// Container's entirely removed. We can't save status,
// but the container's entirely removed, so we don't
// need to. Exit without error.
return nil
}
- return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), sessionID)
+ return fmt.Errorf("error syncing container %s state to remove exec session %s: %w", c.ID(), sessionID, err)
}
return justWriteExecExitCode(c, sessionID, exitCode)
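ExecStartAndAttach and Exec above become thin exported wrappers over private variants that take an isHealthcheck flag, so health checks can emit a HealthStatus event instead of an Exec event. A minimal sketch of the pattern; the ctr type and method bodies are illustrative only:

    package main

    import "fmt"

    type ctr struct{}

    // Exec is the exported entry point; it fixes isHealthcheck to false.
    func (c *ctr) Exec() error { return c.exec(false) }

    // exec is the private variant; healthcheck code calls it directly so it
    // can emit a health_status event instead of an exec event.
    func (c *ctr) exec(isHealthcheck bool) error {
        if isHealthcheck {
            fmt.Println("event: health_status")
        } else {
            fmt.Println("event: exec")
        }
        return nil
    }

    func main() {
        c := &ctr{}
        _ = c.Exec()     // event: exec
        _ = c.exec(true) // event: health_status
    }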
diff --git a/libpod/container_graph.go b/libpod/container_graph.go
index eeb0f02fa..67b1abc34 100644
--- a/libpod/container_graph.go
+++ b/libpod/container_graph.go
@@ -2,10 +2,10 @@ package libpod
import (
"context"
+ "fmt"
"strings"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -60,7 +60,7 @@ func BuildContainerGraph(ctrs []*Container) (*ContainerGraph, error) {
// Get the dep's node
depNode, ok := graph.nodes[dep]
if !ok {
- return nil, errors.Wrapf(define.ErrNoSuchCtr, "container %s depends on container %s not found in input list", node.id, dep)
+ return nil, fmt.Errorf("container %s depends on container %s not found in input list: %w", node.id, dep, define.ErrNoSuchCtr)
}
// Add the dependent node to the node's dependencies
@@ -85,7 +85,7 @@ func BuildContainerGraph(ctrs []*Container) (*ContainerGraph, error) {
if err != nil {
return nil, err
} else if cycle {
- return nil, errors.Wrapf(define.ErrInternal, "cycle found in container dependency graph")
+ return nil, fmt.Errorf("cycle found in container dependency graph: %w", define.ErrInternal)
}
return graph, nil
@@ -150,7 +150,7 @@ func detectCycles(graph *ContainerGraph) (bool, error) {
if info.lowLink == info.index {
l := len(stack)
if l == 0 {
- return false, errors.Wrapf(define.ErrInternal, "empty stack in detectCycles")
+ return false, fmt.Errorf("empty stack in detectCycles: %w", define.ErrInternal)
}
// Pop off the stack
@@ -160,7 +160,7 @@ func detectCycles(graph *ContainerGraph) (bool, error) {
// Popped item is no longer on the stack, mark as such
topInfo, ok := nodes[topOfStack.id]
if !ok {
- return false, errors.Wrapf(define.ErrInternal, "error finding node info for %s", topOfStack.id)
+ return false, fmt.Errorf("error finding node info for %s: %w", topOfStack.id, define.ErrInternal)
}
topInfo.onStack = false
@@ -203,7 +203,7 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError
if setError {
// Mark us as visited, and set an error
ctrsVisited[node.id] = true
- ctrErrors[node.id] = errors.Wrapf(define.ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id)
+ ctrErrors[node.id] = fmt.Errorf("a dependency of container %s failed to start: %w", node.id, define.ErrCtrStateInvalid)
// Hit anyone who depends on us, and set errors on them too
for _, successor := range node.dependedOn {
@@ -243,7 +243,7 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError
} else if len(depsStopped) > 0 {
// Our dependencies are not running
depsList := strings.Join(depsStopped, ",")
- ctrErrors[node.id] = errors.Wrapf(define.ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
+ ctrErrors[node.id] = fmt.Errorf("the following dependencies of container %s are not running: %s: %w", node.id, depsList, define.ErrCtrStateInvalid)
ctrErrored = true
}
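detectCycles above keeps Tarjan-style index/lowLink bookkeeping on an explicit stack. Where only the presence of a cycle matters, a three-color DFS gives the same verdict; a minimal standalone sketch, not the libpod implementation:

    package main

    import "fmt"

    // hasCycle reports whether the dependency graph (id -> deps) contains a
    // cycle, using three-color DFS: a back edge to a node on the current
    // DFS path (gray) proves a cycle.
    func hasCycle(graph map[string][]string) bool {
        const (
            white = iota // unvisited
            gray         // on the current DFS path
            black        // fully explored
        )
        color := make(map[string]int)
        var visit func(string) bool
        visit = func(n string) bool {
            color[n] = gray
            for _, dep := range graph[n] {
                switch color[dep] {
                case gray:
                    return true // back edge: cycle
                case white:
                    if visit(dep) {
                        return true
                    }
                }
            }
            color[n] = black
            return false
        }
        for n := range graph {
            if color[n] == white && visit(n) {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(hasCycle(map[string][]string{"a": {"b"}, "b": {"a"}})) // true
        fmt.Println(hasCycle(map[string][]string{"a": {"b"}, "b": nil}))   // false
    }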
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 93240812d..fa2130a28 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "errors"
"fmt"
"sort"
"strings"
@@ -14,7 +15,6 @@ import (
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/runtime-tools/validate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/syndtr/gocapability/capability"
)
@@ -24,15 +24,15 @@ import (
func (c *Container) inspectLocked(size bool) (*define.InspectContainerData, error) {
storeCtr, err := c.runtime.store.Container(c.ID())
if err != nil {
- return nil, errors.Wrapf(err, "error getting container from store %q", c.ID())
+ return nil, fmt.Errorf("error getting container from store %q: %w", c.ID(), err)
}
layer, err := c.runtime.store.Layer(storeCtr.LayerID)
if err != nil {
- return nil, errors.Wrapf(err, "error reading information about layer %q", storeCtr.LayerID)
+ return nil, fmt.Errorf("error reading information about layer %q: %w", storeCtr.LayerID, err)
}
driverData, err := driver.GetDriverData(c.runtime.store, layer.ID)
if err != nil {
- return nil, errors.Wrapf(err, "error getting graph driver info %q", c.ID())
+ return nil, fmt.Errorf("error getting graph driver info %q: %w", c.ID(), err)
}
return c.getContainerInspectData(size, driverData)
}
@@ -241,7 +241,7 @@ func (c *Container) GetMounts(namedVolumes []*ContainerNamedVolume, imageVolumes
// volume.
volFromDB, err := c.runtime.state.Volume(volume.Name)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID())
+ return nil, fmt.Errorf("error looking up volume %s in container %s config: %w", volume.Name, c.ID(), err)
}
mountStruct.Driver = volFromDB.Driver()
@@ -794,28 +794,8 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
hostConfig.PidMode = pidMode
// UTS namespace mode
- utsMode := ""
- if c.config.UTSNsCtr != "" {
- utsMode = fmt.Sprintf("container:%s", c.config.UTSNsCtr)
- } else if ctrSpec.Linux != nil {
- // Locate the spec's UTS namespace.
- // If there is none, it's uts=host.
- // If there is one and it has a path, it's "ns:".
- // If there is no path, it's default - the empty string.
- for _, ns := range ctrSpec.Linux.Namespaces {
- if ns.Type == spec.UTSNamespace {
- if ns.Path != "" {
- utsMode = fmt.Sprintf("ns:%s", ns.Path)
- } else {
- utsMode = "private"
- }
- break
- }
- }
- if utsMode == "" {
- utsMode = "host"
- }
- }
+ utsMode := c.NamespaceMode(spec.UTSNamespace, ctrSpec)
+
hostConfig.UTSMode = utsMode
// User namespace mode
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 7494eb3ec..560b4a1c1 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -3,6 +3,7 @@ package libpod
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -17,9 +18,11 @@ import (
"github.com/containers/buildah/pkg/overlay"
butil "github.com/containers/buildah/util"
"github.com/containers/common/libnetwork/etchosts"
+ "github.com/containers/common/libnetwork/resolvconf"
"github.com/containers/common/pkg/cgroups"
"github.com/containers/common/pkg/chown"
"github.com/containers/common/pkg/config"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/ctime"
@@ -39,7 +42,6 @@ import (
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -83,7 +85,7 @@ func (c *Container) rootFsSize() (int64, error) {
for layer.Parent != "" {
layerSize, err := c.runtime.store.DiffSize(layer.Parent, layer.ID)
if err != nil {
- return size, errors.Wrapf(err, "getting diffsize of layer %q and its parent %q", layer.ID, layer.Parent)
+ return size, fmt.Errorf("getting diffsize of layer %q and its parent %q: %w", layer.ID, layer.Parent, err)
}
size += layerSize
layer, err = c.runtime.store.Layer(layer.Parent)
@@ -199,12 +201,12 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error {
c.state.FinishedTime = ctime.Created(fi)
statusCodeStr, err := ioutil.ReadFile(exitFile)
if err != nil {
- return errors.Wrapf(err, "failed to read exit file for container %s", c.ID())
+ return fmt.Errorf("failed to read exit file for container %s: %w", c.ID(), err)
}
statusCode, err := strconv.Atoi(string(statusCodeStr))
if err != nil {
- return errors.Wrapf(err, "error converting exit status code (%q) for container %s to int",
- c.ID(), statusCodeStr)
+ return fmt.Errorf("error converting exit status code (%q, err) for container %s to int: %w",
+ c.ID(), statusCodeStr, err)
}
c.state.ExitCode = int32(statusCode)
@@ -218,7 +220,7 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error {
// Write an event for the container's death
c.newContainerExitedEvent(c.state.ExitCode)
- return nil
+ return c.runtime.state.AddContainerExitCode(c.ID(), c.state.ExitCode)
}
func (c *Container) shouldRestart() bool {
@@ -266,7 +268,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err
if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
return false, nil
} else if c.state.State == define.ContainerStateUnknown {
- return false, errors.Wrapf(define.ErrInternal, "invalid container state encountered in restart attempt")
+ return false, fmt.Errorf("invalid container state encountered in restart attempt: %w", define.ErrInternal)
}
c.newContainerEvent(events.Restart)
@@ -289,7 +291,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err
return false, err
}
- // setup slirp4netns again because slirp4netns will die when conmon exits
+ // set up slirp4netns again because slirp4netns will die when conmon exits
if c.config.NetMode.IsSlirp4netns() {
err := c.runtime.setupSlirp4netns(c, c.state.NetNS)
if err != nil {
@@ -297,7 +299,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err
}
}
- // setup rootlesskit port forwarder again since it dies when conmon exits
+ // set up rootlesskit port forwarder again since it dies when conmon exits
// we use rootlesskit port forwarder only as rootless and when bridge network is used
if rootless.IsRootless() && c.config.NetMode.IsBridge() && len(c.config.PortMappings) > 0 {
err := c.runtime.setupRootlessPortMappingViaRLK(c, c.state.NetNS.Path(), c.state.NetworkStatus)
@@ -369,7 +371,7 @@ func (c *Container) syncContainer() error {
}
if !c.valid {
- return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
+ return fmt.Errorf("container %s is not valid: %w", c.ID(), define.ErrCtrRemoved)
}
return nil
@@ -428,16 +430,16 @@ func (c *Container) setupStorageMapping(dest, from *storage.IDMappingOptions) {
// Create container root filesystem for use
func (c *Container) setupStorage(ctx context.Context) error {
if !c.valid {
- return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
+ return fmt.Errorf("container %s is not valid: %w", c.ID(), define.ErrCtrRemoved)
}
if c.state.State != define.ContainerStateConfigured {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID())
+ return fmt.Errorf("container %s must be in Configured state to have storage set up: %w", c.ID(), define.ErrCtrStateInvalid)
}
// Need both an image ID and image name, plus a bool telling us whether to use the image configuration
if c.config.Rootfs == "" && (c.config.RootfsImageID == "" || c.config.RootfsImageName == "") {
- return errors.Wrapf(define.ErrInvalidArg, "must provide image ID and image name to use an image")
+ return fmt.Errorf("must provide image ID and image name to use an image: %w", define.ErrInvalidArg)
}
options := storage.ContainerOptions{
IDMappingOptions: storage.IDMappingOptions{
@@ -473,7 +475,7 @@ func (c *Container) setupStorage(ctx context.Context) error {
defOptions, err := storage.GetMountOptions(c.runtime.store.GraphDriverName(), c.runtime.store.GraphOptions())
if err != nil {
- return errors.Wrapf(err, "error getting default mount options")
+ return fmt.Errorf("error getting default mount options: %w", err)
}
var newOptions []string
for _, opt := range defOptions {
@@ -503,12 +505,12 @@ func (c *Container) setupStorage(ctx context.Context) error {
}
containerInfo, containerInfoErr = c.runtime.storageService.CreateContainerStorage(ctx, c.runtime.imageContext, c.config.RootfsImageName, c.config.RootfsImageID, c.config.Name, c.config.ID, options)
- if !generateName || errors.Cause(containerInfoErr) != storage.ErrDuplicateName {
+ if !generateName || !errors.Is(containerInfoErr, storage.ErrDuplicateName) {
break
}
}
if containerInfoErr != nil {
- return errors.Wrapf(containerInfoErr, "error creating container storage")
+ return fmt.Errorf("error creating container storage: %w", containerInfoErr)
}
// only reconfig IDMappings if layer was mounted from storage
@@ -550,7 +552,7 @@ func (c *Container) setupStorage(ctx context.Context) error {
artifacts := filepath.Join(c.config.StaticDir, artifactsDir)
if err := os.MkdirAll(artifacts, 0755); err != nil {
- return errors.Wrap(err, "error creating artifacts directory")
+ return fmt.Errorf("error creating artifacts directory: %w", err)
}
return nil
@@ -579,16 +581,16 @@ func (c *Container) processLabel(processLabel string) (string, error) {
// Tear down a container's storage prior to removal
func (c *Container) teardownStorage() error {
if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID())
+ return fmt.Errorf("cannot remove storage for container %s as it is running or paused: %w", c.ID(), define.ErrCtrStateInvalid)
}
artifacts := filepath.Join(c.config.StaticDir, artifactsDir)
if err := os.RemoveAll(artifacts); err != nil {
- return errors.Wrapf(err, "error removing container %s artifacts %q", c.ID(), artifacts)
+ return fmt.Errorf("error removing container %s artifacts %q: %w", c.ID(), artifacts, err)
}
if err := c.cleanupStorage(); err != nil {
- return errors.Wrapf(err, "failed to cleanup container %s storage", c.ID())
+ return fmt.Errorf("failed to clean up container %s storage: %w", c.ID(), err)
}
if err := c.runtime.storageService.DeleteContainer(c.ID()); err != nil {
@@ -596,12 +598,12 @@ func (c *Container) teardownStorage() error {
// error - we wanted it gone, it is already gone.
// Potentially another tool using containers/storage already
// removed it?
- if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown) {
logrus.Infof("Storage for container %s already removed", c.ID())
return nil
}
- return errors.Wrapf(err, "error removing container %s root filesystem", c.ID())
+ return fmt.Errorf("error removing container %s root filesystem: %w", c.ID(), err)
}
return nil
@@ -645,14 +647,14 @@ func (c *Container) refresh() error {
}
if !c.valid {
- return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid - may have been removed", c.ID())
+ return fmt.Errorf("container %s is not valid - may have been removed: %w", c.ID(), define.ErrCtrRemoved)
}
// We need to get the container's temporary directory from c/storage
// It was lost in the reboot and must be recreated
dir, err := c.runtime.storageService.GetRunDir(c.ID())
if err != nil {
- return errors.Wrapf(err, "error retrieving temporary directory for container %s", c.ID())
+ return fmt.Errorf("error retrieving temporary directory for container %s: %w", c.ID(), err)
}
c.state.RunDir = dir
@@ -666,7 +668,7 @@ func (c *Container) refresh() error {
}
root := filepath.Join(c.runtime.config.Engine.TmpDir, "containers-root", c.ID())
if err := os.MkdirAll(root, 0755); err != nil {
- return errors.Wrapf(err, "error creating userNS tmpdir for container %s", c.ID())
+ return fmt.Errorf("error creating userNS tmpdir for container %s: %w", c.ID(), err)
}
if err := os.Chown(root, c.RootUID(), c.RootGID()); err != nil {
return err
@@ -676,7 +678,7 @@ func (c *Container) refresh() error {
// We need to pick up a new lock
lock, err := c.runtime.lockManager.AllocateAndRetrieveLock(c.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error acquiring lock %d for container %s", c.config.LockID, c.ID())
+ return fmt.Errorf("error acquiring lock %d for container %s: %w", c.config.LockID, c.ID(), err)
}
c.lock = lock
@@ -691,13 +693,13 @@ func (c *Container) refresh() error {
if c.config.rewrite {
// SafeRewriteContainerConfig must be used with care. Make sure to not change config fields by accident.
if err := c.runtime.state.SafeRewriteContainerConfig(c, "", "", c.config); err != nil {
- return errors.Wrapf(err, "failed to rewrite the config for container %s", c.config.ID)
+ return fmt.Errorf("failed to rewrite the config for container %s: %w", c.config.ID, err)
}
c.config.rewrite = false
}
if err := c.save(); err != nil {
- return errors.Wrapf(err, "error refreshing state for container %s", c.ID())
+ return fmt.Errorf("error refreshing state for container %s: %w", c.ID(), err)
}
// Remove ctl and attach files, which may persist across reboot
@@ -714,26 +716,26 @@ func (c *Container) removeConmonFiles() error {
// Files are allowed to not exist, so ignore ENOENT
attachFile, err := c.AttachSocketPath()
if err != nil {
- return errors.Wrapf(err, "failed to get attach socket path for container %s", c.ID())
+ return fmt.Errorf("failed to get attach socket path for container %s: %w", c.ID(), err)
}
if err := os.Remove(attachFile); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s attach file", c.ID())
+ return fmt.Errorf("error removing container %s attach file: %w", c.ID(), err)
}
ctlFile := filepath.Join(c.bundlePath(), "ctl")
if err := os.Remove(ctlFile); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s ctl file", c.ID())
+ return fmt.Errorf("error removing container %s ctl file: %w", c.ID(), err)
}
winszFile := filepath.Join(c.bundlePath(), "winsz")
if err := os.Remove(winszFile); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s winsz file", c.ID())
+ return fmt.Errorf("error removing container %s winsz file: %w", c.ID(), err)
}
oomFile := filepath.Join(c.bundlePath(), "oom")
if err := os.Remove(oomFile); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s OOM file", c.ID())
+ return fmt.Errorf("error removing container %s OOM file: %w", c.ID(), err)
}
// Remove the exit file so we don't leak memory in tmpfs
@@ -742,7 +744,7 @@ func (c *Container) removeConmonFiles() error {
return err
}
if err := os.Remove(exitFile); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s exit file", c.ID())
+ return fmt.Errorf("error removing container %s exit file: %w", c.ID(), err)
}
return nil
@@ -753,7 +755,7 @@ func (c *Container) export(path string) error {
if !c.state.Mounted {
containerMount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
if err != nil {
- return errors.Wrapf(err, "mounting container %q", c.ID())
+ return fmt.Errorf("mounting container %q: %w", c.ID(), err)
}
mountPoint = containerMount
defer func() {
@@ -765,12 +767,12 @@ func (c *Container) export(path string) error {
input, err := archive.Tar(mountPoint, archive.Uncompressed)
if err != nil {
- return errors.Wrapf(err, "error reading container directory %q", c.ID())
+ return fmt.Errorf("error reading container directory %q: %w", c.ID(), err)
}
outFile, err := os.Create(path)
if err != nil {
- return errors.Wrapf(err, "error creating file %q", path)
+ return fmt.Errorf("error creating file %q: %w", path, err)
}
defer outFile.Close()
@@ -783,24 +785,10 @@ func (c *Container) getArtifactPath(name string) string {
return filepath.Join(c.config.StaticDir, artifactsDir, name)
}
-// Used with Wait() to determine if a container has exited
-func (c *Container) isStopped() (bool, int32, error) {
- if !c.batched {
- c.lock.Lock()
- defer c.lock.Unlock()
- }
- err := c.syncContainer()
- if err != nil {
- return true, -1, err
- }
-
- return !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopping), c.state.ExitCode, nil
-}
-
// save container state to the database
func (c *Container) save() error {
if err := c.runtime.state.SaveContainer(c); err != nil {
- return errors.Wrapf(err, "error saving container %s state", c.ID())
+ return fmt.Errorf("error saving container %s state: %w", c.ID(), err)
}
return nil
}
@@ -811,7 +799,7 @@ func (c *Container) save() error {
func (c *Container) prepareToStart(ctx context.Context, recursive bool) (retErr error) {
// Container must be created or stopped to be started
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
+ return fmt.Errorf("container %s must be in Created or Stopped state to be started: %w", c.ID(), define.ErrCtrStateInvalid)
}
if !recursive {
@@ -854,11 +842,11 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (retErr
func (c *Container) checkDependenciesAndHandleError() error {
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
+ return fmt.Errorf("error checking dependencies for container %s: %w", c.ID(), err)
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
- return errors.Wrapf(define.ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
+ return fmt.Errorf("some dependencies of container %s are not started: %s: %w", c.ID(), depString, define.ErrCtrStateInvalid)
}
return nil
@@ -873,7 +861,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
depVisitedCtrs := make(map[string]*Container)
if err := c.getAllDependencies(depVisitedCtrs); err != nil {
- return errors.Wrapf(err, "error starting dependency for container %s", c.ID())
+ return fmt.Errorf("error starting dependency for container %s: %w", c.ID(), err)
}
// Because of how Go handles passing slices through functions, a slice cannot grow between function calls
@@ -886,7 +874,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
// Build a dependency graph of containers
graph, err := BuildContainerGraph(depCtrs)
if err != nil {
- return errors.Wrapf(err, "error generating dependency graph for container %s", c.ID())
+ return fmt.Errorf("error generating dependency graph for container %s: %w", c.ID(), err)
}
// If there are no containers without dependencies, we can't start
@@ -896,7 +884,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
if len(graph.nodes) == 0 {
return nil
}
- return errors.Wrapf(define.ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID())
+ return fmt.Errorf("all dependencies have dependencies of %s: %w", c.ID(), define.ErrNoSuchCtr)
}
ctrErrors := make(map[string]error)
@@ -912,7 +900,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
for _, e := range ctrErrors {
logrus.Errorf("%q", e)
}
- return errors.Wrapf(define.ErrInternal, "error starting some containers")
+ return fmt.Errorf("error starting some containers: %w", define.ErrInternal)
}
return nil
}
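startDependencies refuses to proceed when the graph has no node that is free of dependencies, since that means every container is waiting on another one. A toy sketch of that check using Kahn's algorithm, with assumed shapes (BuildContainerGraph's internals are not part of this patch):

    package main

    import (
        "errors"
        "fmt"
    )

    // startOrder is a hypothetical helper: deps maps a container to the
    // containers it depends on.
    func startOrder(deps map[string][]string) ([]string, error) {
        indeg := make(map[string]int)
        dependents := make(map[string][]string)
        for n, ds := range deps {
            if _, ok := indeg[n]; !ok {
                indeg[n] = 0
            }
            for _, d := range ds {
                indeg[n]++
                dependents[d] = append(dependents[d], n)
                if _, ok := indeg[d]; !ok {
                    indeg[d] = 0
                }
            }
        }
        var queue, order []string
        for n, d := range indeg {
            if d == 0 {
                queue = append(queue, n) // safe to start immediately
            }
        }
        // No roots at all: every node depends on something, i.e. a cycle.
        if len(queue) == 0 && len(indeg) > 0 {
            return nil, errors.New("all dependencies have dependencies")
        }
        for len(queue) > 0 {
            n := queue[0]
            queue = queue[1:]
            order = append(order, n)
            for _, m := range dependents[n] {
                indeg[m]--
                if indeg[m] == 0 {
                    queue = append(queue, m)
                }
            }
        }
        if len(order) != len(indeg) {
            return nil, errors.New("dependency cycle detected")
        }
        return order, nil
    }

    func main() {
        fmt.Println(startOrder(map[string][]string{
            "web": {"db"}, "db": {"infra"}, "infra": nil,
        }))
        // Output: [infra db web] <nil>
    }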
@@ -968,13 +956,13 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
// Get the dependency container
depCtr, err := c.runtime.state.Container(dep)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving dependency %s of container %s from state", dep, c.ID())
+ return nil, fmt.Errorf("error retrieving dependency %s of container %s from state: %w", dep, c.ID(), err)
}
// Check the status
state, err := depCtr.State()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving state of dependency %s of container %s", dep, c.ID())
+ return nil, fmt.Errorf("error retrieving state of dependency %s of container %s: %w", dep, c.ID(), err)
}
if state != define.ContainerStateRunning && !depCtr.config.IsInfra {
notRunning = append(notRunning, dep)
@@ -986,7 +974,7 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
}
func (c *Container) completeNetworkSetup() error {
- var outResolvConf []string
+ var nameservers []string
netDisabled, err := c.NetworkDisabled()
if err != nil {
return err
@@ -1000,11 +988,14 @@ func (c *Container) completeNetworkSetup() error {
if err := c.runtime.setupNetNS(c); err != nil {
return err
}
+ if err := c.save(); err != nil {
+ return err
+ }
state := c.state
// collect any dns servers that cni tells us to use (dnsname)
for _, status := range c.getNetworkStatus() {
for _, server := range status.DNSServerIPs {
- outResolvConf = append(outResolvConf, fmt.Sprintf("nameserver %s", server))
+ nameservers = append(nameservers, server.String())
}
}
// check if we have a bindmount for /etc/hosts
@@ -1020,24 +1011,12 @@ func (c *Container) completeNetworkSetup() error {
}
// check if we have a bindmount for resolv.conf
- resolvBindMount := state.BindMounts["/etc/resolv.conf"]
- if len(outResolvConf) < 1 || resolvBindMount == "" || len(c.config.NetNsCtr) > 0 {
+ resolvBindMount := state.BindMounts[resolvconf.DefaultResolvConf]
+ if len(nameservers) < 1 || resolvBindMount == "" || len(c.config.NetNsCtr) > 0 {
return nil
}
- // read the existing resolv.conf
- b, err := ioutil.ReadFile(resolvBindMount)
- if err != nil {
- return err
- }
- for _, line := range strings.Split(string(b), "\n") {
- // only keep things that don't start with nameserver from the old
- // resolv.conf file
- if !strings.HasPrefix(line, "nameserver") {
- outResolvConf = append([]string{line}, outResolvConf...)
- }
- }
// write and return
- return ioutil.WriteFile(resolvBindMount, []byte(strings.Join(outResolvConf, "\n")), 0644)
+ return resolvconf.Add(resolvBindMount, nameservers)
}
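completeNetworkSetup no longer re-reads and rewrites resolv.conf by hand; it collects the DNS server IPs as plain strings and hands them to resolvconf.Add from containers/common. A rough, hypothetical stand-in for what that call does, assuming it merely merges extra nameserver entries into an existing file (the real helper may also deduplicate and handle options):

    package main

    import (
        "fmt"
        "os"
        "strings"
    )

    // addNameservers is a simplified stand-in for resolvconf.Add.
    func addNameservers(path string, nameservers []string) error {
        b, err := os.ReadFile(path)
        if err != nil {
            return err
        }
        var sb strings.Builder
        sb.Write(b)
        if len(b) > 0 && b[len(b)-1] != '\n' {
            sb.WriteByte('\n')
        }
        for _, ns := range nameservers {
            fmt.Fprintf(&sb, "nameserver %s\n", ns)
        }
        return os.WriteFile(path, []byte(sb.String()), 0o644)
    }

    func main() {
        f, err := os.CreateTemp("", "resolv-*.conf")
        if err != nil {
            panic(err)
        }
        defer os.Remove(f.Name())
        f.WriteString("search example.com\n")
        f.Close()
        if err := addNameservers(f.Name(), []string{"10.88.0.1"}); err != nil {
            panic(err)
        }
        out, _ := os.ReadFile(f.Name())
        fmt.Print(string(out)) // search example.com, then nameserver 10.88.0.1
    }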
// Initialize a container, creating it in the runtime
@@ -1176,9 +1155,9 @@ func (c *Container) reinit(ctx context.Context, retainRetries bool) error {
func (c *Container) initAndStart(ctx context.Context) (retErr error) {
// If we are ContainerStateUnknown, throw an error
if c.state.State == define.ContainerStateUnknown {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in an unknown state", c.ID())
+ return fmt.Errorf("container %s is in an unknown state: %w", c.ID(), define.ErrCtrStateInvalid)
} else if c.state.State == define.ContainerStateRemoving {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start container %s as it is being removed", c.ID())
+ return fmt.Errorf("cannot start container %s as it is being removed: %w", c.ID(), define.ErrCtrStateInvalid)
}
// If we are running, do nothing
@@ -1187,7 +1166,7 @@ func (c *Container) initAndStart(ctx context.Context) (retErr error) {
}
// If we are paused, throw an error
if c.state.State == define.ContainerStatePaused {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start paused container %s", c.ID())
+ return fmt.Errorf("cannot start paused container %s: %w", c.ID(), define.ErrCtrStateInvalid)
}
defer func() {
@@ -1290,13 +1269,6 @@ func (c *Container) stop(timeout uint) error {
}
}
- // Check if conmon is still alive.
- // If it is not, we won't be getting an exit file.
- conmonAlive, err := c.ociRuntime.CheckConmonRunning(c)
- if err != nil {
- return err
- }
-
// Set the container state to "stopping" and unlock the container
// before handing it over to conmon to unblock other commands. #8501
// demonstrates nicely that a high stop timeout will block even simple
@@ -1304,7 +1276,7 @@ func (c *Container) stop(timeout uint) error {
// is held when busy-waiting for the container to be stopped.
c.state.State = define.ContainerStateStopping
if err := c.save(); err != nil {
- return errors.Wrapf(err, "error saving container %s state before stopping", c.ID())
+ return fmt.Errorf("error saving container %s state before stopping: %w", c.ID(), err)
}
if !c.batched {
c.lock.Unlock()
@@ -1315,18 +1287,18 @@ func (c *Container) stop(timeout uint) error {
if !c.batched {
c.lock.Lock()
if err := c.syncContainer(); err != nil {
- switch errors.Cause(err) {
- // If the container has already been removed (e.g., via
- // the cleanup process), there's nothing left to do.
- case define.ErrNoSuchCtr, define.ErrCtrRemoved:
+ if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
+ // If the container has already been removed (e.g., via
+ // the cleanup process), set the container state to "stopped".
+ c.state.State = define.ContainerStateStopped
+ return stopErr
+ }
+
+ if stopErr != nil {
+ logrus.Errorf("Syncing container %s status: %v", c.ID(), err)
return stopErr
- default:
- if stopErr != nil {
- logrus.Errorf("Syncing container %s status: %v", c.ID(), err)
- return stopErr
- }
- return err
}
+ return err
}
}
@@ -1349,25 +1321,22 @@ func (c *Container) stop(timeout uint) error {
}
c.newContainerEvent(events.Stop)
-
- c.state.PID = 0
- c.state.ConmonPID = 0
c.state.StoppedByUser = true
+ conmonAlive, err := c.ociRuntime.CheckConmonRunning(c)
+ if err != nil {
+ return err
+ }
if !conmonAlive {
- // Conmon is dead, so we can't expect an exit code.
- c.state.ExitCode = -1
- c.state.FinishedTime = time.Now()
- c.state.State = define.ContainerStateStopped
- if err := c.save(); err != nil {
- logrus.Errorf("Saving container %s status: %v", c.ID(), err)
+ if err := c.checkExitFile(); err != nil {
+ return err
}
- return errors.Wrapf(define.ErrConmonDead, "container %s conmon process missing, cannot retrieve exit code", c.ID())
+ return c.save()
}
if err := c.save(); err != nil {
- return errors.Wrapf(err, "error saving container %s state after stopping", c.ID())
+ return fmt.Errorf("error saving container %s state after stopping: %w", c.ID(), err)
}
// Wait until we have an exit file, and sync once we do
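The reworked stop path is built around one locking pattern: publish the transitional Stopping state, drop the container lock across the potentially slow runtime stop so commands like podman ps stay responsive, then re-take the lock and re-sync from the database before trusting any in-memory state; the exit code now comes from the exit file rather than being faked when conmon is gone. A condensed sketch with assumed shapes, not podman's real types:

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type ctr struct {
        mu    sync.Mutex
        state string
    }

    func (c *ctr) stop() error {
        c.mu.Lock()
        c.state = "stopping" // visible to readers while we work
        c.mu.Unlock()        // unblock other commands during the slow part

        time.Sleep(10 * time.Millisecond) // stand-in for the OCI runtime stop

        c.mu.Lock()
        defer c.mu.Unlock()
        // Re-sync: state may have changed while the lock was dropped
        // (e.g. the cleanup process removed the container).
        if c.state == "removed" {
            return nil
        }
        c.state = "stopped"
        return nil
    }

    func main() {
        c := &ctr{state: "running"}
        if err := c.stop(); err != nil {
            panic(err)
        }
        fmt.Println(c.state) // stopped
    }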
@@ -1381,16 +1350,16 @@ func (c *Container) stop(timeout uint) error {
// Internal, non-locking function to pause a container
func (c *Container) pause() error {
if c.config.NoCgroups {
- return errors.Wrapf(define.ErrNoCgroups, "cannot pause without using Cgroups")
+ return fmt.Errorf("cannot pause without using Cgroups: %w", define.ErrNoCgroups)
}
if rootless.IsRootless() {
cgroupv2, err := cgroups.IsCgroup2UnifiedMode()
if err != nil {
- return errors.Wrap(err, "failed to determine cgroupversion")
+ return fmt.Errorf("failed to determine cgroupversion: %w", err)
}
if !cgroupv2 {
- return errors.Wrap(define.ErrNoCgroups, "can not pause containers on rootless containers with cgroup V1")
+ return fmt.Errorf("can not pause containers on rootless containers with cgroup V1: %w", define.ErrNoCgroups)
}
}
@@ -1409,7 +1378,7 @@ func (c *Container) pause() error {
// Internal, non-locking function to unpause a container
func (c *Container) unpause() error {
if c.config.NoCgroups {
- return errors.Wrapf(define.ErrNoCgroups, "cannot unpause without using Cgroups")
+ return fmt.Errorf("cannot unpause without using Cgroups: %w", define.ErrNoCgroups)
}
if err := c.ociRuntime.UnpauseContainer(c); err != nil {
@@ -1427,7 +1396,7 @@ func (c *Container) unpause() error {
// Internal, non-locking function to restart a container
func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (retErr error) {
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopped, define.ContainerStateExited) {
- return errors.Wrapf(define.ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state")
+ return fmt.Errorf("unable to restart a container in a paused or unknown state: %w", define.ErrCtrStateInvalid)
}
c.newContainerEvent(events.Restart)
@@ -1502,7 +1471,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
if !c.config.NoShm {
mounted, err := mount.Mounted(c.config.ShmDir)
if err != nil {
- return "", errors.Wrapf(err, "unable to determine if %q is mounted", c.config.ShmDir)
+ return "", fmt.Errorf("unable to determine if %q is mounted: %w", c.config.ShmDir, err)
}
if !mounted && !MountExists(c.config.Spec.Mounts, "/dev/shm") {
@@ -1511,7 +1480,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
return "", err
}
if err := os.Chown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil {
- return "", errors.Wrapf(err, "failed to chown %s", c.config.ShmDir)
+ return "", fmt.Errorf("failed to chown %s: %w", c.config.ShmDir, err)
}
defer func() {
if deferredErr != nil {
@@ -1531,11 +1500,11 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
overlayDest := c.runtime.RunRoot()
contentDir, err := overlay.GenerateStructure(overlayDest, c.ID(), "rootfs", c.RootUID(), c.RootGID())
if err != nil {
- return "", errors.Wrapf(err, "rootfs-overlay: failed to create TempDir in the %s directory", overlayDest)
+ return "", fmt.Errorf("rootfs-overlay: failed to create TempDir in the %s directory: %w", overlayDest, err)
}
overlayMount, err := overlay.Mount(contentDir, c.config.Rootfs, overlayDest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions())
if err != nil {
- return "", errors.Wrapf(err, "rootfs-overlay: creating overlay failed %q", c.config.Rootfs)
+ return "", fmt.Errorf("rootfs-overlay: creating overlay failed %q: %w", c.config.Rootfs, err)
}
// Seems fuse-overlayfs is not present
@@ -1545,7 +1514,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
mountOpts := label.FormatMountLabel(strings.Join(overlayMount.Options, ","), c.MountLabel())
err = mount.Mount("overlay", overlayMount.Source, overlayMount.Type, mountOpts)
if err != nil {
- return "", errors.Wrapf(err, "rootfs-overlay: creating overlay failed %q from native overlay", c.config.Rootfs)
+ return "", fmt.Errorf("rootfs-overlay: creating overlay failed %q from native overlay: %w", c.config.Rootfs, err)
}
}
@@ -1556,7 +1525,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
}
hostUID, hostGID, err := butil.GetHostIDs(util.IDtoolsToRuntimeSpec(c.config.IDMappings.UIDMap), util.IDtoolsToRuntimeSpec(c.config.IDMappings.GIDMap), uint32(execUser.Uid), uint32(execUser.Gid))
if err != nil {
- return "", errors.Wrap(err, "unable to get host UID and host GID")
+ return "", fmt.Errorf("unable to get host UID and host GID: %w", err)
}
//note: this should not be recursive; if using an external rootfs, users are responsible for configuring ownership.
@@ -1583,30 +1552,30 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
dirfd, err := unix.Open(mountPoint, unix.O_RDONLY|unix.O_PATH, 0)
if err != nil {
- return "", errors.Wrap(err, "open mount point")
+ return "", fmt.Errorf("open mount point: %w", err)
}
defer unix.Close(dirfd)
err = unix.Mkdirat(dirfd, "etc", 0755)
if err != nil && !os.IsExist(err) {
- return "", errors.Wrap(err, "create /etc")
+ return "", fmt.Errorf("create /etc: %w", err)
}
// If the etc directory was created, chown it to root in the container
if err == nil && (rootUID != 0 || rootGID != 0) {
err = unix.Fchownat(dirfd, "etc", rootUID, rootGID, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
- return "", errors.Wrap(err, "chown /etc")
+ return "", fmt.Errorf("chown /etc: %w", err)
}
}
etcInTheContainerPath, err := securejoin.SecureJoin(mountPoint, "etc")
if err != nil {
- return "", errors.Wrap(err, "resolve /etc in the container")
+ return "", fmt.Errorf("resolve /etc in the container: %w", err)
}
etcInTheContainerFd, err := unix.Open(etcInTheContainerPath, unix.O_RDONLY|unix.O_PATH, 0)
if err != nil {
- return "", errors.Wrap(err, "open /etc in the container")
+ return "", fmt.Errorf("open /etc in the container: %w", err)
}
defer unix.Close(etcInTheContainerFd)
@@ -1614,13 +1583,13 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
// create it, so that mount command within the container will work.
err = unix.Symlinkat("/proc/mounts", etcInTheContainerFd, "mtab")
if err != nil && !os.IsExist(err) {
- return "", errors.Wrap(err, "creating /etc/mtab symlink")
+ return "", fmt.Errorf("creating /etc/mtab symlink: %w", err)
}
// If the symlink was created, then also chown it to root in the container
if err == nil && (rootUID != 0 || rootGID != 0) {
err = unix.Fchownat(etcInTheContainerFd, "mtab", rootUID, rootGID, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
- return "", errors.Wrap(err, "chown /etc/mtab")
+ return "", fmt.Errorf("chown /etc/mtab: %w", err)
}
}
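The /etc/mtab block above performs every write relative to an O_PATH directory descriptor (Mkdirat, Symlinkat, Fchownat) and resolves /etc with securejoin, so a crafted rootfs cannot redirect the writes through a symlink. A focused sketch of the same idea; note it substitutes an O_NOFOLLOW|O_DIRECTORY Openat for the SecureJoin step, which is an assumption of this example rather than what the patch does:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func linkMtab(rootfs string) error {
        dirfd, err := unix.Open(rootfs, unix.O_RDONLY|unix.O_PATH, 0)
        if err != nil {
            return fmt.Errorf("open mount point: %w", err)
        }
        defer unix.Close(dirfd)

        if err := unix.Mkdirat(dirfd, "etc", 0o755); err != nil && !os.IsExist(err) {
            return fmt.Errorf("create /etc: %w", err)
        }
        // Refuse to traverse a symlinked etc in the container rootfs.
        etcfd, err := unix.Openat(dirfd, "etc",
            unix.O_RDONLY|unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY, 0)
        if err != nil {
            return fmt.Errorf("open /etc in the container: %w", err)
        }
        defer unix.Close(etcfd)

        // /etc/mtab -> /proc/mounts, created relative to the fd, never via
        // a path the container controls.
        if err := unix.Symlinkat("/proc/mounts", etcfd, "mtab"); err != nil && !os.IsExist(err) {
            return fmt.Errorf("creating /etc/mtab symlink: %w", err)
        }
        return nil
    }

    func main() {
        dir, _ := os.MkdirTemp("", "rootfs")
        defer os.RemoveAll(dir)
        fmt.Println(linkMtab(dir)) // <nil>
    }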
@@ -1654,47 +1623,33 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
logrus.Debugf("Going to mount named volume %s", v.Name)
vol, err := c.runtime.state.Volume(v.Name)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
+ return nil, fmt.Errorf("error retrieving named volume %s for container %s: %w", v.Name, c.ID(), err)
}
if vol.config.LockID == c.config.LockID {
- return nil, errors.Wrapf(define.ErrWillDeadlock, "container %s and volume %s share lock ID %d", c.ID(), vol.Name(), c.config.LockID)
+ return nil, fmt.Errorf("container %s and volume %s share lock ID %d: %w", c.ID(), vol.Name(), c.config.LockID, define.ErrWillDeadlock)
}
vol.lock.Lock()
defer vol.lock.Unlock()
if vol.needsMount() {
if err := vol.mount(); err != nil {
- return nil, errors.Wrapf(err, "error mounting volume %s for container %s", vol.Name(), c.ID())
+ return nil, fmt.Errorf("error mounting volume %s for container %s: %w", vol.Name(), c.ID(), err)
}
}
// The volume may need a copy-up. Check the state.
if err := vol.update(); err != nil {
return nil, err
}
- if vol.state.NeedsCopyUp {
+ _, hasNoCopy := vol.config.Options["nocopy"]
+ if vol.state.NeedsCopyUp && !cutil.StringInSlice("nocopy", v.Options) && !hasNoCopy {
logrus.Debugf("Copying up contents from container %s to volume %s", c.ID(), vol.Name())
- // If the volume is not empty, we should not copy up.
- volMount := vol.mountPoint()
- contents, err := ioutil.ReadDir(volMount)
- if err != nil {
- return nil, errors.Wrapf(err, "error listing contents of volume %s mountpoint when copying up from container %s", vol.Name(), c.ID())
- }
- if len(contents) > 0 {
- // The volume is not empty. It was likely modified
- // outside of Podman. For safety, let's not copy up into
- // it. Fixes CVE-2020-1726.
- return vol, nil
- }
-
srcDir, err := securejoin.SecureJoin(mountpoint, v.Dest)
if err != nil {
- return nil, errors.Wrapf(err, "error calculating destination path to copy up container %s volume %s", c.ID(), vol.Name())
+ return nil, fmt.Errorf("error calculating destination path to copy up container %s volume %s: %w", c.ID(), vol.Name(), err)
}
// Do a manual stat on the source directory to verify existence.
// Skip the rest if it exists.
- // TODO: Should this be stat or lstat? I'm using lstat because I
- // think copy-up doesn't happen when the source is a link.
srcStat, err := os.Lstat(srcDir)
if err != nil {
if os.IsNotExist(err) {
@@ -1702,7 +1657,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
// up.
return vol, nil
}
- return nil, errors.Wrapf(err, "error identifying source directory for copy up into volume %s", vol.Name())
+ return nil, fmt.Errorf("error identifying source directory for copy up into volume %s: %w", vol.Name(), err)
}
// If it's not a directory we're mounting over it.
if !srcStat.IsDir() {
@@ -1714,12 +1669,25 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
// RHBZ#1928643
srcContents, err := ioutil.ReadDir(srcDir)
if err != nil {
- return nil, errors.Wrapf(err, "error reading contents of source directory for copy up into volume %s", vol.Name())
+ return nil, fmt.Errorf("error reading contents of source directory for copy up into volume %s: %w", vol.Name(), err)
}
if len(srcContents) == 0 {
return vol, nil
}
+ // If the volume is not empty, we should not copy up.
+ volMount := vol.mountPoint()
+ contents, err := ioutil.ReadDir(volMount)
+ if err != nil {
+ return nil, fmt.Errorf("error listing contents of volume %s mountpoint when copying up from container %s: %w", vol.Name(), c.ID(), err)
+ }
+ if len(contents) > 0 {
+ // The volume is not empty. It was likely modified
+ // outside of Podman. For safety, let's not copy up into
+ // it. Fixes CVE-2020-1726.
+ return vol, nil
+ }
+
// Set NeedsCopyUp to false since we are about to do first copy
// Do not copy second time.
vol.state.NeedsCopyUp = false
@@ -1753,11 +1721,11 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
if err2 != nil {
logrus.Errorf("Streaming contents of container %s directory for volume copy-up: %v", c.ID(), err2)
}
- return nil, errors.Wrapf(err, "error copying up to volume %s", vol.Name())
+ return nil, fmt.Errorf("error copying up to volume %s: %w", vol.Name(), err)
}
if err := <-errChan; err != nil {
- return nil, errors.Wrapf(err, "error streaming container content for copy up into volume %s", vol.Name())
+ return nil, fmt.Errorf("error streaming container content for copy up into volume %s: %w", vol.Name(), err)
}
}
return vol, nil
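After this change the copy-up decision reads, in order: honor nocopy (given either as a mount option or stored in the volume config), skip when the source path is missing, not a directory, or empty, and only then apply the CVE-2020-1726 guard that refuses to copy into a volume that already has content. A simplified sketch of that ordering with a hypothetical helper:

    package main

    import (
        "fmt"
        "os"
    )

    // needsCopyUp mirrors the decision order above; srcDir is the path
    // inside the container image, volDir the volume mountpoint.
    func needsCopyUp(srcDir, volDir string, noCopy bool) (bool, error) {
        if noCopy {
            return false, nil
        }
        st, err := os.Lstat(srcDir)
        if err != nil {
            if os.IsNotExist(err) {
                return false, nil // nothing in the image to copy
            }
            return false, err
        }
        if !st.IsDir() {
            return false, nil // mounting over a non-directory: no copy-up
        }
        srcEntries, err := os.ReadDir(srcDir)
        if err != nil {
            return false, err
        }
        if len(srcEntries) == 0 {
            return false, nil // empty source: nothing to copy
        }
        volEntries, err := os.ReadDir(volDir)
        if err != nil {
            return false, err
        }
        // Non-empty volume: likely modified outside Podman, so for safety
        // do not copy into it (CVE-2020-1726).
        return len(volEntries) == 0, nil
    }

    func main() {
        fmt.Println(needsCopyUp("/etc", os.TempDir(), false))
    }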
@@ -1792,7 +1760,7 @@ func (c *Container) cleanupStorage() error {
overlayBasePath := filepath.Dir(c.state.Mountpoint)
if err := overlay.Unmount(overlayBasePath); err != nil {
if cleanupErr != nil {
- logrus.Errorf("Failed to cleanup overlay mounts for %s: %v", c.ID(), err)
+ logrus.Errorf("Failed to clean up overlay mounts for %s: %v", c.ID(), err)
}
cleanupErr = err
}
@@ -1809,7 +1777,7 @@ func (c *Container) cleanupStorage() error {
if err := c.cleanupOverlayMounts(); err != nil {
// If the container can't remove content report the error
- logrus.Errorf("Failed to cleanup overlay mounts for %s: %v", c.ID(), err)
+ logrus.Errorf("Failed to clean up overlay mounts for %s: %v", c.ID(), err)
cleanupErr = err
}
@@ -1823,7 +1791,7 @@ func (c *Container) cleanupStorage() error {
// error
// We still want to be able to kick the container out of the
// state
- if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown || errors.Cause(err) == storage.ErrLayerNotMounted {
+ if errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown) || errors.Is(err, storage.ErrLayerNotMounted) {
logrus.Errorf("Storage for container %s has been removed", c.ID())
} else {
if cleanupErr != nil {
@@ -1840,7 +1808,7 @@ func (c *Container) cleanupStorage() error {
if cleanupErr != nil {
logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr)
}
- cleanupErr = errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
+ cleanupErr = fmt.Errorf("error retrieving named volume %s for container %s: %w", v.Name, c.ID(), err)
// We need to try and unmount every volume, so continue
// if they fail.
@@ -1853,7 +1821,7 @@ func (c *Container) cleanupStorage() error {
if cleanupErr != nil {
logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr)
}
- cleanupErr = errors.Wrapf(err, "error unmounting volume %s for container %s", vol.Name(), c.ID())
+ cleanupErr = fmt.Errorf("error unmounting volume %s for container %s: %w", vol.Name(), c.ID(), err)
}
vol.lock.Unlock()
}
@@ -1878,7 +1846,7 @@ func (c *Container) cleanup(ctx context.Context) error {
// Clean up network namespace, if present
if err := c.cleanupNetwork(); err != nil {
- lastError = errors.Wrapf(err, "error removing container %s network", c.ID())
+ lastError = fmt.Errorf("error removing container %s network: %w", c.ID(), err)
}
// cleanup host entry if it is shared
@@ -1888,7 +1856,7 @@ func (c *Container) cleanup(ctx context.Context) error {
// we cannot use the dependency container lock due to ABBA deadlocks
if lock, err := lockfile.GetLockfile(hoststFile); err == nil {
lock.Lock()
- // make sure to ignore ENOENT error in case the netns container was cleanup before this one
+ // make sure to ignore ENOENT error in case the netns container was cleaned up before this one
if err := etchosts.Remove(hoststFile, getLocalhostHostEntry(c)); err != nil && !errors.Is(err, os.ErrNotExist) {
// this error is not fatal we still want to do proper cleanup
logrus.Errorf("failed to remove hosts entry from the netns containers /etc/hosts: %v", err)
@@ -1916,7 +1884,7 @@ func (c *Container) cleanup(ctx context.Context) error {
if lastError != nil {
logrus.Errorf("Unmounting container %s storage: %v", c.ID(), err)
} else {
- lastError = errors.Wrapf(err, "error unmounting container %s storage", c.ID())
+ lastError = fmt.Errorf("error unmounting container %s storage: %w", c.ID(), err)
}
}
@@ -1947,6 +1915,18 @@ func (c *Container) cleanup(ctx context.Context) error {
}
}
+ // Prune the exit codes of other containers during cleanup.
+ // Since Podman is not a daemon, we have to clean them up somewhere.
+ // Cleanup seems like a good place as it's not performance
+ // critical.
+ if err := c.runtime.state.PruneContainerExitCodes(); err != nil {
+ if lastError == nil {
+ lastError = err
+ } else {
+ logrus.Errorf("Pruning container exit codes: %v", err)
+ }
+ }
+
return lastError
}
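The new exit-code pruning slots into cleanup's error-aggregation idiom: remember the first failure for the caller and log any later ones, so every failed step is surfaced somewhere. A generic sketch of the idiom:

    package main

    import (
        "errors"
        "fmt"
        "log"
    )

    func cleanupAll(steps []func() error) error {
        var lastError error
        for i, step := range steps {
            if err := step(); err != nil {
                if lastError == nil {
                    lastError = err // first failure is returned
                } else {
                    log.Printf("cleanup step %d: %v", i, err) // rest are logged
                }
            }
        }
        return lastError
    }

    func main() {
        err := cleanupAll([]func() error{
            func() error { return nil },
            func() error { return errors.New("network teardown failed") },
            func() error { return errors.New("unmount failed") }, // logged only
        })
        fmt.Println(err) // network teardown failed
    }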
@@ -1988,11 +1968,11 @@ func (c *Container) stopPodIfNeeded(ctx context.Context) error {
// hooks.
func (c *Container) delete(ctx context.Context) error {
if err := c.ociRuntime.DeleteContainer(c); err != nil {
- return errors.Wrapf(err, "error removing container %s from runtime", c.ID())
+ return fmt.Errorf("error removing container %s from runtime: %w", c.ID(), err)
}
if err := c.postDeleteHooks(ctx); err != nil {
- return errors.Wrapf(err, "container %s poststop hooks", c.ID())
+ return fmt.Errorf("container %s poststop hooks: %w", c.ID(), err)
}
return nil
@@ -2051,7 +2031,7 @@ func (c *Container) writeStringToRundir(destFile, contents string) (string, erro
destFileName := filepath.Join(c.state.RunDir, destFile)
if err := os.Remove(destFileName); err != nil && !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "error removing %s for container %s", destFile, c.ID())
+ return "", fmt.Errorf("error removing %s for container %s: %w", destFile, c.ID(), err)
}
if err := writeStringToPath(destFileName, contents, c.config.MountLabel, c.RootUID(), c.RootGID()); err != nil {
@@ -2085,22 +2065,22 @@ func (c *Container) saveSpec(spec *spec.Spec) error {
jsonPath := filepath.Join(c.bundlePath(), "config.json")
if _, err := os.Stat(jsonPath); err != nil {
if !os.IsNotExist(err) {
- return errors.Wrapf(err, "error doing stat on container %s spec", c.ID())
+ return fmt.Errorf("error doing stat on container %s spec: %w", c.ID(), err)
}
// The spec does not exist, we're fine
} else {
// The spec exists, need to remove it
if err := os.Remove(jsonPath); err != nil {
- return errors.Wrapf(err, "error replacing runtime spec for container %s", c.ID())
+ return fmt.Errorf("error replacing runtime spec for container %s: %w", c.ID(), err)
}
}
fileJSON, err := json.Marshal(spec)
if err != nil {
- return errors.Wrapf(err, "error exporting runtime spec for container %s to JSON", c.ID())
+ return fmt.Errorf("error exporting runtime spec for container %s to JSON: %w", c.ID(), err)
}
if err := ioutil.WriteFile(jsonPath, fileJSON, 0644); err != nil {
- return errors.Wrapf(err, "error writing runtime spec JSON for container %s to disk", c.ID())
+ return fmt.Errorf("error writing runtime spec JSON for container %s to disk: %w", c.ID(), err)
}
logrus.Debugf("Created OCI spec for container %s at %s", c.ID(), jsonPath)
@@ -2163,19 +2143,19 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (map[s
// mount mounts the container's root filesystem
func (c *Container) mount() (string, error) {
if c.state.State == define.ContainerStateRemoving {
- return "", errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID())
+ return "", fmt.Errorf("cannot mount container %s as it is being removed: %w", c.ID(), define.ErrCtrStateInvalid)
}
mountPoint, err := c.runtime.storageService.MountContainerImage(c.ID())
if err != nil {
- return "", errors.Wrapf(err, "error mounting storage for container %s", c.ID())
+ return "", fmt.Errorf("error mounting storage for container %s: %w", c.ID(), err)
}
mountPoint, err = filepath.EvalSymlinks(mountPoint)
if err != nil {
- return "", errors.Wrapf(err, "error resolving storage path for container %s", c.ID())
+ return "", fmt.Errorf("error resolving storage path for container %s: %w", c.ID(), err)
}
if err := os.Chown(mountPoint, c.RootUID(), c.RootGID()); err != nil {
- return "", errors.Wrapf(err, "cannot chown %s to %d:%d", mountPoint, c.RootUID(), c.RootGID())
+ return "", fmt.Errorf("cannot chown %s to %d:%d: %w", mountPoint, c.RootUID(), c.RootGID(), err)
}
return mountPoint, nil
}
@@ -2184,7 +2164,7 @@ func (c *Container) mount() (string, error) {
func (c *Container) unmount(force bool) error {
// Also unmount storage
if _, err := c.runtime.storageService.UnmountContainerImage(c.ID(), force); err != nil {
- return errors.Wrapf(err, "error unmounting container %s root filesystem", c.ID())
+ return fmt.Errorf("error unmounting container %s root filesystem: %w", c.ID(), err)
}
return nil
@@ -2197,11 +2177,11 @@ func (c *Container) unmount(force bool) error {
// Returns nil if safe to remove, or an error describing why it's unsafe if not.
func (c *Container) checkReadyForRemoval() error {
if c.state.State == define.ContainerStateUnknown {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID())
+ return fmt.Errorf("container %s is in invalid state: %w", c.ID(), define.ErrCtrStateInvalid)
}
if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) && !c.IsInfra() {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed without force", c.ID(), c.state.State.String())
+ return fmt.Errorf("cannot remove container %s as it is %s - running or paused containers cannot be removed without force: %w", c.ID(), c.state.State.String(), define.ErrCtrStateInvalid)
}
// Check exec sessions
@@ -2210,7 +2190,7 @@ func (c *Container) checkReadyForRemoval() error {
return err
}
if len(sessions) != 0 {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID())
+ return fmt.Errorf("cannot remove container %s as it has active exec sessions: %w", c.ID(), define.ErrCtrStateInvalid)
}
return nil
@@ -2313,7 +2293,7 @@ func (c *Container) checkExitFile() error {
return nil
}
- return errors.Wrapf(err, "error running stat on container %s exit file", c.ID())
+ return fmt.Errorf("error running stat on container %s exit file: %w", c.ID(), err)
}
// Alright, it exists. Transition to Stopped state.
@@ -2351,11 +2331,11 @@ func (c *Container) extractSecretToCtrStorage(secr *ContainerSecret) error {
hostUID, hostGID, err := butil.GetHostIDs(util.IDtoolsToRuntimeSpec(c.config.IDMappings.UIDMap), util.IDtoolsToRuntimeSpec(c.config.IDMappings.GIDMap), secr.UID, secr.GID)
if err != nil {
- return errors.Wrap(err, "unable to extract secret")
+ return fmt.Errorf("unable to extract secret: %w", err)
}
err = ioutil.WriteFile(secretFile, data, 0644)
if err != nil {
- return errors.Wrapf(err, "unable to create %s", secretFile)
+ return fmt.Errorf("unable to create %s: %w", secretFile, err)
}
if err := os.Lchown(secretFile, int(hostUID), int(hostGID)); err != nil {
return err
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index d7683cce9..390e95258 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -5,11 +5,11 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
"math"
- "net"
"os"
"os/user"
"path"
@@ -29,6 +29,7 @@ import (
"github.com/containers/buildah/pkg/overlay"
butil "github.com/containers/buildah/util"
"github.com/containers/common/libnetwork/etchosts"
+ "github.com/containers/common/libnetwork/resolvconf"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/apparmor"
"github.com/containers/common/pkg/cgroups"
@@ -36,6 +37,7 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/subscriptions"
"github.com/containers/common/pkg/umask"
+ cutil "github.com/containers/common/pkg/util"
is "github.com/containers/image/v5/storage"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
@@ -43,7 +45,6 @@ import (
"github.com/containers/podman/v4/pkg/checkpoint/crutils"
"github.com/containers/podman/v4/pkg/criu"
"github.com/containers/podman/v4/pkg/lookup"
- "github.com/containers/podman/v4/pkg/resolvconf"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/podman/v4/utils"
@@ -57,7 +58,6 @@ import (
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -65,7 +65,7 @@ import (
func (c *Container) mountSHM(shmOptions string) error {
if err := unix.Mount("shm", c.config.ShmDir, "tmpfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV,
label.FormatMountLabel(shmOptions, c.config.MountLabel)); err != nil {
- return errors.Wrapf(err, "failed to mount shm tmpfs %q", c.config.ShmDir)
+ return fmt.Errorf("failed to mount shm tmpfs %q: %w", c.config.ShmDir, err)
}
return nil
}
@@ -73,7 +73,7 @@ func (c *Container) mountSHM(shmOptions string) error {
func (c *Container) unmountSHM(mount string) error {
if err := unix.Unmount(mount, 0); err != nil {
if err != syscall.EINVAL && err != syscall.ENOENT {
- return errors.Wrapf(err, "error unmounting container %s SHM mount %s", c.ID(), mount)
+ return fmt.Errorf("error unmounting container %s SHM mount %s: %w", c.ID(), mount, err)
}
// If it's just an EINVAL or ENOENT, debug logs only
logrus.Debugf("Container %s failed to unmount %s : %v", c.ID(), mount, err)
@@ -152,7 +152,7 @@ func (c *Container) prepare() error {
// createErr is guaranteed non-nil, so print
// unconditionally
logrus.Errorf("Preparing container %s: %v", c.ID(), createErr)
- createErr = errors.Wrapf(err, "error unmounting storage for container %s after network create failure", c.ID())
+ createErr = fmt.Errorf("error unmounting storage for container %s after network create failure: %w", c.ID(), err)
}
}
@@ -161,7 +161,7 @@ func (c *Container) prepare() error {
if createErr != nil {
if err := c.cleanupNetwork(); err != nil {
logrus.Errorf("Preparing container %s: %v", c.ID(), createErr)
- createErr = errors.Wrapf(err, "error cleaning up container %s network after setup failure", c.ID())
+ createErr = fmt.Errorf("error cleaning up container %s network after setup failure: %w", c.ID(), err)
}
}
@@ -251,7 +251,7 @@ func (c *Container) resolveWorkDir() error {
st, err := os.Stat(resolvedWorkdir)
if err == nil {
if !st.IsDir() {
- return errors.Errorf("workdir %q exists on container %s, but is not a directory", workdir, c.ID())
+ return fmt.Errorf("workdir %q exists on container %s, but is not a directory", workdir, c.ID())
}
return nil
}
@@ -265,11 +265,11 @@ func (c *Container) resolveWorkDir() error {
if c.isWorkDirSymlink(resolvedWorkdir) {
return nil
}
- return errors.Errorf("workdir %q does not exist on container %s", workdir, c.ID())
+ return fmt.Errorf("workdir %q does not exist on container %s", workdir, c.ID())
}
// This might be a serious error (e.g., permission), so
// we need to return the full error.
- return errors.Wrapf(err, "error detecting workdir %q on container %s", workdir, c.ID())
+ return fmt.Errorf("error detecting workdir %q on container %s: %w", workdir, c.ID(), err)
}
return nil
}
@@ -277,16 +277,16 @@ func (c *Container) resolveWorkDir() error {
if os.IsExist(err) {
return nil
}
- return errors.Wrapf(err, "error creating container %s workdir", c.ID())
+ return fmt.Errorf("error creating container %s workdir: %w", c.ID(), err)
}
// Ensure container entrypoint is created (if required).
uid, gid, _, err := chrootuser.GetUser(c.state.Mountpoint, c.User())
if err != nil {
- return errors.Wrapf(err, "error looking up %s inside of the container %s", c.User(), c.ID())
+ return fmt.Errorf("error looking up %s inside of the container %s: %w", c.User(), c.ID(), err)
}
if err := os.Chown(resolvedWorkdir, int(uid), int(gid)); err != nil {
- return errors.Wrapf(err, "error chowning container %s workdir to container root", c.ID())
+ return fmt.Errorf("error chowning container %s workdir to container root: %w", c.ID(), err)
}
return nil
@@ -311,7 +311,7 @@ func (c *Container) cleanupNetwork() error {
// Stop the container's network namespace (if it has one)
if err := c.runtime.teardownNetNS(c); err != nil {
- logrus.Errorf("Unable to cleanup network for container %s: %q", c.ID(), err)
+ logrus.Errorf("Unable to clean up network for container %s: %q", c.ID(), err)
}
c.state.NetNS = nil
@@ -367,7 +367,7 @@ func (c *Container) getUserOverrides() *lookup.Overrides {
func lookupHostUser(name string) (*runcuser.ExecUser, error) {
var execUser runcuser.ExecUser
- // Lookup User on host
+ // Look up User on host
u, err := util.LookupUser(name)
if err != nil {
return &execUser, err
@@ -387,13 +387,44 @@ func lookupHostUser(name string) (*runcuser.ExecUser, error) {
return &execUser, nil
}
+// Internal-only helper that returns the upper and work dirs from
+// overlay mount options.
+func getOverlayUpperAndWorkDir(options []string) (string, string, error) {
+ upperDir := ""
+ workDir := ""
+ for _, o := range options {
+ if strings.HasPrefix(o, "upperdir") {
+ splitOpt := strings.SplitN(o, "=", 2)
+ if len(splitOpt) > 1 {
+ upperDir = splitOpt[1]
+ if upperDir == "" {
+ return "", "", errors.New("cannot accept empty value for upperdir")
+ }
+ }
+ }
+ if strings.HasPrefix(o, "workdir") {
+ splitOpt := strings.SplitN(o, "=", 2)
+ if len(splitOpt) > 1 {
+ workDir = splitOpt[1]
+ if workDir == "" {
+ return "", "", errors.New("cannot accept empty value for workdir")
+ }
+ }
+ }
+ }
+ if (upperDir != "" && workDir == "") || (upperDir == "" && workDir != "") {
+ return "", "", errors.New("must specify both upperdir and workdir")
+ }
+ return upperDir, workDir, nil
+}
+
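For reference, the new helper's contract on typical ":O,upperdir=...,workdir=..." volume options; the function body below is copied from the hunk above so the example compiles on its own:

    package main

    import (
        "errors"
        "fmt"
        "strings"
    )

    func getOverlayUpperAndWorkDir(options []string) (string, string, error) {
        upperDir := ""
        workDir := ""
        for _, o := range options {
            if strings.HasPrefix(o, "upperdir") {
                splitOpt := strings.SplitN(o, "=", 2)
                if len(splitOpt) > 1 {
                    upperDir = splitOpt[1]
                    if upperDir == "" {
                        return "", "", errors.New("cannot accept empty value for upperdir")
                    }
                }
            }
            if strings.HasPrefix(o, "workdir") {
                splitOpt := strings.SplitN(o, "=", 2)
                if len(splitOpt) > 1 {
                    workDir = splitOpt[1]
                    if workDir == "" {
                        return "", "", errors.New("cannot accept empty value for workdir")
                    }
                }
            }
        }
        if (upperDir != "" && workDir == "") || (upperDir == "" && workDir != "") {
            return "", "", errors.New("must specify both upperdir and workdir")
        }
        return upperDir, workDir, nil
    }

    func main() {
        fmt.Println(getOverlayUpperAndWorkDir([]string{"O", "upperdir=/tmp/up", "workdir=/tmp/work"}))
        // /tmp/up /tmp/work <nil>
        fmt.Println(getOverlayUpperAndWorkDir([]string{"O", "upperdir=/tmp/up"}))
        //   must specify both upperdir and workdir
    }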
// Generate spec for a container
// Accepts a map of the container's dependencies
func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
overrides := c.getUserOverrides()
execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, overrides)
if err != nil {
- if util.StringInSlice(c.config.User, c.config.HostUsers) {
+ if cutil.StringInSlice(c.config.User, c.config.HostUsers) {
execUser, err = lookupHostUser(c.config.User)
}
if err != nil {
@@ -406,6 +437,14 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
//nolint:staticcheck
g := generate.NewFromSpec(c.config.Spec)
+ // If the flag to mount all devices is set for a privileged container, add
+ // all the devices from the host's machine into the container
+ if c.config.MountAllDevices {
+ if err := util.AddPrivilegedDevices(&g); err != nil {
+ return nil, err
+ }
+ }
+
// If network namespace was requested, add it now
if c.config.CreateNetNS {
if c.config.PostConfigureNetNS {
@@ -446,7 +485,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
for _, namedVol := range c.config.NamedVolumes {
volume, err := c.runtime.GetVolume(namedVol.Name)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving volume %s to add to container %s", namedVol.Name, c.ID())
+ return nil, fmt.Errorf("error retrieving volume %s to add to container %s: %w", namedVol.Name, c.ID(), err)
}
mountPoint, err := volume.MountPoint()
if err != nil {
@@ -459,23 +498,9 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
for _, o := range namedVol.Options {
if o == "O" {
overlayFlag = true
- }
- if overlayFlag && strings.Contains(o, "upperdir") {
- splitOpt := strings.SplitN(o, "=", 2)
- if len(splitOpt) > 1 {
- upperDir = splitOpt[1]
- if upperDir == "" {
- return nil, errors.New("cannot accept empty value for upperdir")
- }
- }
- }
- if overlayFlag && strings.Contains(o, "workdir") {
- splitOpt := strings.SplitN(o, "=", 2)
- if len(splitOpt) > 1 {
- workDir = splitOpt[1]
- if workDir == "" {
- return nil, errors.New("cannot accept empty value for workdir")
- }
+ upperDir, workDir, err = getOverlayUpperAndWorkDir(namedVol.Options)
+ if err != nil {
+ return nil, err
}
}
}
@@ -488,10 +513,6 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
return nil, err
}
- if (upperDir != "" && workDir == "") || (upperDir == "" && workDir != "") {
- return nil, errors.Wrapf(err, "must specify both upperdir and workdir")
- }
-
overlayOpts = &overlay.Options{RootUID: c.RootUID(),
RootGID: c.RootGID(),
UpperDirOptionFragment: upperDir,
@@ -501,7 +522,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
overlayMount, err = overlay.MountWithOptions(contentDir, mountPoint, namedVol.Dest, overlayOpts)
if err != nil {
- return nil, errors.Wrapf(err, "mounting overlay failed %q", mountPoint)
+ return nil, fmt.Errorf("mounting overlay failed %q: %w", mountPoint, err)
}
for _, o := range namedVol.Options {
@@ -584,13 +605,24 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Add overlay volumes
for _, overlayVol := range c.config.OverlayVolumes {
+ upperDir, workDir, err := getOverlayUpperAndWorkDir(overlayVol.Options)
+ if err != nil {
+ return nil, err
+ }
contentDir, err := overlay.TempDir(c.config.StaticDir, c.RootUID(), c.RootGID())
if err != nil {
return nil, err
}
- overlayMount, err := overlay.Mount(contentDir, overlayVol.Source, overlayVol.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions())
+ overlayOpts := &overlay.Options{RootUID: c.RootUID(),
+ RootGID: c.RootGID(),
+ UpperDirOptionFragment: upperDir,
+ WorkDirOptionFragment: workDir,
+ GraphOpts: c.runtime.store.GraphOptions(),
+ }
+
+ overlayMount, err := overlay.MountWithOptions(contentDir, overlayVol.Source, overlayVol.Dest, overlayOpts)
if err != nil {
- return nil, errors.Wrapf(err, "mounting overlay failed %q", overlayVol.Source)
+ return nil, fmt.Errorf("mounting overlay failed %q: %w", overlayVol.Source, err)
}
// Check overlay volume options
@@ -614,16 +646,16 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Mount the specified image.
img, _, err := c.runtime.LibimageRuntime().LookupImage(volume.Source, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error creating image volume %q:%q", volume.Source, volume.Dest)
+ return nil, fmt.Errorf("error creating image volume %q:%q: %w", volume.Source, volume.Dest, err)
}
mountPoint, err := img.Mount(ctx, nil, "")
if err != nil {
- return nil, errors.Wrapf(err, "error mounting image volume %q:%q", volume.Source, volume.Dest)
+ return nil, fmt.Errorf("error mounting image volume %q:%q: %w", volume.Source, volume.Dest, err)
}
contentDir, err := overlay.TempDir(c.config.StaticDir, c.RootUID(), c.RootGID())
if err != nil {
- return nil, errors.Wrapf(err, "failed to create TempDir in the %s directory", c.config.StaticDir)
+ return nil, fmt.Errorf("failed to create TempDir in the %s directory: %w", c.config.StaticDir, err)
}
var overlayMount spec.Mount
@@ -633,7 +665,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
overlayMount, err = overlay.MountReadOnly(contentDir, mountPoint, volume.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions())
}
if err != nil {
- return nil, errors.Wrapf(err, "creating overlay mount for image %q failed", volume.Source)
+ return nil, fmt.Errorf("creating overlay mount for image %q failed: %w", volume.Source, err)
}
g.AddMount(overlayMount)
}
@@ -658,7 +690,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if c.config.Umask != "" {
decVal, err := strconv.ParseUint(c.config.Umask, 8, 32)
if err != nil {
- return nil, errors.Wrapf(err, "Invalid Umask Value")
+ return nil, fmt.Errorf("invalid Umask Value: %w", err)
}
umask := uint32(decVal)
g.Config.Process.User.Umask = &umask
@@ -668,7 +700,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if len(c.config.Groups) > 0 {
gids, err := lookup.GetContainerGroups(c.config.Groups, c.state.Mountpoint, overrides)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up supplemental groups for container %s", c.ID())
+ return nil, fmt.Errorf("error looking up supplemental groups for container %s: %w", c.ID(), err)
}
for _, gid := range gids {
g.AddProcessAdditionalGid(gid)
@@ -677,7 +709,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if c.Systemd() {
if err := c.setupSystemd(g.Mounts(), g); err != nil {
- return nil, errors.Wrapf(err, "error adding systemd-specific mounts")
+ return nil, fmt.Errorf("error adding systemd-specific mounts: %w", err)
}
}
@@ -693,7 +725,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Check whether the current user namespace has enough gids available.
availableGids, err := rootless.GetAvailableGids()
if err != nil {
- return nil, errors.Wrapf(err, "cannot read number of available GIDs")
+ return nil, fmt.Errorf("cannot read number of available GIDs: %w", err)
}
gidMappings = []idtools.IDMap{{
ContainerID: 0,
@@ -838,6 +870,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if err != nil {
return nil, err
}
+
g.SetLinuxCgroupsPath(cgroupPath)
// Warning: CDI may alter g.Config in place.
@@ -848,7 +881,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
}
_, err := registry.InjectDevices(g.Config, c.config.CDIDevices...)
if err != nil {
- return nil, errors.Wrapf(err, "error setting up CDI devices")
+ return nil, fmt.Errorf("error setting up CDI devices: %w", err)
}
}
@@ -872,7 +905,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if m.Type == "tmpfs" {
finalPath, err := securejoin.SecureJoin(c.state.Mountpoint, m.Destination)
if err != nil {
- return nil, errors.Wrapf(err, "error resolving symlinks for mount destination %s", m.Destination)
+ return nil, fmt.Errorf("error resolving symlinks for mount destination %s: %w", m.Destination, err)
}
trimmedPath := strings.TrimPrefix(finalPath, strings.TrimSuffix(c.state.Mountpoint, "/"))
m.Destination = trimmedPath
@@ -901,7 +934,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Warning: precreate hooks may alter g.Config in place.
if c.state.ExtensionStageHooks, err = c.setupOCIHooks(ctx, g.Config); err != nil {
- return nil, errors.Wrapf(err, "error setting up OCI Hooks")
+ return nil, fmt.Errorf("error setting up OCI Hooks: %w", err)
}
if len(c.config.EnvSecrets) > 0 {
manager, err := c.runtime.SecretsManager()
@@ -953,11 +986,11 @@ func (c *Container) mountNotifySocket(g generate.Generator) error {
logrus.Debugf("Checking notify %q dir", notifyDir)
if err := os.MkdirAll(notifyDir, 0755); err != nil {
if !os.IsExist(err) {
- return errors.Wrapf(err, "unable to create notify %q dir", notifyDir)
+ return fmt.Errorf("unable to create notify %q dir: %w", notifyDir, err)
}
}
if err := label.Relabel(notifyDir, c.MountLabel(), true); err != nil {
- return errors.Wrapf(err, "relabel failed %q", notifyDir)
+ return fmt.Errorf("relabel failed %q: %w", notifyDir, err)
}
logrus.Debugf("Add bindmount notify %q dir", notifyDir)
if _, ok := c.state.BindMounts["/run/notify"]; !ok {
@@ -1080,7 +1113,7 @@ func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) erro
func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr string, specNS spec.LinuxNamespaceType) error {
nsCtr, err := c.runtime.state.Container(ctr)
if err != nil {
- return errors.Wrapf(err, "error retrieving dependency %s of container %s from state", ctr, c.ID())
+ return fmt.Errorf("error retrieving dependency %s of container %s from state: %w", ctr, c.ID(), err)
}
if specNS == spec.UTSNamespace {
@@ -1090,7 +1123,6 @@ func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr
g.AddProcessEnv("HOSTNAME", hostname)
}
- // TODO need unlocked version of this for use in pods
nsPath, err := nsCtr.NamespacePath(ns)
if err != nil {
return err
@@ -1110,7 +1142,7 @@ func (c *Container) addCheckpointImageMetadata(importBuilder *buildah.Builder) e
return fmt.Errorf("getting host info: %v", err)
}
- criuVersion, err := criu.GetCriuVestion()
+ criuVersion, err := criu.GetCriuVersion()
if err != nil {
return fmt.Errorf("getting criu version: %v", err)
}
@@ -1168,7 +1200,7 @@ func (c *Container) createCheckpointImage(ctx context.Context, options Container
// Create storage reference
imageRef, err := is.Transport.ParseStoreReference(c.runtime.store, options.CreateImage)
if err != nil {
- return errors.Errorf("Failed to parse image name")
+ return errors.New("failed to parse image name")
}
// Build an image scratch
@@ -1179,7 +1211,7 @@ func (c *Container) createCheckpointImage(ctx context.Context, options Container
if err != nil {
return err
}
- // Clean-up buildah working container
+ // Clean up buildah working container
defer func() {
if err := importBuilder.Delete(); err != nil {
logrus.Errorf("Image builder delete failed: %v", err)
@@ -1232,23 +1264,23 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
// Check if the dependency is an infra container. If it is we can checkpoint
// the container out of the Pod.
if c.config.Pod == "" {
- return errors.Errorf("cannot export checkpoints of containers with dependencies")
+ return errors.New("cannot export checkpoints of containers with dependencies")
}
pod, err := c.runtime.state.Pod(c.config.Pod)
if err != nil {
- return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", c.ID(), c.config.Pod)
+ return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), c.config.Pod, err)
}
infraID, err := pod.InfraContainerID()
if err != nil {
- return errors.Wrapf(err, "cannot retrieve infra container ID for pod %s", c.config.Pod)
+ return fmt.Errorf("cannot retrieve infra container ID for pod %s: %w", c.config.Pod, err)
}
if c.Dependencies()[0] != infraID {
- return errors.Errorf("cannot export checkpoints of containers with dependencies")
+ return errors.New("cannot export checkpoints of containers with dependencies")
}
}
if len(c.Dependencies()) > 1 {
- return errors.Errorf("cannot export checkpoints of containers with dependencies")
+ return errors.New("cannot export checkpoints of containers with dependencies")
}
logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), options.TargetFile)
@@ -1276,7 +1308,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
// To correctly track deleted files, let's go through the output of 'podman diff'
rootFsChanges, err := c.runtime.GetDiff("", c.ID(), define.DiffContainer)
if err != nil {
- return errors.Wrapf(err, "error exporting root file-system diff for %q", c.ID())
+ return fmt.Errorf("error exporting root file-system diff for %q: %w", c.ID(), err)
}
addToTarFiles, err := crutils.CRCreateRootFsDiffTar(&rootFsChanges, c.state.Mountpoint, c.bundlePath())
@@ -1293,7 +1325,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
// Create an archive for each volume associated with the container
if !options.IgnoreVolumes {
if err := os.MkdirAll(expVolDir, 0700); err != nil {
- return errors.Wrapf(err, "error creating volumes export directory %q", expVolDir)
+ return fmt.Errorf("error creating volumes export directory %q: %w", expVolDir, err)
}
for _, v := range c.config.NamedVolumes {
@@ -1302,7 +1334,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
volumeTarFile, err := os.Create(volumeTarFileFullPath)
if err != nil {
- return errors.Wrapf(err, "error creating %q", volumeTarFileFullPath)
+ return fmt.Errorf("error creating %q: %w", volumeTarFileFullPath, err)
}
volume, err := c.runtime.GetVolume(v.Name)
@@ -1315,7 +1347,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
return err
}
if mp == "" {
- return errors.Wrapf(define.ErrInternal, "volume %s is not mounted, cannot export", volume.Name())
+ return fmt.Errorf("volume %s is not mounted, cannot export: %w", volume.Name(), define.ErrInternal)
}
input, err := archive.TarWithOptions(mp, &archive.TarOptions{
@@ -1323,7 +1355,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
IncludeSourceDir: true,
})
if err != nil {
- return errors.Wrapf(err, "error reading volume directory %q", v.Dest)
+ return fmt.Errorf("error reading volume directory %q: %w", v.Dest, err)
}
_, err = io.Copy(volumeTarFile, input)
@@ -1343,12 +1375,12 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
})
if err != nil {
- return errors.Wrapf(err, "error reading checkpoint directory %q", c.ID())
+ return fmt.Errorf("error reading checkpoint directory %q: %w", c.ID(), err)
}
outFile, err := os.Create(options.TargetFile)
if err != nil {
- return errors.Wrapf(err, "error creating checkpoint export file %q", options.TargetFile)
+ return fmt.Errorf("error creating checkpoint export file %q: %w", options.TargetFile, err)
}
defer outFile.Close()
@@ -1374,10 +1406,10 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
func (c *Container) checkpointRestoreSupported(version int) error {
if !criu.CheckForCriu(version) {
- return errors.Errorf("checkpoint/restore requires at least CRIU %d", version)
+ return fmt.Errorf("checkpoint/restore requires at least CRIU %d", version)
}
if !c.ociRuntime.SupportsCheckpoint() {
- return errors.Errorf("configured runtime does not support checkpoint/restore")
+ return errors.New("configured runtime does not support checkpoint/restore")
}
return nil
}
@@ -1388,11 +1420,11 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
if c.state.State != define.ContainerStateRunning {
- return nil, 0, errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
+ return nil, 0, fmt.Errorf("%q is not running, cannot checkpoint: %w", c.state.State, define.ErrCtrStateInvalid)
}
if c.AutoRemove() && options.TargetFile == "" {
- return nil, 0, errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
+ return nil, 0, errors.New("cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
}
if err := c.resolveCheckpointImageName(&options); err != nil {
@@ -1473,7 +1505,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
c.state.Restored = false
c.state.RestoredTime = time.Time{}
- // Cleanup Storage and Network
+ // Clean up Storage and Network
if err := c.cleanup(ctx); err != nil {
return nil, 0, err
}
@@ -1485,12 +1517,12 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
statsDirectory, err := os.Open(c.bundlePath())
if err != nil {
- return nil, errors.Wrapf(err, "Not able to open %q", c.bundlePath())
+ return nil, fmt.Errorf("not able to open %q: %w", c.bundlePath(), err)
}
dumpStatistics, err := stats.CriuGetDumpStats(statsDirectory)
if err != nil {
- return nil, errors.Wrap(err, "Displaying checkpointing statistics not possible")
+ return nil, fmt.Errorf("displaying checkpointing statistics not possible: %w", err)
}
return &define.CRIUCheckpointRestoreStatistics{
@@ -1536,7 +1568,7 @@ func (c *Container) generateContainerSpec() error {
g := generate.NewFromSpec(c.config.Spec)
if err := c.saveSpec(g.Config); err != nil {
- return errors.Wrap(err, "saving imported container specification for restore failed")
+ return fmt.Errorf("saving imported container specification for restore failed: %w", err)
}
return nil
@@ -1594,14 +1626,14 @@ func (c *Container) importCheckpointTar(input string) error {
func (c *Container) importPreCheckpoint(input string) error {
archiveFile, err := os.Open(input)
if err != nil {
- return errors.Wrap(err, "failed to open pre-checkpoint archive for import")
+ return fmt.Errorf("failed to open pre-checkpoint archive for import: %w", err)
}
defer archiveFile.Close()
err = archive.Untar(archiveFile, c.bundlePath(), nil)
if err != nil {
- return errors.Wrapf(err, "Unpacking of pre-checkpoint archive %s failed", input)
+ return fmt.Errorf("unpacking of pre-checkpoint archive %s failed: %w", input, err)
}
return nil
}
@@ -1618,11 +1650,11 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
}
if options.Pod != "" && !crutils.CRRuntimeSupportsPodCheckpointRestore(c.ociRuntime.Path()) {
- return nil, 0, errors.Errorf("runtime %s does not support pod restore", c.ociRuntime.Path())
+ return nil, 0, fmt.Errorf("runtime %s does not support pod restore", c.ociRuntime.Path())
}
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) {
- return nil, 0, errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
+ return nil, 0, fmt.Errorf("container %s is running or paused, cannot restore: %w", c.ID(), define.ErrCtrStateInvalid)
}
if options.ImportPrevious != "" {
@@ -1644,7 +1676,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Let's try to stat() CRIU's inventory file. If it does not exist, it makes
// no sense to try a restore. This is a minimal check that a checkpoint exists.
if _, err := os.Stat(filepath.Join(c.CheckpointPath(), "inventory.img")); os.IsNotExist(err) {
- return nil, 0, errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore")
+ return nil, 0, fmt.Errorf("a complete checkpoint for this container cannot be found, cannot restore: %w", err)
}
if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "restore.log", c.MountLabel()); err != nil {
@@ -1742,23 +1774,23 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// the ones from the infrastructure container.
pod, err := c.runtime.LookupPod(options.Pod)
if err != nil {
- return nil, 0, errors.Wrapf(err, "pod %q cannot be retrieved", options.Pod)
+ return nil, 0, fmt.Errorf("pod %q cannot be retrieved: %w", options.Pod, err)
}
infraContainer, err := pod.InfraContainer()
if err != nil {
- return nil, 0, errors.Wrapf(err, "cannot retrieved infra container from pod %q", options.Pod)
+ return nil, 0, fmt.Errorf("cannot retrieved infra container from pod %q: %w", options.Pod, err)
}
infraContainer.lock.Lock()
if err := infraContainer.syncContainer(); err != nil {
infraContainer.lock.Unlock()
- return nil, 0, errors.Wrapf(err, "Error syncing infrastructure container %s status", infraContainer.ID())
+ return nil, 0, fmt.Errorf("error syncing infrastructure container %s status: %w", infraContainer.ID(), err)
}
if infraContainer.state.State != define.ContainerStateRunning {
if err := infraContainer.initAndStart(ctx); err != nil {
infraContainer.lock.Unlock()
- return nil, 0, errors.Wrapf(err, "Error starting infrastructure container %s status", infraContainer.ID())
+ return nil, 0, fmt.Errorf("error starting infrastructure container %s status: %w", infraContainer.ID(), err)
}
}
infraContainer.lock.Unlock()
@@ -1766,7 +1798,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if c.config.IPCNsCtr != "" {
nsPath, err := infraContainer.namespacePath(IPCNS)
if err != nil {
- return nil, 0, errors.Wrapf(err, "cannot retrieve IPC namespace path for Pod %q", options.Pod)
+ return nil, 0, fmt.Errorf("cannot retrieve IPC namespace path for Pod %q: %w", options.Pod, err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), nsPath); err != nil {
return nil, 0, err
@@ -1776,7 +1808,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if c.config.NetNsCtr != "" {
nsPath, err := infraContainer.namespacePath(NetNS)
if err != nil {
- return nil, 0, errors.Wrapf(err, "cannot retrieve network namespace path for Pod %q", options.Pod)
+ return nil, 0, fmt.Errorf("cannot retrieve network namespace path for Pod %q: %w", options.Pod, err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), nsPath); err != nil {
return nil, 0, err
@@ -1786,7 +1818,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if c.config.PIDNsCtr != "" {
nsPath, err := infraContainer.namespacePath(PIDNS)
if err != nil {
- return nil, 0, errors.Wrapf(err, "cannot retrieve PID namespace path for Pod %q", options.Pod)
+ return nil, 0, fmt.Errorf("cannot retrieve PID namespace path for Pod %q: %w", options.Pod, err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), nsPath); err != nil {
return nil, 0, err
@@ -1796,7 +1828,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if c.config.UTSNsCtr != "" {
nsPath, err := infraContainer.namespacePath(UTSNS)
if err != nil {
- return nil, 0, errors.Wrapf(err, "cannot retrieve UTS namespace path for Pod %q", options.Pod)
+ return nil, 0, fmt.Errorf("cannot retrieve UTS namespace path for Pod %q: %w", options.Pod, err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), nsPath); err != nil {
return nil, 0, err
@@ -1806,7 +1838,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if c.config.CgroupNsCtr != "" {
nsPath, err := infraContainer.namespacePath(CgroupNS)
if err != nil {
- return nil, 0, errors.Wrapf(err, "cannot retrieve Cgroup namespace path for Pod %q", options.Pod)
+ return nil, 0, fmt.Errorf("cannot retrieve Cgroup namespace path for Pod %q: %w", options.Pod, err)
}
if err := g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), nsPath); err != nil {
return nil, 0, err
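
The five blocks above differ only in which config field, libpod namespace constant, and OCI namespace type they touch, so a table-driven consolidation is possible. The sketch below is illustrative only; stub types and a placeholder function replace libpod's LinuxNS, generate.Generator, and namespacePath:

    package main

    import (
        "errors"
        "fmt"
    )

    // nsJoin pairs a configured dependency container with an OCI namespace
    // type; stand-ins for c.config.*NsCtr and spec.*Namespace.
    type nsJoin struct {
        ctrID  string // "" means no dependency configured
        specNS string
    }

    // namespacePath is a placeholder for infraContainer.namespacePath().
    func namespacePath(ctrID, ns string) (string, error) {
        if ctrID == "" {
            return "", errors.New("no container")
        }
        return "/proc/1234/ns/" + ns, nil // hypothetical path
    }

    func main() {
        for _, j := range []nsJoin{
            {"infra123", "ipc"},
            {"", "network"}, // skipped: no NetNsCtr configured
            {"infra123", "pid"},
            {"infra123", "uts"},
            {"infra123", "cgroup"},
        } {
            if j.ctrID == "" {
                continue
            }
            nsPath, err := namespacePath(j.ctrID, j.specNS)
            if err != nil {
                fmt.Printf("cannot retrieve %s namespace path: %v\n", j.specNS, err)
                continue
            }
            // Real code would call g.AddOrReplaceLinuxNamespace(j.specNS, nsPath).
            fmt.Printf("join %s namespace via %s\n", j.specNS, nsPath)
        }
    }
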
@@ -1874,13 +1906,13 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
volumeFile, err := os.Open(volumeFilePath)
if err != nil {
- return nil, 0, errors.Wrapf(err, "failed to open volume file %s", volumeFilePath)
+ return nil, 0, fmt.Errorf("failed to open volume file %s: %w", volumeFilePath, err)
}
defer volumeFile.Close()
volume, err := c.runtime.GetVolume(v.Name)
if err != nil {
- return nil, 0, errors.Wrapf(err, "failed to retrieve volume %s", v.Name)
+ return nil, 0, fmt.Errorf("failed to retrieve volume %s: %w", v.Name, err)
}
mountPoint, err := volume.MountPoint()
@@ -1888,10 +1920,10 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return nil, 0, err
}
if mountPoint == "" {
- return nil, 0, errors.Wrapf(err, "unable to import volume %s as it is not mounted", volume.Name())
+ return nil, 0, fmt.Errorf("unable to import volume %s as it is not mounted: %w", volume.Name(), err)
}
if err := archive.UntarUncompressed(volumeFile, mountPoint, nil); err != nil {
- return nil, 0, errors.Wrapf(err, "Failed to extract volume %s to %s", volumeFilePath, mountPoint)
+ return nil, 0, fmt.Errorf("failed to extract volume %s to %s: %w", volumeFilePath, mountPoint, err)
}
}
}
@@ -1918,12 +1950,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
}
statsDirectory, err := os.Open(c.bundlePath())
if err != nil {
- return nil, errors.Wrapf(err, "Not able to open %q", c.bundlePath())
+ return nil, fmt.Errorf("not able to open %q: %w", c.bundlePath(), err)
}
restoreStatistics, err := stats.CriuGetRestoreStats(statsDirectory)
if err != nil {
- return nil, errors.Wrap(err, "Displaying restore statistics not possible")
+ return nil, fmt.Errorf("displaying restore statistics not possible: %w", err)
}
return &define.CRIUCheckpointRestoreStatistics{
@@ -2001,7 +2033,7 @@ func (c *Container) getRootNetNsDepCtr() (depCtr *Container, err error) {
depCtr, err = c.runtime.state.Container(nextCtr)
if err != nil {
- return nil, errors.Wrapf(err, "error fetching dependency %s of container %s", c.config.NetNsCtr, c.ID())
+ return nil, fmt.Errorf("error fetching dependency %s of container %s: %w", c.config.NetNsCtr, c.ID(), err)
}
// This should never happen without an error
if depCtr == nil {
@@ -2030,7 +2062,7 @@ func (c *Container) mountIntoRootDirs(mountName string, mountPath string) error
// Make standard bind mounts to include in the container
func (c *Container) makeBindMounts() error {
if err := os.Chown(c.state.RunDir, c.RootUID(), c.RootGID()); err != nil {
- return errors.Wrap(err, "cannot chown run directory")
+ return fmt.Errorf("cannot chown run directory: %w", err)
}
if c.state.BindMounts == nil {
@@ -2048,13 +2080,13 @@ func (c *Container) makeBindMounts() error {
if c.config.NetNsCtr == "" {
if resolvePath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
if err := os.Remove(resolvePath); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "container %s", c.ID())
+ return fmt.Errorf("container %s: %w", c.ID(), err)
}
delete(c.state.BindMounts, "/etc/resolv.conf")
}
if hostsPath, ok := c.state.BindMounts["/etc/hosts"]; ok {
if err := os.Remove(hostsPath); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "container %s", c.ID())
+ return fmt.Errorf("container %s: %w", c.ID(), err)
}
delete(c.state.BindMounts, "/etc/hosts")
}
@@ -2067,13 +2099,13 @@ func (c *Container) makeBindMounts() error {
// them.
depCtr, err := c.getRootNetNsDepCtr()
if err != nil {
- return errors.Wrapf(err, "error fetching network namespace dependency container for container %s", c.ID())
+ return fmt.Errorf("error fetching network namespace dependency container for container %s: %w", c.ID(), err)
}
// We need that container's bind mounts
bindMounts, err := depCtr.BindMounts()
if err != nil {
- return errors.Wrapf(err, "error fetching bind mounts from dependency %s of container %s", depCtr.ID(), c.ID())
+ return fmt.Errorf("error fetching bind mounts from dependency %s of container %s: %w", depCtr.ID(), c.ID(), err)
}
// The other container may not have a resolv.conf or /etc/hosts
@@ -2083,7 +2115,7 @@ func (c *Container) makeBindMounts() error {
err := c.mountIntoRootDirs("/etc/resolv.conf", resolvPath)
if err != nil {
- return errors.Wrapf(err, "error assigning mounts to container %s", c.ID())
+ return fmt.Errorf("error assigning mounts to container %s: %w", c.ID(), err)
}
}
@@ -2103,13 +2135,13 @@ func (c *Container) makeBindMounts() error {
err = etchosts.Add(hostsPath, getLocalhostHostEntry(c))
lock.Unlock()
if err != nil {
- return errors.Wrapf(err, "error creating hosts file for container %s which depends on container %s", c.ID(), depCtr.ID())
+ return fmt.Errorf("error creating hosts file for container %s which depends on container %s: %w", c.ID(), depCtr.ID(), err)
}
// finally, save it in the new container
err = c.mountIntoRootDirs(config.DefaultHostsFile, hostsPath)
if err != nil {
- return errors.Wrapf(err, "error assigning mounts to container %s", c.ID())
+ return fmt.Errorf("error assigning mounts to container %s: %w", c.ID(), err)
}
}
@@ -2124,13 +2156,13 @@ func (c *Container) makeBindMounts() error {
} else {
if !c.config.UseImageResolvConf {
if err := c.generateResolvConf(); err != nil {
- return errors.Wrapf(err, "error creating resolv.conf for container %s", c.ID())
+ return fmt.Errorf("error creating resolv.conf for container %s: %w", c.ID(), err)
}
}
if !c.config.UseImageHosts {
if err := c.createHosts(); err != nil {
- return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
+ return fmt.Errorf("error creating hosts file for container %s: %w", c.ID(), err)
}
}
}
@@ -2148,7 +2180,7 @@ func (c *Container) makeBindMounts() error {
}
} else if !c.config.UseImageHosts && c.state.BindMounts["/etc/hosts"] == "" {
if err := c.createHosts(); err != nil {
- return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
+ return fmt.Errorf("error creating hosts file for container %s: %w", c.ID(), err)
}
}
@@ -2160,7 +2192,7 @@ func (c *Container) makeBindMounts() error {
if c.config.Passwd == nil || *c.config.Passwd {
newPasswd, newGroup, err := c.generatePasswdAndGroup()
if err != nil {
- return errors.Wrapf(err, "error creating temporary passwd file for container %s", c.ID())
+ return fmt.Errorf("error creating temporary passwd file for container %s: %w", c.ID(), err)
}
if newPasswd != "" {
// Make /etc/passwd
@@ -2181,7 +2213,7 @@ func (c *Container) makeBindMounts() error {
if _, ok := c.state.BindMounts["/etc/hostname"]; !ok {
hostnamePath, err := c.writeStringToRundir("hostname", c.Hostname())
if err != nil {
- return errors.Wrapf(err, "error creating hostname file for container %s", c.ID())
+ return fmt.Errorf("error creating hostname file for container %s: %w", c.ID(), err)
}
c.state.BindMounts["/etc/hostname"] = hostnamePath
}
@@ -2193,7 +2225,7 @@ func (c *Container) makeBindMounts() error {
if ctrTimezone != "local" {
_, err = time.LoadLocation(ctrTimezone)
if err != nil {
- return errors.Wrapf(err, "error finding timezone for container %s", c.ID())
+ return fmt.Errorf("error finding timezone for container %s: %w", c.ID(), err)
}
}
if _, ok := c.state.BindMounts["/etc/localtime"]; !ok {
@@ -2201,25 +2233,36 @@ func (c *Container) makeBindMounts() error {
if ctrTimezone == "local" {
zonePath, err = filepath.EvalSymlinks("/etc/localtime")
if err != nil {
- return errors.Wrapf(err, "error finding local timezone for container %s", c.ID())
+ return fmt.Errorf("error finding local timezone for container %s: %w", c.ID(), err)
}
} else {
zone := filepath.Join("/usr/share/zoneinfo", ctrTimezone)
zonePath, err = filepath.EvalSymlinks(zone)
if err != nil {
- return errors.Wrapf(err, "error setting timezone for container %s", c.ID())
+ return fmt.Errorf("error setting timezone for container %s: %w", c.ID(), err)
}
}
localtimePath, err := c.copyTimezoneFile(zonePath)
if err != nil {
- return errors.Wrapf(err, "error setting timezone for container %s", c.ID())
+ return fmt.Errorf("error setting timezone for container %s: %w", c.ID(), err)
}
c.state.BindMounts["/etc/localtime"] = localtimePath
}
}
+ _, hasRunContainerenv := c.state.BindMounts["/run/.containerenv"]
+ if !hasRunContainerenv {
+ // check in the spec mounts
+ for _, m := range c.config.Spec.Mounts {
+ if m.Destination == "/run/.containerenv" || m.Destination == "/run" {
+ hasRunContainerenv = true
+ break
+ }
+ }
+ }
+
// Make .containerenv if it does not exist
- if _, ok := c.state.BindMounts["/run/.containerenv"]; !ok {
+ if !hasRunContainerenv {
containerenv := c.runtime.graphRootMountedFlag(c.config.Spec.Mounts)
isRootless := 0
if rootless.IsRootless() {
@@ -2239,7 +2282,7 @@ rootless=%d
}
containerenvPath, err := c.writeStringToRundir(".containerenv", containerenv)
if err != nil {
- return errors.Wrapf(err, "error creating containerenv file for container %s", c.ID())
+ return fmt.Errorf("error creating containerenv file for container %s: %w", c.ID(), err)
}
c.state.BindMounts["/run/.containerenv"] = containerenvPath
}
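
The new hasRunContainerenv logic above exists because a user-supplied mount over /run (or over /run/.containerenv itself) must win over the file podman would otherwise create. Distilled into a self-contained check, with the mount type reduced to the single field the code consults:

    package main

    import "fmt"

    // mount mirrors the one field of spec.Mount that the new check reads.
    type mount struct{ Destination string }

    // coversContainerenv reports whether an existing mount already provides
    // /run/.containerenv, either directly or via a mount over /run, in which
    // case libpod must not bind-mount its own copy on top.
    func coversContainerenv(mounts []mount) bool {
        for _, m := range mounts {
            if m.Destination == "/run/.containerenv" || m.Destination == "/run" {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(coversContainerenv([]mount{{"/etc/hosts"}}))         // false
        fmt.Println(coversContainerenv([]mount{{"/run"}}))               // true
        fmt.Println(coversContainerenv([]mount{{"/run/.containerenv"}})) // true
    }
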
@@ -2260,7 +2303,7 @@ rootless=%d
if len(c.Secrets()) > 0 {
// create /run/secrets if subscriptions did not create it
if err := c.createSecretMountDir(); err != nil {
- return errors.Wrapf(err, "error creating secrets mount")
+ return fmt.Errorf("error creating secrets mount: %w", err)
}
for _, secret := range c.Secrets() {
secretFileName := secret.Name
@@ -2284,49 +2327,10 @@ rootless=%d
// generateResolvConf generates a container's resolv.conf
func (c *Container) generateResolvConf() error {
var (
- nameservers []string
networkNameServers []string
networkSearchDomains []string
)
- hostns := true
- resolvConf := "/etc/resolv.conf"
- for _, namespace := range c.config.Spec.Linux.Namespaces {
- if namespace.Type == spec.NetworkNamespace {
- hostns = false
- if namespace.Path != "" && !strings.HasPrefix(namespace.Path, "/proc/") {
- definedPath := filepath.Join("/etc/netns", filepath.Base(namespace.Path), "resolv.conf")
- _, err := os.Stat(definedPath)
- if err == nil {
- resolvConf = definedPath
- } else if !os.IsNotExist(err) {
- return err
- }
- }
- break
- }
- }
-
- contents, err := ioutil.ReadFile(resolvConf)
- // resolv.conf doesn't have to exists
- if err != nil && !os.IsNotExist(err) {
- return err
- }
-
- ns := resolvconf.GetNameservers(contents)
- // check if systemd-resolved is used, assume it is used when 127.0.0.53 is the only nameserver
- if !hostns && len(ns) == 1 && ns[0] == "127.0.0.53" {
- // read the actual resolv.conf file for systemd-resolved
- resolvedContents, err := ioutil.ReadFile("/run/systemd/resolve/resolv.conf")
- if err != nil {
- if !os.IsNotExist(err) {
- return errors.Wrapf(err, "detected that systemd-resolved is in use, but could not locate real resolv.conf")
- }
- } else {
- contents = resolvedContents
- }
- }
-
netStatus := c.getNetworkStatus()
for _, status := range netStatus {
if status.DNSServerIPs != nil {
@@ -2346,34 +2350,18 @@ func (c *Container) generateResolvConf() error {
return err
}
- // Ensure that the container's /etc/resolv.conf is compatible with its
- // network configuration.
- resolv, err := resolvconf.FilterResolvDNS(contents, ipv6, !hostns)
- if err != nil {
- return errors.Wrapf(err, "error parsing host resolv.conf")
- }
-
- dns := make([]net.IP, 0, len(c.runtime.config.Containers.DNSServers)+len(c.config.DNSServer))
- for _, i := range c.runtime.config.Containers.DNSServers {
- result := net.ParseIP(i)
- if result == nil {
- return errors.Wrapf(define.ErrInvalidArg, "invalid IP address %s", i)
- }
- dns = append(dns, result)
+ nameservers := make([]string, 0, len(c.runtime.config.Containers.DNSServers)+len(c.config.DNSServer))
+ nameservers = append(nameservers, c.runtime.config.Containers.DNSServers...)
+ for _, ip := range c.config.DNSServer {
+ nameservers = append(nameservers, ip.String())
}
- dns = append(dns, c.config.DNSServer...)
// If the user provided DNS, it trumps all; then dnsmasq; then resolv.conf
var search []string
- switch {
- case len(dns) > 0:
- // We store DNS servers as net.IP, so need to convert to string
- for _, server := range dns {
- nameservers = append(nameservers, server.String())
- }
- default:
- // Make a new resolv.conf
+ keepHostServers := false
+ if len(nameservers) == 0 {
+ keepHostServers = true
// first add the nameservers from the networks status
- nameservers = append(nameservers, networkNameServers...)
+ nameservers = networkNameServers
// when we add network DNS servers we also have to add the search domains
search = networkSearchDomains
// slirp4netns has a built-in DNS forwarder.
@@ -2385,38 +2373,34 @@ func (c *Container) generateResolvConf() error {
nameservers = append(nameservers, slirp4netnsDNS.String())
}
}
- nameservers = append(nameservers, resolvconf.GetNameservers(resolv.Content)...)
}
if len(c.config.DNSSearch) > 0 || len(c.runtime.config.Containers.DNSSearches) > 0 {
- if !util.StringInSlice(".", c.config.DNSSearch) {
- search = append(search, c.runtime.config.Containers.DNSSearches...)
- search = append(search, c.config.DNSSearch...)
- }
- } else {
- search = append(search, resolvconf.GetSearchDomains(resolv.Content)...)
+ customSearch := make([]string, 0, len(c.config.DNSSearch)+len(c.runtime.config.Containers.DNSSearches))
+ customSearch = append(customSearch, c.runtime.config.Containers.DNSSearches...)
+ customSearch = append(customSearch, c.config.DNSSearch...)
+ search = customSearch
}
- var options []string
- if len(c.config.DNSOption) > 0 || len(c.runtime.config.Containers.DNSOptions) > 0 {
- options = c.runtime.config.Containers.DNSOptions
- options = append(options, c.config.DNSOption...)
- } else {
- options = resolvconf.GetOptions(resolv.Content)
- }
+ options := make([]string, 0, len(c.config.DNSOption)+len(c.runtime.config.Containers.DNSOptions))
+ options = append(options, c.runtime.config.Containers.DNSOptions...)
+ options = append(options, c.config.DNSOption...)
destPath := filepath.Join(c.state.RunDir, "resolv.conf")
- if err := os.Remove(destPath); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "container %s", c.ID())
- }
-
- // Build resolv.conf
- if _, err = resolvconf.Build(destPath, nameservers, search, options); err != nil {
- return errors.Wrapf(err, "error building resolv.conf for container %s", c.ID())
+ if err := resolvconf.New(&resolvconf.Params{
+ IPv6Enabled: ipv6,
+ KeepHostServers: keepHostServers,
+ Nameservers: nameservers,
+ Namespaces: c.config.Spec.Linux.Namespaces,
+ Options: options,
+ Path: destPath,
+ Searches: search,
+ }); err != nil {
+ return fmt.Errorf("error building resolv.conf for container %s: %w", c.ID(), err)
}
- return c.bindMountRootFile(destPath, "/etc/resolv.conf")
+ return c.bindMountRootFile(destPath, resolvconf.DefaultResolvConf)
}
// Check if a container uses IPv6.
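
generateResolvConf now delegates the host-file parsing, systemd-resolved detection, and nameserver filtering that the removed lines did by hand to a single resolvconf.New call. A sketch of driving that helper directly; the import path below is an assumption (this hunk does not show the file's imports), while the Params fields are copied from the call site above:

    package main

    import (
        "fmt"

        "github.com/containers/common/libnetwork/resolvconf" // assumed import path
        spec "github.com/opencontainers/runtime-spec/specs-go"
    )

    func main() {
        err := resolvconf.New(&resolvconf.Params{
            Path:            "/tmp/resolv.conf",
            Namespaces:      []spec.LinuxNamespace{{Type: spec.NetworkNamespace}},
            IPv6Enabled:     false,
            KeepHostServers: true, // merge the host's servers in, as when no custom DNS is set
            Nameservers:     []string{"10.88.0.1"},
            Searches:        []string{"dns.podman"},
            Options:         []string{"ndots:1"},
        })
        if err != nil {
            fmt.Println("building resolv.conf:", err)
        }
    }
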
@@ -2457,31 +2441,13 @@ func (c *Container) addNameserver(ips []string) error {
}
// Do we have a resolv.conf at all?
- path, ok := c.state.BindMounts["/etc/resolv.conf"]
+ path, ok := c.state.BindMounts[resolvconf.DefaultResolvConf]
if !ok {
return nil
}
- // Read in full contents, parse out existing nameservers
- contents, err := ioutil.ReadFile(path)
- if err != nil {
- return err
- }
- ns := resolvconf.GetNameservers(contents)
- options := resolvconf.GetOptions(contents)
- search := resolvconf.GetSearchDomains(contents)
-
- // We could verify that it doesn't already exist
- // but extra nameservers shouldn't harm anything.
- // Ensure we are the first entry in resolv.conf though, otherwise we
- // might be after user-added servers.
- ns = append(ips, ns...)
-
- // We're rewriting the container's resolv.conf as part of this, but we
- // hold the container lock, so there should be no risk of parallel
- // modification.
- if _, err := resolvconf.Build(path, ns, search, options); err != nil {
- return errors.Wrapf(err, "error adding new nameserver to container %s resolv.conf", c.ID())
+ if err := resolvconf.Add(path, ips); err != nil {
+ return fmt.Errorf("adding new nameserver to container %s resolv.conf: %w", c.ID(), err)
}
return nil
@@ -2496,34 +2462,13 @@ func (c *Container) removeNameserver(ips []string) error {
}
// Do we have a resolv.conf at all?
- path, ok := c.state.BindMounts["/etc/resolv.conf"]
+ path, ok := c.state.BindMounts[resolvconf.DefaultResolvConf]
if !ok {
return nil
}
- // Read in full contents, parse out existing nameservers
- contents, err := ioutil.ReadFile(path)
- if err != nil {
- return err
- }
- ns := resolvconf.GetNameservers(contents)
- options := resolvconf.GetOptions(contents)
- search := resolvconf.GetSearchDomains(contents)
-
- toRemove := make(map[string]bool)
- for _, ip := range ips {
- toRemove[ip] = true
- }
-
- newNS := make([]string, 0, len(ns))
- for _, server := range ns {
- if !toRemove[server] {
- newNS = append(newNS, server)
- }
- }
-
- if _, err := resolvconf.Build(path, newNS, search, options); err != nil {
- return errors.Wrapf(err, "error removing nameservers from container %s resolv.conf", c.ID())
+ if err := resolvconf.Remove(path, ips); err != nil {
+ return fmt.Errorf("removing nameservers from container %s resolv.conf: %w", c.ID(), err)
}
return nil
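
addNameserver and removeNameserver likewise shrink to one call each. A usage sketch under the same assumed import as above; per the comment deleted from addNameserver, Add is expected to put new servers ahead of existing entries:

    package main

    import (
        "log"

        "github.com/containers/common/libnetwork/resolvconf" // assumed import path
    )

    func main() {
        path := "/tmp/resolv.conf"
        // Prepend a nameserver, as addNameserver does on network connect.
        if err := resolvconf.Add(path, []string{"169.254.1.1"}); err != nil {
            log.Fatal(err)
        }
        // And strip it again, as removeNameserver does on disconnect.
        if err := resolvconf.Remove(path, []string{"169.254.1.1"}); err != nil {
            log.Fatal(err)
        }
    }
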
@@ -2653,16 +2598,16 @@ func (c *Container) generateCurrentUserGroupEntry() (string, int, error) {
g, err := user.LookupGroupId(strconv.Itoa(gid))
if err != nil {
- return "", 0, errors.Wrapf(err, "failed to get current group")
+ return "", 0, fmt.Errorf("failed to get current group: %w", err)
}
- // Lookup group name to see if it exists in the image.
+ // Look up group name to see if it exists in the image.
_, err = lookup.GetGroup(c.state.Mountpoint, g.Name)
if err != runcuser.ErrNoGroupEntries {
return "", 0, err
}
- // Lookup GID to see if it exists in the image.
+ // Look up GID to see if it exists in the image.
_, err = lookup.GetGroup(c.state.Mountpoint, g.Gid)
if err != runcuser.ErrNoGroupEntries {
return "", 0, err
@@ -2675,7 +2620,7 @@ func (c *Container) generateCurrentUserGroupEntry() (string, int, error) {
if uid != 0 {
u, err := user.LookupId(strconv.Itoa(uid))
if err != nil {
- return "", 0, errors.Wrapf(err, "failed to get current user to make group entry")
+ return "", 0, fmt.Errorf("failed to get current user to make group entry: %w", err)
}
username = u.Username
}
@@ -2699,7 +2644,7 @@ func (c *Container) generateUserGroupEntry(addedGID int) (string, error) {
gid, err := strconv.ParseUint(group, 10, 32)
if err != nil {
- return "", nil // nolint: nilerr
+ return "", nil //nolint: nilerr
}
if addedGID != 0 && addedGID == int(gid) {
@@ -2732,7 +2677,7 @@ func (c *Container) generatePasswdEntry() (string, error) {
addedUID := 0
for _, userid := range c.config.HostUsers {
- // Lookup User on host
+ // Look up User on host
u, err := util.LookupUser(userid)
if err != nil {
return "", err
@@ -2773,7 +2718,7 @@ func (c *Container) generateCurrentUserPasswdEntry() (string, int, int, error) {
u, err := user.LookupId(strconv.Itoa(uid))
if err != nil {
- return "", 0, 0, errors.Wrapf(err, "failed to get current user")
+ return "", 0, 0, fmt.Errorf("failed to get current user: %w", err)
}
pwd, err := c.userPasswdEntry(u)
if err != nil {
@@ -2784,13 +2729,13 @@ func (c *Container) generateCurrentUserPasswdEntry() (string, int, int, error) {
}
func (c *Container) userPasswdEntry(u *user.User) (string, error) {
- // Lookup the user to see if it exists in the container image.
+ // Look up the user to see if it exists in the container image.
_, err := lookup.GetUser(c.state.Mountpoint, u.Username)
if err != runcuser.ErrNoPasswdEntries {
return "", err
}
- // Lookup the UID to see if it exists in the container image.
+ // Look up the UID to see if it exists in the container image.
_, err = lookup.GetUser(c.state.Mountpoint, u.Uid)
if err != runcuser.ErrNoPasswdEntries {
return "", err
@@ -2855,14 +2800,14 @@ func (c *Container) generateUserPasswdEntry(addedUID int) (string, error) {
// If a non-numeric User, then don't generate passwd
uid, err := strconv.ParseUint(userspec, 10, 32)
if err != nil {
- return "", nil // nolint: nilerr
+ return "", nil //nolint: nilerr
}
if addedUID != 0 && int(uid) == addedUID {
return "", nil
}
- // Lookup the user to see if it exists in the container image
+ // Look up the user to see if it exists in the container image
_, err = lookup.GetUser(c.state.Mountpoint, userspec)
if err != runcuser.ErrNoPasswdEntries {
return "", err
@@ -2875,7 +2820,7 @@ func (c *Container) generateUserPasswdEntry(addedUID int) (string, error) {
} else {
group, err := lookup.GetGroup(c.state.Mountpoint, groupspec)
if err != nil {
- return "", errors.Wrapf(err, "unable to get gid %s from group file", groupspec)
+ return "", fmt.Errorf("unable to get gid %s from group file: %w", groupspec, err)
}
gid = group.Gid
}
@@ -2984,7 +2929,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
logrus.Debugf("Making /etc/passwd for container %s", c.ID())
originPasswdFile, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd")
if err != nil {
- return "", "", errors.Wrapf(err, "error creating path to container %s /etc/passwd", c.ID())
+ return "", "", fmt.Errorf("error creating path to container %s /etc/passwd: %w", c.ID(), err)
}
orig, err := ioutil.ReadFile(originPasswdFile)
if err != nil && !os.IsNotExist(err) {
@@ -2992,7 +2937,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
}
passwdFile, err := c.writeStringToStaticDir("passwd", string(orig)+passwdEntry)
if err != nil {
- return "", "", errors.Wrapf(err, "failed to create temporary passwd file")
+ return "", "", fmt.Errorf("failed to create temporary passwd file: %w", err)
}
if err := os.Chmod(passwdFile, 0644); err != nil {
return "", "", err
@@ -3002,17 +2947,17 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
logrus.Debugf("Modifying container %s /etc/passwd", c.ID())
containerPasswd, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd")
if err != nil {
- return "", "", errors.Wrapf(err, "error looking up location of container %s /etc/passwd", c.ID())
+ return "", "", fmt.Errorf("error looking up location of container %s /etc/passwd: %w", c.ID(), err)
}
f, err := os.OpenFile(containerPasswd, os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
- return "", "", errors.Wrapf(err, "container %s", c.ID())
+ return "", "", fmt.Errorf("container %s: %w", c.ID(), err)
}
defer f.Close()
if _, err := f.WriteString(passwdEntry); err != nil {
- return "", "", errors.Wrapf(err, "unable to append to container %s /etc/passwd", c.ID())
+ return "", "", fmt.Errorf("unable to append to container %s /etc/passwd: %w", c.ID(), err)
}
default:
logrus.Debugf("Not modifying container %s /etc/passwd", c.ID())
@@ -3030,7 +2975,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
logrus.Debugf("Making /etc/group for container %s", c.ID())
originGroupFile, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/group")
if err != nil {
- return "", "", errors.Wrapf(err, "error creating path to container %s /etc/group", c.ID())
+ return "", "", fmt.Errorf("error creating path to container %s /etc/group: %w", c.ID(), err)
}
orig, err := ioutil.ReadFile(originGroupFile)
if err != nil && !os.IsNotExist(err) {
@@ -3038,7 +2983,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
}
groupFile, err := c.writeStringToStaticDir("group", string(orig)+groupEntry)
if err != nil {
- return "", "", errors.Wrapf(err, "failed to create temporary group file")
+ return "", "", fmt.Errorf("failed to create temporary group file: %w", err)
}
if err := os.Chmod(groupFile, 0644); err != nil {
return "", "", err
@@ -3048,17 +2993,17 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
logrus.Debugf("Modifying container %s /etc/group", c.ID())
containerGroup, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/group")
if err != nil {
- return "", "", errors.Wrapf(err, "error looking up location of container %s /etc/group", c.ID())
+ return "", "", fmt.Errorf("error looking up location of container %s /etc/group: %w", c.ID(), err)
}
f, err := os.OpenFile(containerGroup, os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
- return "", "", errors.Wrapf(err, "container %s", c.ID())
+ return "", "", fmt.Errorf("container %s: %w", c.ID(), err)
}
defer f.Close()
if _, err := f.WriteString(groupEntry); err != nil {
- return "", "", errors.Wrapf(err, "unable to append to container %s /etc/group", c.ID())
+ return "", "", fmt.Errorf("unable to append to container %s /etc/group: %w", c.ID(), err)
}
default:
logrus.Debugf("Not modifying container %s /etc/group", c.ID())
@@ -3093,7 +3038,7 @@ func (c *Container) expectPodCgroup() (bool, error) {
case cgroupManager == config.CgroupfsCgroupsManager:
return !rootless.IsRootless(), nil
default:
- return false, errors.Wrapf(define.ErrInvalidArg, "invalid cgroup mode %s requested for pods", cgroupManager)
+ return false, fmt.Errorf("invalid cgroup mode %s requested for pods: %w", cgroupManager, define.ErrInvalidArg)
}
}
@@ -3108,7 +3053,7 @@ func (c *Container) getOCICgroupPath() (string, error) {
case c.config.NoCgroups:
return "", nil
case c.config.CgroupsMode == cgroupSplit:
- selfCgroup, err := utils.GetOwnCgroup()
+ selfCgroup, err := utils.GetOwnCgroupDisallowRoot()
if err != nil {
return "", err
}
@@ -3130,7 +3075,7 @@ func (c *Container) getOCICgroupPath() (string, error) {
logrus.Debugf("Setting Cgroup path for container %s to %s", c.ID(), cgroupPath)
return cgroupPath, nil
default:
- return "", errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager %s requested", cgroupManager)
+ return "", fmt.Errorf("invalid cgroup manager %s requested: %w", cgroupManager, define.ErrInvalidArg)
}
}
@@ -3141,7 +3086,7 @@ func (c *Container) copyTimezoneFile(zonePath string) (string, error) {
return "", err
}
if file.IsDir() {
- return "", errors.New("Invalid timezone: is a directory")
+ return "", errors.New("invalid timezone: is a directory")
}
src, err := os.Open(zonePath)
if err != nil {
@@ -3175,14 +3120,14 @@ func (c *Container) cleanupOverlayMounts() error {
func (c *Container) checkFileExistsInRootfs(file string) (bool, error) {
checkPath, err := securejoin.SecureJoin(c.state.Mountpoint, file)
if err != nil {
- return false, errors.Wrapf(err, "cannot create path to container %s file %q", c.ID(), file)
+ return false, fmt.Errorf("cannot create path to container %s file %q: %w", c.ID(), file, err)
}
stat, err := os.Stat(checkPath)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
- return false, errors.Wrapf(err, "container %s", c.ID())
+ return false, fmt.Errorf("container %s: %w", c.ID(), err)
}
if stat.IsDir() {
return false, nil
@@ -3218,7 +3163,7 @@ func (c *Container) createSecretMountDir() error {
func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
vol, err := c.runtime.state.Volume(v.Name)
if err != nil {
- return errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
+ return fmt.Errorf("error retrieving named volume %s for container %s: %w", v.Name, c.ID(), err)
}
vol.lock.Lock()
@@ -3229,10 +3174,8 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
return err
}
- // TODO: For now, I've disabled chowning volumes owned by non-Podman
- // drivers. This may be safe, but it's really going to be a case-by-case
- // thing, I think - safest to leave disabled now and re-enable later if
- // there is a demand.
+ // Volumes owned by a volume driver are not chowned - we don't want to
+ // mess with a mount not managed by us.
if vol.state.NeedsChown && !vol.UsesVolumeDriver() {
vol.state.NeedsChown = false
@@ -3247,7 +3190,7 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap)
newPair, err := mappings.ToHost(p)
if err != nil {
- return errors.Wrapf(err, "error mapping user %d:%d", uid, gid)
+ return fmt.Errorf("error mapping user %d:%d: %w", uid, gid, err)
}
uid = newPair.UID
gid = newPair.GID
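
The chown above first runs the volume's UID/GID through the container's ID mappings. A self-contained sketch of that translation, assuming the idtools types as used at the call site:

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/idtools"
    )

    func main() {
        // One user-namespace mapping: container IDs 0-65535 start at host
        // ID 100000, the shape the hunk above feeds into ToHost().
        uidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
        gidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
        mappings := idtools.NewIDMappingsFromMaps(uidMap, gidMap)

        newPair, err := mappings.ToHost(idtools.IDPair{UID: 1000, GID: 1000})
        if err != nil {
            fmt.Println("error mapping user:", err)
            return
        }
        fmt.Printf("container 1000:1000 -> host %d:%d\n", newPair.UID, newPair.GID) // 101000:101000
    }
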
@@ -3282,7 +3225,7 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
return err
}
stat := st.Sys().(*syscall.Stat_t)
- atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) // nolint: unconvert
+ atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) //nolint: unconvert
if err := os.Chtimes(mountPoint, atime, st.ModTime()); err != nil {
return err
}
diff --git a/libpod/container_log.go b/libpod/container_log.go
index 7a9eb2dbf..a9e0fe065 100644
--- a/libpod/container_log.go
+++ b/libpod/container_log.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"os"
"time"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/libpod/logs"
"github.com/nxadm/tail/watch"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -35,9 +35,9 @@ func (r *Runtime) Log(ctx context.Context, containers []*Container, options *log
func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logChannel chan *logs.LogLine, colorID int64) error {
switch c.LogDriver() {
case define.PassthroughLogging:
- return errors.Wrapf(define.ErrNoLogs, "this container is using the 'passthrough' log driver, cannot read logs")
+ return fmt.Errorf("this container is using the 'passthrough' log driver, cannot read logs: %w", define.ErrNoLogs)
case define.NoLogging:
- return errors.Wrapf(define.ErrNoLogs, "this container is using the 'none' log driver, cannot read logs")
+ return fmt.Errorf("this container is using the 'none' log driver, cannot read logs: %w", define.ErrNoLogs)
case define.JournaldLogging:
return c.readFromJournal(ctx, options, logChannel, colorID)
case define.JSONLogging:
@@ -47,7 +47,7 @@ func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logCh
case define.KubernetesLogging, "":
return c.readFromLogFile(ctx, options, logChannel, colorID)
default:
- return errors.Wrapf(define.ErrInternal, "unrecognized log driver %q, cannot read logs", c.LogDriver())
+ return fmt.Errorf("unrecognized log driver %q, cannot read logs: %w", c.LogDriver(), define.ErrInternal)
}
}
@@ -55,10 +55,10 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
t, tailLog, err := logs.GetLogFile(c.LogPath(), options)
if err != nil {
// If the log file does not exist, this is not fatal.
- if os.IsNotExist(errors.Cause(err)) {
+ if errors.Is(err, os.ErrNotExist) {
return nil
}
- return errors.Wrapf(err, "unable to read log file %s for %s ", c.ID(), c.LogPath())
+ return fmt.Errorf("unable to read log file %s for %s : %w", c.ID(), c.LogPath(), err)
}
options.WaitGroup.Add(1)
if len(tailLog) > 0 {
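
With pkg/errors gone, errors.Cause is replaced by errors.Is, which walks %w chains; os.IsNotExist alone would not. A minimal demonstration:

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    func main() {
        _, err := os.Open("/no/such/file")
        wrapped := fmt.Errorf("unable to read log file: %w", err)

        // os.IsNotExist does not unwrap fmt.Errorf chains...
        fmt.Println(os.IsNotExist(wrapped)) // false
        // ...while errors.Is does, which is why the hunk above switches to it.
        fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
    }
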
@@ -75,7 +75,6 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
go func() {
defer options.WaitGroup.Done()
- var partial string
for line := range t.Lines {
select {
case <-ctx.Done():
@@ -89,13 +88,6 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
logrus.Errorf("Getting new log line: %v", err)
continue
}
- if nll.Partial() {
- partial += nll.Msg
- continue
- } else if !nll.Partial() && len(partial) > 0 {
- nll.Msg = partial + nll.Msg
- partial = ""
- }
nll.CID = c.ID()
nll.CName = c.Name()
nll.ColorID = colorID
@@ -111,7 +103,7 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
// until EOF.
state, err := c.State()
if err != nil || state != define.ContainerStateRunning {
- if err != nil && errors.Cause(err) != define.ErrNoSuchCtr {
+ if err != nil && !errors.Is(err, define.ErrNoSuchCtr) {
logrus.Errorf("Getting container state: %v", err)
}
go func() {
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index deb726526..0686caed2 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -5,6 +5,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"strings"
"time"
@@ -14,7 +15,6 @@ import (
"github.com/containers/podman/v4/libpod/logs"
"github.com/coreos/go-systemd/v22/journal"
"github.com/coreos/go-systemd/v22/sdjournal"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -49,7 +49,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
// We need the container's events in the same journal to guarantee
// consistency, see #10323.
if options.Follow && c.runtime.config.Engine.EventsLogger != "journald" {
- return errors.Errorf("using --follow with the journald --log-driver but without the journald --events-backend (%s) is not supported", c.runtime.config.Engine.EventsLogger)
+ return fmt.Errorf("using --follow with the journald --log-driver but without the journald --events-backend (%s) is not supported", c.runtime.config.Engine.EventsLogger)
}
journal, err := sdjournal.NewJournal()
@@ -63,21 +63,21 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
// Add the filters for events.
match := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"}
if err := journal.AddMatch(match.String()); err != nil {
- return errors.Wrapf(err, "adding filter to journald logger: %v", match)
+ return fmt.Errorf("adding filter to journald logger: %v: %w", match, err)
}
match = sdjournal.Match{Field: "PODMAN_ID", Value: c.ID()}
if err := journal.AddMatch(match.String()); err != nil {
- return errors.Wrapf(err, "adding filter to journald logger: %v", match)
+ return fmt.Errorf("adding filter to journald logger: %v: %w", match, err)
}
// Add the filter for logs. Note the disjunction so that we match
// either the events or the logs.
if err := journal.AddDisjunction(); err != nil {
- return errors.Wrap(err, "adding filter disjunction to journald logger")
+ return fmt.Errorf("adding filter disjunction to journald logger: %w", err)
}
match = sdjournal.Match{Field: "CONTAINER_ID_FULL", Value: c.ID()}
if err := journal.AddMatch(match.String()); err != nil {
- return errors.Wrapf(err, "adding filter to journald logger: %v", match)
+ return fmt.Errorf("adding filter to journald logger: %v: %w", match, err)
}
if err := journal.SeekHead(); err != nil {
@@ -85,12 +85,12 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
}
// API requires Next() immediately after SeekHead().
if _, err := journal.Next(); err != nil {
- return errors.Wrap(err, "next journal")
+ return fmt.Errorf("next journal: %w", err)
}
// API requires a next|prev before getting a cursor.
if _, err := journal.Previous(); err != nil {
- return errors.Wrap(err, "previous journal")
+ return fmt.Errorf("previous journal: %w", err)
}
// Note that the initial cursor may not yet be ready, so we'll do an
@@ -111,7 +111,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
break
}
if cursorError != nil {
- return errors.Wrap(cursorError, "initial journal cursor")
+ return fmt.Errorf("initial journal cursor: %w", cursorError)
}
options.WaitGroup.Add(1)
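
A condensed sketch of the journald filter setup above: matches added back to back are ANDed, AddDisjunction starts a new OR group (events OR container log lines), and SeekHead must be followed by Next before entries can be read. The container ID is a placeholder:

    package main

    import (
        "fmt"

        "github.com/coreos/go-systemd/v22/sdjournal"
    )

    func main() {
        j, err := sdjournal.NewJournal()
        if err != nil {
            fmt.Println(err)
            return
        }
        defer j.Close()

        // First group: podman's own event entries for this container.
        for _, m := range []sdjournal.Match{
            {Field: "SYSLOG_IDENTIFIER", Value: "podman"},
            {Field: "PODMAN_ID", Value: "abc123"}, // placeholder container ID
        } {
            if err := j.AddMatch(m.String()); err != nil {
                fmt.Println("adding filter:", err)
                return
            }
        }
        // Second group, ORed with the first: the container's log lines.
        if err := j.AddDisjunction(); err != nil {
            fmt.Println("adding disjunction:", err)
            return
        }
        if err := j.AddMatch(sdjournal.Match{Field: "CONTAINER_ID_FULL", Value: "abc123"}.String()); err != nil {
            fmt.Println("adding filter:", err)
            return
        }

        // The seek dance from the hunk above: the API requires Next()
        // immediately after SeekHead().
        if err := j.SeekHead(); err != nil {
            fmt.Println("seek:", err)
            return
        }
        if _, err := j.Next(); err != nil {
            fmt.Println("next journal:", err)
        }
    }
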
@@ -255,7 +255,7 @@ func journalFormatterWithID(entry *sdjournal.JournalEntry) (string, error) {
id, ok := entry.Fields["CONTAINER_ID_FULL"]
if !ok {
- return "", fmt.Errorf("no CONTAINER_ID_FULL field present in journal entry")
+ return "", errors.New("no CONTAINER_ID_FULL field present in journal entry")
}
if len(id) > 12 {
id = id[:12]
@@ -290,14 +290,15 @@ func formatterPrefix(entry *sdjournal.JournalEntry) (string, error) {
output := fmt.Sprintf("%s ", tsString)
priority, ok := entry.Fields["PRIORITY"]
if !ok {
- return "", errors.Errorf("no PRIORITY field present in journal entry")
+ return "", errors.New("no PRIORITY field present in journal entry")
}
- if priority == journaldLogOut {
+ switch priority {
+ case journaldLogOut:
output += "stdout "
- } else if priority == journaldLogErr {
+ case journaldLogErr:
output += "stderr "
- } else {
- return "", errors.Errorf("unexpected PRIORITY field in journal entry")
+ default:
+ return "", errors.New("unexpected PRIORITY field in journal entry")
}
// if CONTAINER_PARTIAL_MESSAGE is defined, the log type is "P"
@@ -314,7 +315,7 @@ func formatterMessage(entry *sdjournal.JournalEntry) (string, error) {
// Finally, append the message
msg, ok := entry.Fields["MESSAGE"]
if !ok {
- return "", fmt.Errorf("no MESSAGE field present in journal entry")
+ return "", errors.New("no MESSAGE field present in journal entry")
}
msg = strings.TrimSuffix(msg, "\n")
return msg, nil
diff --git a/libpod/container_log_unsupported.go b/libpod/container_log_unsupported.go
index c84a578cc..bb74a810d 100644
--- a/libpod/container_log_unsupported.go
+++ b/libpod/container_log_unsupported.go
@@ -5,16 +5,16 @@ package libpod
import (
"context"
+ "fmt"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/logs"
- "github.com/pkg/errors"
)
func (c *Container) readFromJournal(_ context.Context, _ *logs.LogOptions, _ chan *logs.LogLine, colorID int64) error {
- return errors.Wrapf(define.ErrOSNotSupported, "Journald logging only enabled with systemd on linux")
+ return fmt.Errorf("journald logging only enabled with systemd on linux: %w", define.ErrOSNotSupported)
}
func (c *Container) initializeJournal(ctx context.Context) error {
- return errors.Wrapf(define.ErrOSNotSupported, "Journald logging only enabled with systemd on linux")
+ return fmt.Errorf("journald logging only enabled with systemd on linux: %w", define.ErrOSNotSupported)
}
diff --git a/libpod/container_path_resolution.go b/libpod/container_path_resolution.go
index 80a3749f5..35622d623 100644
--- a/libpod/container_path_resolution.go
+++ b/libpod/container_path_resolution.go
@@ -1,12 +1,12 @@
package libpod
import (
+ "fmt"
"path/filepath"
"strings"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -65,7 +65,7 @@ func (c *Container) resolvePath(mountPoint string, containerPath string) (string
return "", "", err
}
if mountPoint == "" {
- return "", "", errors.Errorf("volume %s is not mounted, cannot copy into it", volume.Name())
+ return "", "", fmt.Errorf("volume %s is not mounted, cannot copy into it", volume.Name())
}
// We found a matching volume for searchPath. We now
diff --git a/libpod/container_stat_linux.go b/libpod/container_stat_linux.go
index bbe3edbb3..72aabb516 100644
--- a/libpod/container_stat_linux.go
+++ b/libpod/container_stat_linux.go
@@ -4,6 +4,8 @@
package libpod
import (
+ "errors"
+ "fmt"
"os"
"path/filepath"
"strings"
@@ -11,7 +13,6 @@ import (
"github.com/containers/buildah/copier"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/copy"
- "github.com/pkg/errors"
)
// statInsideMount stats the specified path *inside* the container's mount and PID
@@ -150,10 +151,10 @@ func secureStat(root string, path string) (*copier.StatForItem, error) {
}
if len(globStats) != 1 {
- return nil, errors.Errorf("internal error: secureStat: expected 1 item but got %d", len(globStats))
+ return nil, fmt.Errorf("internal error: secureStat: expected 1 item but got %d", len(globStats))
}
if len(globStats[0].Results) != 1 {
- return nil, errors.Errorf("internal error: secureStat: expected 1 result but got %d", len(globStats[0].Results))
+ return nil, fmt.Errorf("internal error: secureStat: expected 1 result but got %d", len(globStats[0].Results))
}
// NOTE: the key in the map differs from `glob` when hitting a symlink.
@@ -167,7 +168,7 @@ func secureStat(root string, path string) (*copier.StatForItem, error) {
if stat.IsSymlink {
target, err := copier.Eval(root, path, copier.EvalOptions{})
if err != nil {
- return nil, errors.Wrap(err, "error evaluating symlink in container")
+ return nil, fmt.Errorf("error evaluating symlink in container: %w", err)
}
// Need to make sure the symlink is relative to the root!
target = strings.TrimPrefix(target, root)
diff --git a/libpod/container_top_linux.go b/libpod/container_top_linux.go
index 9b3dbc873..5571edf73 100644
--- a/libpod/container_top_linux.go
+++ b/libpod/container_top_linux.go
@@ -5,6 +5,7 @@ package libpod
import (
"bufio"
+ "errors"
"fmt"
"os"
"strconv"
@@ -14,7 +15,6 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/psgo"
"github.com/google/shlex"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -22,15 +22,15 @@ import (
// []string for output
func (c *Container) Top(descriptors []string) ([]string, error) {
if c.config.NoCgroups {
- return nil, errors.Wrapf(define.ErrNoCgroups, "cannot run top on container %s as it did not create a cgroup", c.ID())
+ return nil, fmt.Errorf("cannot run top on container %s as it did not create a cgroup: %w", c.ID(), define.ErrNoCgroups)
}
conStat, err := c.State()
if err != nil {
- return nil, errors.Wrapf(err, "unable to look up state for %s", c.ID())
+ return nil, fmt.Errorf("unable to look up state for %s: %w", c.ID(), err)
}
if conStat != define.ContainerStateRunning {
- return nil, errors.Errorf("top can only be used on running containers")
+ return nil, errors.New("top can only be used on running containers")
}
// Also support comma-separated input.
@@ -59,7 +59,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) {
for _, d := range descriptors {
shSplit, err := shlex.Split(d)
if err != nil {
- return nil, fmt.Errorf("parsing ps args: %v", err)
+ return nil, fmt.Errorf("parsing ps args: %w", err)
}
for _, s := range shSplit {
if s != "" {
@@ -70,7 +70,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) {
output, err = c.execPS(psDescriptors)
if err != nil {
- return nil, errors.Wrapf(err, "error executing ps(1) in the container")
+ return nil, fmt.Errorf("error executing ps(1) in the container: %w", err)
}
// Trick: filter the ps command from the output instead of
@@ -96,7 +96,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) {
// For more details, please refer to github.com/containers/psgo.
func (c *Container) GetContainerPidInformation(descriptors []string) ([]string, error) {
pid := strconv.Itoa(c.state.PID)
- // TODO: psgo returns a [][]string to give users the ability to apply
+ // NOTE: psgo returns a [][]string to give users the ability to apply
// filters on the data. We need to change the API here
// to return a [][]string if we want to make use of
// filtering.
@@ -157,7 +157,7 @@ func (c *Container) execPS(args []string) ([]string, error) {
if err != nil {
return nil, err
} else if ec != 0 {
- return nil, errors.Errorf("Runtime failed with exit status: %d and output: %s", ec, strings.Join(stderr, " "))
+ return nil, fmt.Errorf("runtime failed with exit status: %d and output: %s", ec, strings.Join(stderr, " "))
}
if logrus.GetLevel() >= logrus.DebugLevel {
diff --git a/libpod/container_validate.go b/libpod/container_validate.go
index cfbdd2b1e..e280c60d2 100644
--- a/libpod/container_validate.go
+++ b/libpod/container_validate.go
@@ -5,7 +5,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
// Validate that the configuration of a container is valid.
@@ -16,17 +15,17 @@ func (c *Container) validate() error {
// If one of RootfsImageID or RootfsImageName is set, both must be set.
if (imageIDSet || imageNameSet) && !(imageIDSet && imageNameSet) {
- return errors.Wrapf(define.ErrInvalidArg, "both RootfsImageName and RootfsImageID must be set if either is set")
+ return fmt.Errorf("both RootfsImageName and RootfsImageID must be set if either is set: %w", define.ErrInvalidArg)
}
// Cannot set RootfsImageID and Rootfs at the same time
if imageIDSet && rootfsSet {
- return errors.Wrapf(define.ErrInvalidArg, "cannot set both an image ID and rootfs for a container")
+ return fmt.Errorf("cannot set both an image ID and rootfs for a container: %w", define.ErrInvalidArg)
}
// Must set at least one of RootfsImageID or Rootfs
if !(imageIDSet || rootfsSet) {
- return errors.Wrapf(define.ErrInvalidArg, "must set root filesystem source to either image or rootfs")
+ return fmt.Errorf("must set root filesystem source to either image or rootfs: %w", define.ErrInvalidArg)
}
// A container cannot be marked as an infra and service container at
@@ -38,62 +37,62 @@ func (c *Container) validate() error {
// Cannot make a network namespace if we are joining another container's
// network namespace
if c.config.CreateNetNS && c.config.NetNsCtr != "" {
- return errors.Wrapf(define.ErrInvalidArg, "cannot both create a network namespace and join another container's network namespace")
+ return fmt.Errorf("cannot both create a network namespace and join another container's network namespace: %w", define.ErrInvalidArg)
}
if c.config.CgroupsMode == cgroupSplit && c.config.CgroupParent != "" {
- return errors.Wrapf(define.ErrInvalidArg, "cannot specify --cgroup-mode=split with a cgroup-parent")
+ return fmt.Errorf("cannot specify --cgroup-mode=split with a cgroup-parent: %w", define.ErrInvalidArg)
}
// Not creating cgroups has a number of requirements, mostly related to
// the PID namespace.
if c.config.NoCgroups || c.config.CgroupsMode == "disabled" {
if c.config.PIDNsCtr != "" {
- return errors.Wrapf(define.ErrInvalidArg, "cannot join another container's PID namespace if not creating cgroups")
+ return fmt.Errorf("cannot join another container's PID namespace if not creating cgroups: %w", define.ErrInvalidArg)
}
if c.config.CgroupParent != "" {
- return errors.Wrapf(define.ErrInvalidArg, "cannot set cgroup parent if not creating cgroups")
+ return fmt.Errorf("cannot set cgroup parent if not creating cgroups: %w", define.ErrInvalidArg)
}
// Ensure we have a PID namespace
if c.config.Spec.Linux == nil {
- return errors.Wrapf(define.ErrInvalidArg, "must provide Linux namespace configuration in OCI spec when using NoCgroups")
+ return fmt.Errorf("must provide Linux namespace configuration in OCI spec when using NoCgroups: %w", define.ErrInvalidArg)
}
foundPid := false
for _, ns := range c.config.Spec.Linux.Namespaces {
if ns.Type == spec.PIDNamespace {
foundPid = true
if ns.Path != "" {
- return errors.Wrapf(define.ErrInvalidArg, "containers not creating Cgroups must create a private PID namespace - cannot use another")
+ return fmt.Errorf("containers not creating Cgroups must create a private PID namespace - cannot use another: %w", define.ErrInvalidArg)
}
break
}
}
if !foundPid {
- return errors.Wrapf(define.ErrInvalidArg, "containers not creating Cgroups must create a private PID namespace")
+ return fmt.Errorf("containers not creating Cgroups must create a private PID namespace: %w", define.ErrInvalidArg)
}
}
// Can only set static IP or MAC if creating a network namespace.
if !c.config.CreateNetNS && (c.config.StaticIP != nil || c.config.StaticMAC != nil) {
- return errors.Wrapf(define.ErrInvalidArg, "cannot set static IP or MAC address if not creating a network namespace")
+ return fmt.Errorf("cannot set static IP or MAC address if not creating a network namespace: %w", define.ErrInvalidArg)
}
// Cannot set static IP or MAC if joining >1 CNI network.
if len(c.config.Networks) > 1 && (c.config.StaticIP != nil || c.config.StaticMAC != nil) {
- return errors.Wrapf(define.ErrInvalidArg, "cannot set static IP or MAC address if joining more than one network")
+ return fmt.Errorf("cannot set static IP or MAC address if joining more than one network: %w", define.ErrInvalidArg)
}
// Using image resolv.conf conflicts with various DNS settings.
if c.config.UseImageResolvConf &&
(len(c.config.DNSSearch) > 0 || len(c.config.DNSServer) > 0 ||
len(c.config.DNSOption) > 0) {
- return errors.Wrapf(define.ErrInvalidArg, "cannot configure DNS options if using image's resolv.conf")
+ return fmt.Errorf("cannot configure DNS options if using image's resolv.conf: %w", define.ErrInvalidArg)
}
if c.config.UseImageHosts && len(c.config.HostAdd) > 0 {
- return errors.Wrapf(define.ErrInvalidArg, "cannot add to /etc/hosts if using image's /etc/hosts")
+ return fmt.Errorf("cannot add to /etc/hosts if using image's /etc/hosts: %w", define.ErrInvalidArg)
}
// Check named volume, overlay volume and image volume destination conflicts
@@ -102,7 +101,7 @@ func (c *Container) validate() error {
// Don't check if they already exist.
// If they don't we will automatically create them.
if _, ok := destinations[vol.Dest]; ok {
- return errors.Wrapf(define.ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
+ return fmt.Errorf("two volumes found with destination %s: %w", vol.Dest, define.ErrInvalidArg)
}
destinations[vol.Dest] = true
}
@@ -110,7 +109,7 @@ func (c *Container) validate() error {
// Don't check if they already exist.
// If they don't we will automatically create them.
if _, ok := destinations[vol.Dest]; ok {
- return errors.Wrapf(define.ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
+ return fmt.Errorf("two volumes found with destination %s: %w", vol.Dest, define.ErrInvalidArg)
}
destinations[vol.Dest] = true
}
@@ -118,7 +117,7 @@ func (c *Container) validate() error {
// Don't check if they already exist.
// If they don't we will automatically create them.
if _, ok := destinations[vol.Dest]; ok {
- return errors.Wrapf(define.ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
+ return fmt.Errorf("two volumes found with destination %s: %w", vol.Dest, define.ErrInvalidArg)
}
destinations[vol.Dest] = true
}
@@ -126,13 +125,13 @@ func (c *Container) validate() error {
// If User in the OCI spec is set, require that c.config.User is set for
// security reasons (a lot of our code relies on c.config.User).
if c.config.User == "" && (c.config.Spec.Process.User.UID != 0 || c.config.Spec.Process.User.GID != 0) {
- return errors.Wrapf(define.ErrInvalidArg, "please set User explicitly via WithUser() instead of in OCI spec directly")
+ return fmt.Errorf("please set User explicitly via WithUser() instead of in OCI spec directly: %w", define.ErrInvalidArg)
}
// Init-ctrs must be used inside a Pod. Check if an init container type is
// passed and if no pod is passed
if len(c.config.InitContainerType) > 0 && len(c.config.Pod) < 1 {
- return errors.Wrap(define.ErrInvalidArg, "init containers must be created in a pod")
+ return fmt.Errorf("init containers must be created in a pod: %w", define.ErrInvalidArg)
}
return nil
}
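
Throughout validate(), the sentinel define.ErrInvalidArg now sits at the end of the message via %w rather than at the front via errors.Wrapf; callers that match on the sentinel are unaffected. A standalone sketch of the pattern (ErrInvalidArg below is a stand-in for define.ErrInvalidArg):

package main

import (
    "errors"
    "fmt"
)

var ErrInvalidArg = errors.New("invalid argument")

func checkDest(dest string, seen map[string]bool) error {
    if seen[dest] {
        return fmt.Errorf("two volumes found with destination %s: %w", dest, ErrInvalidArg)
    }
    seen[dest] = true
    return nil
}

func main() {
    seen := map[string]bool{}
    _ = checkDest("/data", seen)
    err := checkDest("/data", seen)
    fmt.Println(errors.Is(err, ErrInvalidArg)) // true: the sentinel survives wrapping
}
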
diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go
index e7b82d654..ccc4ae00f 100644
--- a/libpod/define/container_inspect.go
+++ b/libpod/define/container_inspect.go
@@ -259,9 +259,7 @@ type HealthCheckLog struct {
// as possible from the spec and container config.
// Some things cannot be inferred. These will be populated by spec annotations
// (if available).
-// Field names are fixed for compatibility and cannot be changed.
-// As such, silence lint warnings about them.
-//nolint
+//nolint:revive,stylecheck // Field names are fixed for compatibility and cannot be changed.
type InspectContainerHostConfig struct {
// Binds contains an array of user-added mounts.
// Both volume mounts and named volumes are included.
diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go
index 9ad3aec08..00080ef37 100644
--- a/libpod/define/containerstate.go
+++ b/libpod/define/containerstate.go
@@ -1,9 +1,8 @@
package define
import (
+ "fmt"
"time"
-
- "github.com/pkg/errors"
)
// ContainerStatus represents the current state of a container
@@ -91,7 +90,7 @@ func StringToContainerStatus(status string) (ContainerStatus, error) {
case ContainerStateRemoving.String():
return ContainerStateRemoving, nil
default:
- return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status)
+ return ContainerStateUnknown, fmt.Errorf("unknown container state: %s: %w", status, ErrInvalidArg)
}
}
diff --git a/libpod/define/errors.go b/libpod/define/errors.go
index f5a7c73e5..b858e1989 100644
--- a/libpod/define/errors.go
+++ b/libpod/define/errors.go
@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/containers/common/libnetwork/types"
+ "github.com/containers/common/pkg/util"
)
var (
@@ -24,6 +25,10 @@ var (
// not exist.
ErrNoSuchExecSession = errors.New("no such exec session")
+ // ErrNoSuchExitCode indicates that the requested container exit code
+ // does not exist.
+ ErrNoSuchExitCode = errors.New("no such exit code")
+
// ErrDepExists indicates that the current object has dependencies and
// cannot be removed before them.
ErrDepExists = errors.New("dependency exists")
@@ -88,7 +93,7 @@ var (
// ErrDetach indicates that an attach session was manually detached by
// the user.
- ErrDetach = errors.New("detached from container")
+ ErrDetach = util.ErrDetach
// ErrWillDeadlock indicates that the requested operation will cause a
// deadlock. This is usually caused by upgrade issues, and is resolved
diff --git a/libpod/define/exec_codes.go b/libpod/define/exec_codes.go
index f94616b33..3f2da4910 100644
--- a/libpod/define/exec_codes.go
+++ b/libpod/define/exec_codes.go
@@ -1,9 +1,9 @@
package define
import (
+ "errors"
"strings"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -23,10 +23,10 @@ const (
// has a predefined exit code associated. If so, it returns that, otherwise it returns
// the exit code originally stated in libpod.Exec()
func TranslateExecErrorToExitCode(originalEC int, err error) int {
- if errors.Cause(err) == ErrOCIRuntimePermissionDenied {
+ if errors.Is(err, ErrOCIRuntimePermissionDenied) {
return ExecErrorCodeCannotInvoke
}
- if errors.Cause(err) == ErrOCIRuntimeNotFound {
+ if errors.Is(err, ErrOCIRuntimeNotFound) {
return ExecErrorCodeNotFound
}
return originalEC
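
Unlike a direct comparison against errors.Cause, errors.Is matches the sentinel anywhere in a wrap chain, so exit-code translation keeps working for errors wrapped with %w. A standalone sketch of that behavior (the sentinel and exit code below are stand-ins for the define package's values):

package main

import (
    "errors"
    "fmt"
)

var ErrOCIRuntimeNotFound = errors.New("OCI runtime not found")

const ExecErrorCodeNotFound = 127

func translate(originalEC int, err error) int {
    if errors.Is(err, ErrOCIRuntimeNotFound) {
        return ExecErrorCodeNotFound
    }
    return originalEC
}

func main() {
    wrapped := fmt.Errorf("starting exec session: %w", ErrOCIRuntimeNotFound)
    fmt.Println(translate(1, wrapped)) // 127: errors.Is walks the whole chain
}
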
diff --git a/libpod/define/healthchecks.go b/libpod/define/healthchecks.go
index bde449d30..f71274350 100644
--- a/libpod/define/healthchecks.go
+++ b/libpod/define/healthchecks.go
@@ -47,3 +47,13 @@ const (
// DefaultHealthCheckTimeout default value
DefaultHealthCheckTimeout = "30s"
)
+
+// HealthConfig.Test options
+const (
+ // HealthConfigTestNone disables healthcheck
+ HealthConfigTestNone = "NONE"
+ // HealthConfigTestCmd execs arguments directly
+ HealthConfigTestCmd = "CMD"
+ // HealthConfigTestCmdShell runs commands with the system's default shell
+ HealthConfigTestCmdShell = "CMD-SHELL"
+)
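
These constants name the three HealthConfig.Test forms inherited from Docker. A standalone sketch of how a Test slice selects the command to run, mirroring the runHealthCheck switch further down in this diff (the shell path and default-case behavior are illustrative simplifications):

package main

import (
    "errors"
    "fmt"
    "strings"
)

func healthCheckCommand(test []string) ([]string, error) {
    if len(test) < 1 {
        return nil, errors.New("no healthcheck defined")
    }
    switch test[0] {
    case "", "NONE":
        return nil, errors.New("healthcheck disabled")
    case "CMD":
        return test[1:], nil // exec the arguments directly
    case "CMD-SHELL":
        return []string{"/bin/sh", "-c", strings.Join(test[1:], " ")}, nil
    default:
        return test, nil // treat the whole slice as a command
    }
}

func main() {
    cmd, _ := healthCheckCommand([]string{"CMD-SHELL", "curl -f http://localhost || exit 1"})
    fmt.Println(cmd)
}
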
diff --git a/libpod/define/info.go b/libpod/define/info.go
index 911fa5c03..c716bec7b 100644
--- a/libpod/define/info.go
+++ b/libpod/define/info.go
@@ -14,7 +14,7 @@ type Info struct {
Version Version `json:"version"`
}
-// HostInfo describes the libpod host
+// SecurityInfo describes the security features of the libpod host
type SecurityInfo struct {
AppArmorEnabled bool `json:"apparmorEnabled"`
DefaultCapabilities string `json:"capabilities"`
@@ -64,8 +64,7 @@ type RemoteSocket struct {
Exists bool `json:"exists,omitempty"`
}
-// SlirpInfo describes the slirp executable that
-// is being being used.
+// SlirpInfo describes the slirp executable that is being used
type SlirpInfo struct {
Executable string `json:"executable"`
Package string `json:"package"`
@@ -78,8 +77,7 @@ type IDMappings struct {
UIDMap []idtools.IDMap `json:"uidmap"`
}
-// DistributionInfo describes the host distribution
-// for libpod
+// DistributionInfo describes the host distribution for libpod
type DistributionInfo struct {
Distribution string `json:"distribution"`
Variant string `json:"variant,omitempty"`
@@ -141,8 +139,8 @@ type Plugins struct {
Volume []string `json:"volume"`
Network []string `json:"network"`
Log []string `json:"log"`
- // FIXME what should we do with Authorization, docker seems to return nothing by default
- // Authorization []string `json:"authorization"`
+ // Authorization is provided for compatibility; it will always be nil as Podman has no daemon
+ Authorization []string `json:"authorization"`
}
type CPUUsage struct {
diff --git a/libpod/define/pod_inspect.go b/libpod/define/pod_inspect.go
index 219ffade2..2afef48c4 100644
--- a/libpod/define/pod_inspect.go
+++ b/libpod/define/pod_inspect.go
@@ -69,6 +69,8 @@ type InspectPodData struct {
VolumesFrom []string `json:"volumes_from,omitempty"`
// SecurityOpt contains the specified security labels and related SELinux information
SecurityOpts []string `json:"security_opt,omitempty"`
+ // MemoryLimit contains the specified cgroup memory limit for the pod
+ MemoryLimit uint64 `json:"memory_limit,omitempty"`
}
// InspectPodInfraConfig contains the configuration of the pod's infra
@@ -82,6 +84,7 @@ type InspectPodInfraConfig struct {
HostNetwork bool
// StaticIP is a static IPv4 that will be assigned to the infra
// container and then used by the pod.
+ // swagger:strfmt ipv4
StaticIP net.IP
// StaticMAC is a static MAC address that will be assigned to the infra
// container and then used by the pod.
@@ -119,6 +122,8 @@ type InspectPodInfraConfig struct {
PidNS string `json:"pid_ns,omitempty"`
// UserNS is the usernamespace that all the containers in the pod will join.
UserNS string `json:"userns,omitempty"`
+ // UtsNS is the uts namespace that all containers in the pod will join
+ UtsNS string `json:"uts_ns,omitempty"`
}
// InspectPodContainerInfo contains information on a container in a pod.
diff --git a/libpod/define/terminal.go b/libpod/define/terminal.go
deleted file mode 100644
index ce8955544..000000000
--- a/libpod/define/terminal.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package define
-
-// TerminalSize represents the width and height of a terminal.
-type TerminalSize struct {
- Width uint16
- Height uint16
-}
diff --git a/libpod/define/volume_inspect.go b/libpod/define/volume_inspect.go
index fac179176..f731a8735 100644
--- a/libpod/define/volume_inspect.go
+++ b/libpod/define/volume_inspect.go
@@ -56,4 +56,12 @@ type InspectVolumeData struct {
// a container, the container will chown the volume to the container process
// UID/GID.
NeedsChown bool `json:"NeedsChown,omitempty"`
+ // Timeout is the specified driver timeout if given
+ Timeout int `json:"Timeout,omitempty"`
+}
+
+type VolumeReload struct {
+ Added []string
+ Removed []string
+ Errors []error
}
diff --git a/libpod/diff.go b/libpod/diff.go
index 86fa063ec..8f0ad9355 100644
--- a/libpod/diff.go
+++ b/libpod/diff.go
@@ -1,10 +1,11 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/layers"
"github.com/containers/storage/pkg/archive"
- "github.com/pkg/errors"
)
var initInodes = map[string]bool{
@@ -76,5 +77,5 @@ func (r *Runtime) getLayerID(id string, diffType define.DiffType) (string, error
}
lastErr = err
}
- return "", errors.Wrapf(lastErr, "%s not found", id)
+ return "", fmt.Errorf("%s not found: %w", id, lastErr)
}
diff --git a/libpod/events.go b/libpod/events.go
index f09d8402a..c9e4c9d26 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -6,7 +6,6 @@ import (
"sync"
"github.com/containers/podman/v4/libpod/events"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -33,6 +32,17 @@ func (c *Container) newContainerEvent(status events.Status) {
Attributes: c.Labels(),
}
+ // if the current event is a HealthStatus event, we need to get the current
+ // status of the container to pass to the event
+ if status == events.HealthStatus {
+ containerHealthStatus, err := c.healthCheckStatus()
+ if err != nil {
+ e.HealthStatus = fmt.Sprintf("%v", err)
+ } else {
+ e.HealthStatus = containerHealthStatus
+ }
+ }
+
if err := c.runtime.eventer.Write(e); err != nil {
logrus.Errorf("Unable to write pod event: %q", err)
}
@@ -151,6 +160,9 @@ func (r *Runtime) GetEvents(ctx context.Context, filters []string) ([]*events.Ev
// GetLastContainerEvent takes a container name or ID and an event status and returns
// the last occurrence of the container event
func (r *Runtime) GetLastContainerEvent(ctx context.Context, nameOrID string, containerEvent events.Status) (*events.Event, error) {
+ // FIXME: events should be read in reverse order!
+ // https://github.com/containers/podman/issues/14579
+
// check to make sure the event.Status is valid
if _, err := events.StringToStatus(containerEvent.String()); err != nil {
return nil, err
@@ -165,7 +177,7 @@ func (r *Runtime) GetLastContainerEvent(ctx context.Context, nameOrID string, co
return nil, err
}
if len(containerEvents) < 1 {
- return nil, errors.Wrapf(events.ErrEventNotFound, "%s not found", containerEvent.String())
+ return nil, fmt.Errorf("%s not found: %w", containerEvent.String(), events.ErrEventNotFound)
}
// return the last element in the slice
return containerEvents[len(containerEvents)-1], nil
@@ -188,7 +200,7 @@ func (r *Runtime) GetExecDiedEvent(ctx context.Context, nameOrID, execSessionID
// There *should* only be one event maximum.
// But... just in case... let's not blow up if there's more than one.
if len(containerEvents) < 1 {
- return nil, errors.Wrapf(events.ErrEventNotFound, "exec died event for session %s (container %s) not found", execSessionID, nameOrID)
+ return nil, fmt.Errorf("exec died event for session %s (container %s) not found: %w", execSessionID, nameOrID, events.ErrEventNotFound)
}
return containerEvents[len(containerEvents)-1], nil
}
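
GetLastContainerEvent still scans forward and keeps the final match (see the FIXME above about reading in reverse). A standalone sketch of that last-occurrence lookup (event is a pared-down stand-in for events.Event):

package main

import "fmt"

type event struct{ Status, HealthStatus string }

// lastEvent keeps the final match of a forward scan, as GetLastContainerEvent does.
func lastEvent(all []event, status string) (event, bool) {
    var out event
    found := false
    for _, e := range all {
        if e.Status == status {
            out, found = e, true
        }
    }
    return out, found
}

func main() {
    evs := []event{
        {Status: "start"},
        {Status: "health_status", HealthStatus: "starting"},
        {Status: "health_status", HealthStatus: "healthy"},
    }
    if last, ok := lastEvent(evs, "health_status"); ok {
        fmt.Println(last.HealthStatus) // healthy
    }
}
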
diff --git a/libpod/events/config.go b/libpod/events/config.go
index 2e7016136..4ea45a00e 100644
--- a/libpod/events/config.go
+++ b/libpod/events/config.go
@@ -2,9 +2,8 @@ package events
import (
"context"
+ "errors"
"time"
-
- "github.com/pkg/errors"
)
// EventerType ...
@@ -40,6 +39,8 @@ type Event struct {
Time time.Time
// Type of event that occurred
Type Type
+ // Health status of the current container
+ HealthStatus string `json:"health_status,omitempty"`
Details
}
@@ -141,6 +142,8 @@ const (
Exited Status = "died"
// Export ...
Export Status = "export"
+ // HealthStatus ...
+ HealthStatus Status = "health_status"
// History ...
History Status = "history"
// Import ...
diff --git a/libpod/events/events.go b/libpod/events/events.go
index e83c2efee..764481e51 100644
--- a/libpod/events/events.go
+++ b/libpod/events/events.go
@@ -2,16 +2,16 @@ package events
import (
"encoding/json"
+ "errors"
"fmt"
"time"
"github.com/containers/storage/pkg/stringid"
- "github.com/pkg/errors"
)
// ErrNoJournaldLogging indicates that there is no journald logging
// supported (requires libsystemd)
-var ErrNoJournaldLogging = errors.New("No support for journald logging")
+var ErrNoJournaldLogging = errors.New("no support for journald logging")
// String returns a string representation of EventerType
func (et EventerType) String() string {
@@ -76,7 +76,7 @@ func (e *Event) ToHumanReadable(truncate bool) string {
}
switch e.Type {
case Container, Pod:
- humanFormat = fmt.Sprintf("%s %s %s %s (image=%s, name=%s", e.Time, e.Type, e.Status, id, e.Image, e.Name)
+ humanFormat = fmt.Sprintf("%s %s %s %s (image=%s, name=%s, health_status=%s", e.Time, e.Type, e.Status, id, e.Image, e.Name, e.HealthStatus)
// check if the container has labels and add it to the output
if len(e.Attributes) > 0 {
for k, v := range e.Attributes {
@@ -140,12 +140,10 @@ func StringToType(name string) (Type, error) {
case "":
return "", ErrEventTypeBlank
}
- return "", errors.Errorf("unknown event type %q", name)
+ return "", fmt.Errorf("unknown event type %q", name)
}
// StringToStatus converts a string to an Event Status
-// TODO if we add more events, we might consider a go-generator to
-// create the switch statement
func StringToStatus(name string) (Status, error) {
switch name {
case Attach.String():
@@ -170,6 +168,8 @@ func StringToStatus(name string) (Status, error) {
return Exited, nil
case Export.String():
return Export, nil
+ case HealthStatus.String():
+ return HealthStatus, nil
case History.String():
return History, nil
case Import.String():
@@ -225,5 +225,5 @@ func StringToStatus(name string) (Status, error) {
case Untag.String():
return Untag, nil
}
- return "", errors.Errorf("unknown event status %q", name)
+ return "", fmt.Errorf("unknown event status %q", name)
}
diff --git a/libpod/events/events_linux.go b/libpod/events/events_linux.go
index 4320f2190..e7801af5b 100644
--- a/libpod/events/events_linux.go
+++ b/libpod/events/events_linux.go
@@ -1,9 +1,9 @@
package events
import (
+ "fmt"
"strings"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -14,7 +14,7 @@ func NewEventer(options EventerOptions) (Eventer, error) {
case strings.ToUpper(Journald.String()):
eventer, err := newEventJournalD(options)
if err != nil {
- return nil, errors.Wrapf(err, "eventer creation")
+ return nil, fmt.Errorf("eventer creation: %w", err)
}
return eventer, nil
case strings.ToUpper(LogFile.String()):
@@ -24,6 +24,6 @@ func NewEventer(options EventerOptions) (Eventer, error) {
case strings.ToUpper(Memory.String()):
return NewMemoryEventer(), nil
default:
- return nil, errors.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType))
+ return nil, fmt.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType))
}
}
diff --git a/libpod/events/events_unsupported.go b/libpod/events/events_unsupported.go
index 25c175524..d766402a9 100644
--- a/libpod/events/events_unsupported.go
+++ b/libpod/events/events_unsupported.go
@@ -3,7 +3,7 @@
package events
-import "github.com/pkg/errors"
+import "errors"
// NewEventer creates an eventer based on the eventer type
func NewEventer(options EventerOptions) (Eventer, error) {
diff --git a/libpod/events/filters.go b/libpod/events/filters.go
index 64c162db2..d5b96e7ec 100644
--- a/libpod/events/filters.go
+++ b/libpod/events/filters.go
@@ -1,11 +1,11 @@
package events
import (
+ "fmt"
"strings"
"time"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error) {
@@ -74,7 +74,7 @@ func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error
return found
}, nil
}
- return nil, errors.Errorf("%s is an invalid filter", filter)
+ return nil, fmt.Errorf("%s is an invalid filter", filter)
}
func generateEventSinceOption(timeSince time.Time) func(e *Event) bool {
@@ -92,7 +92,7 @@ func generateEventUntilOption(timeUntil time.Time) func(e *Event) bool {
func parseFilter(filter string) (string, string, error) {
filterSplit := strings.SplitN(filter, "=", 2)
if len(filterSplit) != 2 {
- return "", "", errors.Errorf("%s is an invalid filter", filter)
+ return "", "", fmt.Errorf("%s is an invalid filter", filter)
}
return filterSplit[0], filterSplit[1], nil
}
@@ -137,7 +137,7 @@ func generateEventFilters(filters []string, since, until string) (map[string][]E
if len(since) > 0 {
timeSince, err := util.ParseInputTime(since, true)
if err != nil {
- return nil, errors.Wrapf(err, "unable to convert since time of %s", since)
+ return nil, fmt.Errorf("unable to convert since time of %s: %w", since, err)
}
filterFunc := generateEventSinceOption(timeSince)
filterMap["since"] = []EventFilter{filterFunc}
@@ -146,7 +146,7 @@ func generateEventFilters(filters []string, since, until string) (map[string][]E
if len(until) > 0 {
timeUntil, err := util.ParseInputTime(until, false)
if err != nil {
- return nil, errors.Wrapf(err, "unable to convert until time of %s", until)
+ return nil, fmt.Errorf("unable to convert until time of %s: %w", until, err)
}
filterFunc := generateEventUntilOption(timeUntil)
filterMap["until"] = []EventFilter{filterFunc}
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 866042a4c..0a0a768d0 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -6,13 +6,14 @@ package events
import (
"context"
"encoding/json"
+ "errors"
+ "fmt"
"strconv"
"time"
"github.com/containers/podman/v4/pkg/util"
"github.com/coreos/go-systemd/v22/journal"
"github.com/coreos/go-systemd/v22/sdjournal"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -58,13 +59,14 @@ func (e EventJournalD) Write(ee Event) error {
}
m["PODMAN_LABELS"] = string(b)
}
+ m["PODMAN_HEALTH_STATUS"] = ee.HealthStatus
case Network:
m["PODMAN_ID"] = ee.ID
m["PODMAN_NETWORK_NAME"] = ee.Network
case Volume:
m["PODMAN_NAME"] = ee.Name
}
- return journal.Send(string(ee.ToHumanReadable(false)), journal.PriInfo, m)
+ return journal.Send(ee.ToHumanReadable(false), journal.PriInfo, m)
}
// Read reads events from the journal and sends qualified events to the event channel
@@ -72,7 +74,7 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
defer close(options.EventChannel)
filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until)
if err != nil {
- return errors.Wrapf(err, "failed to parse event filters")
+ return fmt.Errorf("failed to parse event filters: %w", err)
}
var untilTime time.Time
@@ -95,29 +97,29 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
// match only podman journal entries
podmanJournal := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"}
if err := j.AddMatch(podmanJournal.String()); err != nil {
- return errors.Wrap(err, "failed to add journal filter for event log")
+ return fmt.Errorf("failed to add journal filter for event log: %w", err)
}
if len(options.Since) == 0 && len(options.Until) == 0 && options.Stream {
if err := j.SeekTail(); err != nil {
- return errors.Wrap(err, "failed to seek end of journal")
+ return fmt.Errorf("failed to seek end of journal: %w", err)
}
// After SeekTail calling Next moves to a random entry.
// To prevent this we have to call Previous first.
// see: https://bugs.freedesktop.org/show_bug.cgi?id=64614
if _, err := j.Previous(); err != nil {
- return errors.Wrap(err, "failed to move journal cursor to previous entry")
+ return fmt.Errorf("failed to move journal cursor to previous entry: %w", err)
}
}
// the api requires a next|prev before getting a cursor
if _, err := j.Next(); err != nil {
- return errors.Wrap(err, "failed to move journal cursor to next entry")
+ return fmt.Errorf("failed to move journal cursor to next entry: %w", err)
}
prevCursor, err := j.GetCursor()
if err != nil {
- return errors.Wrap(err, "failed to get journal cursor")
+ return fmt.Errorf("failed to get journal cursor: %w", err)
}
for {
select {
@@ -129,11 +131,11 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
}
if _, err := j.Next(); err != nil {
- return errors.Wrap(err, "failed to move journal cursor to next entry")
+ return fmt.Errorf("failed to move journal cursor to next entry: %w", err)
}
newCursor, err := j.GetCursor()
if err != nil {
- return errors.Wrap(err, "failed to get journal cursor")
+ return fmt.Errorf("failed to get journal cursor: %w", err)
}
if prevCursor == newCursor {
if !options.Stream || (len(options.Until) > 0 && time.Now().After(untilTime)) {
@@ -150,14 +152,14 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
entry, err := j.GetEntry()
if err != nil {
- return errors.Wrap(err, "failed to read journal entry")
+ return fmt.Errorf("failed to read journal entry: %w", err)
}
newEvent, err := newEventFromJournalEntry(entry)
if err != nil {
// We can't decode this event.
// Don't fail hard - that would make events unusable.
// Instead, log and continue.
- if errors.Cause(err) != ErrEventTypeBlank {
+ if !errors.Is(err, ErrEventTypeBlank) {
logrus.Errorf("Unable to decode event: %v", err)
}
continue
@@ -167,10 +169,9 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
}
}
return nil
-
}
-func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { //nolint
+func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) {
newEvent := Event{}
eventType, err := StringToType(entry.Fields["PODMAN_TYPE"])
if err != nil {
@@ -214,6 +215,7 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { /
newEvent.Details = Details{Attributes: labels}
}
}
+ newEvent.HealthStatus = entry.Fields["PODMAN_HEALTH_STATUS"]
case Network:
newEvent.ID = entry.Fields["PODMAN_ID"]
newEvent.Network = entry.Fields["PODMAN_NETWORK_NAME"]
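
The journald reader's loop hinges on cursor bookkeeping: SeekTail followed by Next lands on an arbitrary entry unless Previous is called first, and an unchanged cursor after Next means no new entries have arrived. A simplified standalone sketch of that follow loop (polling with a sleep where the real code waits on the journal):

package main

import (
    "fmt"
    "time"

    "github.com/coreos/go-systemd/v22/sdjournal"
)

func follow() error {
    j, err := sdjournal.NewJournal()
    if err != nil {
        return err
    }
    defer j.Close()
    if err := j.AddMatch("SYSLOG_IDENTIFIER=podman"); err != nil {
        return err
    }
    // SeekTail alone leaves Next on an arbitrary entry; step back first.
    if err := j.SeekTail(); err != nil {
        return err
    }
    if _, err := j.Previous(); err != nil {
        return err
    }
    // A cursor can only be read after a Next or Previous.
    if _, err := j.Next(); err != nil {
        return err
    }
    prev, err := j.GetCursor()
    if err != nil {
        return err
    }
    for {
        if _, err := j.Next(); err != nil {
            return err
        }
        cur, err := j.GetCursor()
        if err != nil {
            return err
        }
        if cur == prev { // unchanged cursor: caught up, wait for new entries
            time.Sleep(250 * time.Millisecond)
            continue
        }
        prev = cur
        entry, err := j.GetEntry()
        if err != nil {
            return err
        }
        fmt.Println(entry.Fields["MESSAGE"])
    }
}

func main() {
    if err := follow(); err != nil {
        fmt.Println(err)
    }
}
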
diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go
index 21fdd8027..4dafd8600 100644
--- a/libpod/events/logfile.go
+++ b/libpod/events/logfile.go
@@ -6,6 +6,7 @@ package events
import (
"bufio"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -16,7 +17,6 @@ import (
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/storage/pkg/lockfile"
"github.com/nxadm/tail"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -90,7 +90,7 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
defer close(options.EventChannel)
filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until)
if err != nil {
- return errors.Wrapf(err, "failed to parse event filters")
+ return fmt.Errorf("failed to parse event filters: %w", err)
}
t, err := e.getTail(options)
if err != nil {
@@ -136,7 +136,7 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
case Image, Volume, Pod, System, Container, Network:
// no-op
default:
- return errors.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath)
+ return fmt.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath)
}
if copy && applyFilters(event, filterMap) {
options.EventChannel <- event
diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go
index 40af9aec3..9b9d12b17 100644
--- a/libpod/healthcheck.go
+++ b/libpod/healthcheck.go
@@ -2,6 +2,8 @@ package libpod
import (
"bufio"
+ "errors"
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -9,7 +11,6 @@ import (
"time"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -26,7 +27,7 @@ const (
func (r *Runtime) HealthCheck(name string) (define.HealthCheckStatus, error) {
container, err := r.LookupContainer(name)
if err != nil {
- return define.HealthCheckContainerNotFound, errors.Wrapf(err, "unable to lookup %s to perform a health check", name)
+ return define.HealthCheckContainerNotFound, fmt.Errorf("unable to look up %s to perform a health check: %w", name, err)
}
hcStatus, err := checkHealthCheckCanBeRun(container)
if err == nil {
@@ -44,14 +45,14 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
)
hcCommand := c.HealthCheckConfig().Test
if len(hcCommand) < 1 {
- return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, fmt.Errorf("container %s has no defined healthcheck", c.ID())
}
switch hcCommand[0] {
- case "", "NONE":
- return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
- case "CMD":
+ case "", define.HealthConfigTestNone:
+ return define.HealthCheckNotDefined, fmt.Errorf("container %s has no defined healthcheck", c.ID())
+ case define.HealthConfigTestCmd:
newCommand = hcCommand[1:]
- case "CMD-SHELL":
+ case define.HealthConfigTestCmdShell:
// TODO: SHELL command from image not available in Container - use Docker default
newCommand = []string{"/bin/sh", "-c", strings.Join(hcCommand[1:], " ")}
default:
@@ -59,11 +60,11 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
newCommand = hcCommand
}
if len(newCommand) < 1 || newCommand[0] == "" {
- return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, fmt.Errorf("container %s has no defined healthcheck", c.ID())
}
rPipe, wPipe, err := os.Pipe()
if err != nil {
- return define.HealthCheckInternalError, errors.Wrapf(err, "unable to create pipe for healthcheck session")
+ return define.HealthCheckInternalError, fmt.Errorf("unable to create pipe for healthcheck session: %w", err)
}
defer wPipe.Close()
defer rPipe.Close()
@@ -90,13 +91,12 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
hcResult := define.HealthCheckSuccess
config := new(ExecConfig)
config.Command = newCommand
- exitCode, hcErr := c.Exec(config, streams, nil)
+ exitCode, hcErr := c.exec(config, streams, nil, true)
if hcErr != nil {
- errCause := errors.Cause(hcErr)
hcResult = define.HealthCheckFailure
- if errCause == define.ErrOCIRuntimeNotFound ||
- errCause == define.ErrOCIRuntimePermissionDenied ||
- errCause == define.ErrOCIRuntime {
+ if errors.Is(hcErr, define.ErrOCIRuntimeNotFound) ||
+ errors.Is(hcErr, define.ErrOCIRuntimePermissionDenied) ||
+ errors.Is(hcErr, define.ErrOCIRuntime) {
returnCode = 1
hcErr = nil
} else {
@@ -125,11 +125,11 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
if timeEnd.Sub(timeStart) > c.HealthCheckConfig().Timeout {
returnCode = -1
hcResult = define.HealthCheckFailure
- hcErr = errors.Errorf("healthcheck command exceeded timeout of %s", c.HealthCheckConfig().Timeout.String())
+ hcErr = fmt.Errorf("healthcheck command exceeded timeout of %s", c.HealthCheckConfig().Timeout.String())
}
hcl := newHealthCheckLog(timeStart, timeEnd, returnCode, eventLog)
if err := c.updateHealthCheckLog(hcl, inStartPeriod); err != nil {
- return hcResult, errors.Wrapf(err, "unable to update health check log %s for %s", c.healthCheckLogPath(), c.ID())
+ return hcResult, fmt.Errorf("unable to update health check log %s for %s: %w", c.healthCheckLogPath(), c.ID(), err)
}
return hcResult, hcErr
}
@@ -140,10 +140,10 @@ func checkHealthCheckCanBeRun(c *Container) (define.HealthCheckStatus, error) {
return define.HealthCheckInternalError, err
}
if cstate != define.ContainerStateRunning {
- return define.HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID())
+ return define.HealthCheckContainerStopped, fmt.Errorf("container %s is not running", c.ID())
}
if !c.HasHealthCheck() {
- return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ return define.HealthCheckNotDefined, fmt.Errorf("container %s has no defined healthcheck", c.ID())
}
return define.HealthCheckDefined, nil
}
@@ -167,7 +167,7 @@ func (c *Container) updateHealthStatus(status string) error {
healthCheck.Status = status
newResults, err := json.Marshal(healthCheck)
if err != nil {
- return errors.Wrapf(err, "unable to marshall healthchecks for writing status")
+ return fmt.Errorf("unable to marshall healthchecks for writing status: %w", err)
}
return ioutil.WriteFile(c.healthCheckLogPath(), newResults, 0700)
}
@@ -201,7 +201,7 @@ func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPerio
}
newResults, err := json.Marshal(healthCheck)
if err != nil {
- return errors.Wrapf(err, "unable to marshall healthchecks for writing")
+ return fmt.Errorf("unable to marshall healthchecks for writing: %w", err)
}
return ioutil.WriteFile(c.healthCheckLogPath(), newResults, 0700)
}
@@ -222,28 +222,37 @@ func (c *Container) getHealthCheckLog() (define.HealthCheckResults, error) {
}
b, err := ioutil.ReadFile(c.healthCheckLogPath())
if err != nil {
- return healthCheck, errors.Wrap(err, "failed to read health check log file")
+ return healthCheck, fmt.Errorf("failed to read health check log file: %w", err)
}
if err := json.Unmarshal(b, &healthCheck); err != nil {
- return healthCheck, errors.Wrapf(err, "failed to unmarshal existing healthcheck results in %s", c.healthCheckLogPath())
+ return healthCheck, fmt.Errorf("failed to unmarshal existing healthcheck results in %s: %w", c.healthCheckLogPath(), err)
}
return healthCheck, nil
}
// HealthCheckStatus returns the current state of a container with a healthcheck
func (c *Container) HealthCheckStatus() (string, error) {
- if !c.HasHealthCheck() {
- return "", errors.Errorf("container %s has no defined healthcheck", c.ID())
- }
c.lock.Lock()
defer c.lock.Unlock()
+ return c.healthCheckStatus()
+}
+
+// Internal function to return the current state of a container with a healthcheck.
+// This function does not lock the container.
+func (c *Container) healthCheckStatus() (string, error) {
+ if !c.HasHealthCheck() {
+ return "", fmt.Errorf("container %s has no defined healthcheck", c.ID())
+ }
+
if err := c.syncContainer(); err != nil {
return "", err
}
+
results, err := c.getHealthCheckLog()
if err != nil {
- return "", errors.Wrapf(err, "unable to get healthcheck log for %s", c.ID())
+ return "", fmt.Errorf("unable to get healthcheck log for %s: %w", c.ID(), err)
}
+
return results.Status, nil
}
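
HealthCheckStatus is now split into a locking wrapper and an unexported worker so that callers already holding the container lock (such as the new HealthStatus event path) can query the status without deadlocking on a non-reentrant lock. A minimal sketch of the pattern:

package main

import (
    "fmt"
    "sync"
)

type container struct {
    lock   sync.Mutex
    status string
}

// HealthCheckStatus acquires the container lock, then defers to the worker.
func (c *container) HealthCheckStatus() (string, error) {
    c.lock.Lock()
    defer c.lock.Unlock()
    return c.healthCheckStatus()
}

// healthCheckStatus assumes the caller already holds the lock.
func (c *container) healthCheckStatus() (string, error) {
    if c.status == "" {
        return "", fmt.Errorf("no healthcheck defined")
    }
    return c.status, nil
}

func main() {
    c := &container{status: "healthy"}
    s, _ := c.HealthCheckStatus()
    fmt.Println(s)
}
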
diff --git a/libpod/healthcheck_linux.go b/libpod/healthcheck_linux.go
index 45b3a0e41..3fb6dfb91 100644
--- a/libpod/healthcheck_linux.go
+++ b/libpod/healthcheck_linux.go
@@ -7,9 +7,9 @@ import (
"os/exec"
"strings"
+ "github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/systemd"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -20,7 +20,7 @@ func (c *Container) createTimer() error {
}
podman, err := os.Executable()
if err != nil {
- return errors.Wrapf(err, "failed to get path for podman for a health check timer")
+ return fmt.Errorf("failed to get path for podman for a health check timer: %w", err)
}
var cmd = []string{}
@@ -35,17 +35,28 @@ func (c *Container) createTimer() error {
conn, err := systemd.ConnectToDBUS()
if err != nil {
- return errors.Wrapf(err, "unable to get systemd connection to add healthchecks")
+ return fmt.Errorf("unable to get systemd connection to add healthchecks: %w", err)
}
conn.Close()
logrus.Debugf("creating systemd-transient files: %s %s", "systemd-run", cmd)
systemdRun := exec.Command("systemd-run", cmd...)
if output, err := systemdRun.CombinedOutput(); err != nil {
- return errors.Errorf("%s", output)
+ return fmt.Errorf("%s", output)
}
return nil
}
+// Wait for a message on the channel. Return an error if the message is not "done".
+func systemdOpSuccessful(c chan string) error {
+ msg := <-c
+ switch msg {
+ case "done":
+ return nil
+ default:
+ return fmt.Errorf("expected %q but received %q", "done", msg)
+ }
+}
+
// startTimer starts a systemd timer for the healthchecks
func (c *Container) startTimer() error {
if c.disableHealthCheckSystemd() {
@@ -53,11 +64,20 @@ func (c *Container) startTimer() error {
}
conn, err := systemd.ConnectToDBUS()
if err != nil {
- return errors.Wrapf(err, "unable to get systemd connection to start healthchecks")
+ return fmt.Errorf("unable to get systemd connection to start healthchecks: %w", err)
}
defer conn.Close()
- _, err = conn.StartUnitContext(context.Background(), fmt.Sprintf("%s.service", c.ID()), "fail", nil)
- return err
+
+ startFile := fmt.Sprintf("%s.service", c.ID())
+ startChan := make(chan string)
+ if _, err := conn.StartUnitContext(context.Background(), startFile, "fail", startChan); err != nil {
+ return err
+ }
+ if err := systemdOpSuccessful(startChan); err != nil {
+ return fmt.Errorf("starting systemd health-check timer %q: %w", startFile, err)
+ }
+
+ return nil
}
// removeTransientFiles removes the systemd timer and unit files
@@ -68,33 +88,40 @@ func (c *Container) removeTransientFiles(ctx context.Context) error {
}
conn, err := systemd.ConnectToDBUS()
if err != nil {
- return errors.Wrapf(err, "unable to get systemd connection to remove healthchecks")
+ return fmt.Errorf("unable to get systemd connection to remove healthchecks: %w", err)
}
defer conn.Close()
+
+ // Errors are returned at the very end. Let's make sure to stop and
+ // clean up as much as possible.
+ stopErrors := []error{}
+
+ // Stop the timer before the service to make sure the timer does not
+ // fire after the service is stopped.
+ timerChan := make(chan string)
timerFile := fmt.Sprintf("%s.timer", c.ID())
- serviceFile := fmt.Sprintf("%s.service", c.ID())
+ if _, err := conn.StopUnitContext(ctx, timerFile, "fail", timerChan); err != nil {
+ if !strings.HasSuffix(err.Error(), ".timer not loaded.") {
+ stopErrors = append(stopErrors, fmt.Errorf("removing health-check timer %q: %w", timerFile, err))
+ }
+ } else if err := systemdOpSuccessful(timerChan); err != nil {
+ stopErrors = append(stopErrors, fmt.Errorf("stopping systemd health-check timer %q: %w", timerFile, err))
+ }
- // If the service has failed (the healthcheck has failed), then
- // the .service file is not removed on stopping the unit file. If
- // we check the properties of the service, it will automatically
- // reset the state. But checking the state takes msecs vs usecs to
- // blindly call reset.
+ // Reset the service before stopping it to make sure it's being removed
+ // on stop.
+ serviceChan := make(chan string)
+ serviceFile := fmt.Sprintf("%s.service", c.ID())
if err := conn.ResetFailedUnitContext(ctx, serviceFile); err != nil {
- logrus.Debugf("failed to reset unit file: %q", err)
+ logrus.Debugf("Failed to reset unit file: %q", err)
}
-
- // We want to ignore errors where the timer unit and/or service unit has already
- // been removed. The error return is generic so we have to check against the
- // string in the error
- if _, err = conn.StopUnitContext(ctx, serviceFile, "fail", nil); err != nil {
+ if _, err := conn.StopUnitContext(ctx, serviceFile, "fail", serviceChan); err != nil {
if !strings.HasSuffix(err.Error(), ".service not loaded.") {
- return errors.Wrapf(err, "unable to remove service file")
- }
- }
- if _, err = conn.StopUnitContext(ctx, timerFile, "fail", nil); err != nil {
- if strings.HasSuffix(err.Error(), ".timer not loaded.") {
- return nil
+ stopErrors = append(stopErrors, fmt.Errorf("removing health-check service %q: %w", serviceFile, err))
}
+ } else if err := systemdOpSuccessful(serviceChan); err != nil {
+ stopErrors = append(stopErrors, fmt.Errorf("stopping systemd health-check service %q: %w", serviceFile, err))
}
- return err
+
+ return errorhandling.JoinErrors(stopErrors)
}
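
The D-Bus StartUnitContext/StopUnitContext calls only enqueue a systemd job; the job's result ("done", "failed", "timeout", ...) arrives on the channel argument, which is why the nil channels above were replaced. A standalone sketch of the handshake (the unit name is illustrative):

package main

import (
    "context"
    "fmt"

    systemdDbus "github.com/coreos/go-systemd/v22/dbus"
)

func startUnit(ctx context.Context, unit string) error {
    conn, err := systemdDbus.NewWithContext(ctx)
    if err != nil {
        return err
    }
    defer conn.Close()

    ch := make(chan string)
    if _, err := conn.StartUnitContext(ctx, unit, "fail", ch); err != nil {
        return err
    }
    // Block until systemd reports how the job finished.
    if msg := <-ch; msg != "done" {
        return fmt.Errorf("starting %q: expected %q but received %q", unit, "done", msg)
    }
    return nil
}

func main() {
    if err := startUnit(context.Background(), "example.service"); err != nil {
        fmt.Println(err)
    }
}
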
diff --git a/libpod/info.go b/libpod/info.go
index bc49a6cc9..c4193b40d 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -3,6 +3,7 @@ package libpod
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io/ioutil"
"math"
@@ -25,7 +26,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/selinux/go-selinux"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -34,20 +34,20 @@ func (r *Runtime) info() (*define.Info, error) {
info := define.Info{}
versionInfo, err := define.GetVersion()
if err != nil {
- return nil, errors.Wrapf(err, "error getting version info")
+ return nil, fmt.Errorf("error getting version info: %w", err)
}
info.Version = versionInfo
// get host information
hostInfo, err := r.hostInfo()
if err != nil {
- return nil, errors.Wrapf(err, "error getting host info")
+ return nil, fmt.Errorf("error getting host info: %w", err)
}
info.Host = hostInfo
// get store information
storeInfo, err := r.storeInfo()
if err != nil {
- return nil, errors.Wrapf(err, "error getting store info")
+ return nil, fmt.Errorf("error getting store info: %w", err)
}
info.Store = storeInfo
registries := make(map[string]interface{})
@@ -55,14 +55,14 @@ func (r *Runtime) info() (*define.Info, error) {
sys := r.SystemContext()
data, err := sysregistriesv2.GetRegistries(sys)
if err != nil {
- return nil, errors.Wrapf(err, "error getting registries")
+ return nil, fmt.Errorf("error getting registries: %w", err)
}
for _, reg := range data {
registries[reg.Prefix] = reg
}
regs, err := sysregistriesv2.UnqualifiedSearchRegistries(sys)
if err != nil {
- return nil, errors.Wrapf(err, "error getting registries")
+ return nil, fmt.Errorf("error getting registries: %w", err)
}
if len(regs) > 0 {
registries["search"] = regs
@@ -86,36 +86,36 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
// let's say OS, arch, number of CPUs, amount of memory, maybe OS distribution/version, hostname, kernel version, uptime
mi, err := system.ReadMemInfo()
if err != nil {
- return nil, errors.Wrapf(err, "error reading memory info")
+ return nil, fmt.Errorf("error reading memory info: %w", err)
}
hostDistributionInfo := r.GetHostDistributionInfo()
kv, err := readKernelVersion()
if err != nil {
- return nil, errors.Wrapf(err, "error reading kernel version")
+ return nil, fmt.Errorf("error reading kernel version: %w", err)
}
host, err := os.Hostname()
if err != nil {
- return nil, errors.Wrapf(err, "error getting hostname")
+ return nil, fmt.Errorf("error getting hostname: %w", err)
}
seccompProfilePath, err := DefaultSeccompPath()
if err != nil {
- return nil, errors.Wrapf(err, "error getting Seccomp profile path")
+ return nil, fmt.Errorf("error getting Seccomp profile path: %w", err)
}
// Cgroups version
unified, err := cgroups.IsCgroup2UnifiedMode()
if err != nil {
- return nil, errors.Wrapf(err, "error reading cgroups mode")
+ return nil, fmt.Errorf("error reading cgroups mode: %w", err)
}
// Get Map of all available controllers
availableControllers, err := cgroups.GetAvailableControllers(nil, unified)
if err != nil {
- return nil, errors.Wrapf(err, "error getting available cgroup controllers")
+ return nil, fmt.Errorf("error getting available cgroup controllers: %w", err)
}
cpuUtil, err := getCPUUtilization()
if err != nil {
@@ -178,11 +178,11 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
if rootless.IsRootless() {
uidmappings, err := rootless.ReadMappingsProc("/proc/self/uid_map")
if err != nil {
- return nil, errors.Wrapf(err, "error reading uid mappings")
+ return nil, fmt.Errorf("error reading uid mappings: %w", err)
}
gidmappings, err := rootless.ReadMappingsProc("/proc/self/gid_map")
if err != nil {
- return nil, errors.Wrapf(err, "error reading gid mappings")
+ return nil, fmt.Errorf("error reading gid mappings: %w", err)
}
idmappings := define.IDMappings{
GIDMap: gidmappings,
@@ -199,50 +199,38 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
info.OCIRuntime = ociruntimeInfo
}
- up, err := readUptime()
+ duration, err := procUptime()
if err != nil {
- return nil, errors.Wrapf(err, "error reading up time")
+ return nil, fmt.Errorf("error reading up time: %w", err)
}
- // Convert uptime in seconds to a human-readable format
- upSeconds := up + "s"
- upDuration, err := time.ParseDuration(upSeconds)
- if err != nil {
- return nil, errors.Wrapf(err, "error parsing system uptime")
- }
-
- // TODO Isn't there a simple lib for this, something like humantime?
- hoursFound := false
- var timeBuffer bytes.Buffer
- var hoursBuffer bytes.Buffer
- for _, elem := range upDuration.String() {
- timeBuffer.WriteRune(elem)
- if elem == 'h' || elem == 'm' {
- timeBuffer.WriteRune(' ')
- if elem == 'h' {
- hoursFound = true
- }
- }
- if !hoursFound {
- hoursBuffer.WriteRune(elem)
- }
+
+ uptime := struct {
+ hours float64
+ minutes float64
+ seconds float64
+ }{
+ hours: duration.Truncate(time.Hour).Hours(),
+ minutes: duration.Truncate(time.Minute).Minutes(),
+ seconds: duration.Truncate(time.Second).Seconds(),
}
- info.Uptime = timeBuffer.String()
- if hoursFound {
- hours, err := strconv.ParseFloat(hoursBuffer.String(), 64)
- if err == nil {
- days := hours / 24
- info.Uptime = fmt.Sprintf("%s (Approximately %.2f days)", info.Uptime, days)
- }
+ // Could not find a humanize-formatter for time.Duration
+ var buffer bytes.Buffer
+ buffer.WriteString(fmt.Sprintf("%.0fh %.0fm %.2fs",
+ uptime.hours,
+ math.Mod(uptime.seconds, 3600)/60,
+ math.Mod(uptime.seconds, 60),
+ ))
+ if int64(uptime.hours) > 0 {
+ buffer.WriteString(fmt.Sprintf(" (Approximately %.2f days)", uptime.hours/24))
}
+ info.Uptime = buffer.String()
return &info, nil
}
func (r *Runtime) getContainerStoreInfo() (define.ContainerStore, error) {
- var (
- paused, running, stopped int
- )
+ var paused, running, stopped int
cs := define.ContainerStore{}
cons, err := r.GetAllContainers()
if err != nil {
@@ -252,7 +240,7 @@ func (r *Runtime) getContainerStoreInfo() (define.ContainerStore, error) {
for _, con := range cons {
state, err := con.State()
if err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
// container was probably removed
cs.Number--
continue
@@ -283,7 +271,7 @@ func (r *Runtime) storeInfo() (*define.StoreInfo, error) {
}
images, err := r.store.Images()
if err != nil {
- return nil, errors.Wrapf(err, "error getting number of images")
+ return nil, fmt.Errorf("error getting number of images: %w", err)
}
conInfo, err := r.getContainerStoreInfo()
if err != nil {
@@ -293,7 +281,7 @@ func (r *Runtime) storeInfo() (*define.StoreInfo, error) {
var grStats syscall.Statfs_t
if err := syscall.Statfs(r.store.GraphRoot(), &grStats); err != nil {
- return nil, errors.Wrapf(err, "unable to collect graph root usasge for %q", r.store.GraphRoot())
+ return nil, fmt.Errorf("unable to collect graph root usasge for %q: %w", r.store.GraphRoot(), err)
}
allocated := uint64(grStats.Bsize) * grStats.Blocks
info := define.StoreInfo{
@@ -353,16 +341,17 @@ func readKernelVersion() (string, error) {
return string(f[2]), nil
}
-func readUptime() (string, error) {
+func procUptime() (time.Duration, error) {
+ var zero time.Duration
buf, err := ioutil.ReadFile("/proc/uptime")
if err != nil {
- return "", err
+ return zero, err
}
f := bytes.Fields(buf)
if len(f) < 1 {
- return "", fmt.Errorf("invalid uptime")
+ return zero, errors.New("unable to parse uptime from /proc/uptime")
}
- return string(f[0]), nil
+ return time.ParseDuration(string(f[0]) + "s")
}
// GetHostDistributionInfo returns a map containing the host's distribution and version
@@ -418,15 +407,15 @@ func getCPUUtilization() (*define.CPUUsage, error) {
func statToPercent(stats []string) (*define.CPUUsage, error) {
userTotal, err := strconv.ParseFloat(stats[1], 64)
if err != nil {
- return nil, errors.Wrapf(err, "unable to parse user value %q", stats[1])
+ return nil, fmt.Errorf("unable to parse user value %q: %w", stats[1], err)
}
systemTotal, err := strconv.ParseFloat(stats[3], 64)
if err != nil {
- return nil, errors.Wrapf(err, "unable to parse system value %q", stats[3])
+ return nil, fmt.Errorf("unable to parse system value %q: %w", stats[3], err)
}
idleTotal, err := strconv.ParseFloat(stats[4], 64)
if err != nil {
- return nil, errors.Wrapf(err, "unable to parse idle value %q", stats[4])
+ return nil, fmt.Errorf("unable to parse idle value %q: %w", stats[4], err)
}
total := userTotal + systemTotal + idleTotal
s := define.CPUUsage{
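
The procUptime rewrite leans on time.ParseDuration: /proc/uptime begins with the uptime in fractional seconds, so appending "s" yields a parseable duration. A standalone sketch of the read-and-format path (using os.ReadFile rather than the deprecated ioutil):

package main

import (
    "bytes"
    "errors"
    "fmt"
    "math"
    "os"
    "time"
)

// procUptime parses the first field of /proc/uptime ("12345.67 ...") as a duration.
func procUptime() (time.Duration, error) {
    buf, err := os.ReadFile("/proc/uptime")
    if err != nil {
        return 0, err
    }
    f := bytes.Fields(buf)
    if len(f) < 1 {
        return 0, errors.New("unable to parse uptime from /proc/uptime")
    }
    return time.ParseDuration(string(f[0]) + "s")
}

func main() {
    d, err := procUptime()
    if err != nil {
        fmt.Println(err)
        return
    }
    secs := d.Seconds()
    fmt.Printf("%.0fh %.0fm %.2fs\n",
        d.Truncate(time.Hour).Hours(), // whole hours
        math.Mod(secs, 3600)/60,       // minutes within the hour
        math.Mod(secs, 60))            // seconds within the minute
}
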
diff --git a/libpod/kube.go b/libpod/kube.go
index 5a5fe9d35..3cb0489b3 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"math/rand"
"os"
@@ -14,6 +15,7 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/env"
v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1"
@@ -26,7 +28,6 @@ import (
"github.com/containers/podman/v4/pkg/util"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -42,8 +43,8 @@ func GenerateForKube(ctx context.Context, ctrs []*Container) (*v1.Pod, error) {
func (p *Pod) GenerateForKube(ctx context.Context) (*v1.Pod, []v1.ServicePort, error) {
// Generate the v1.Pod yaml description
var (
- ports []v1.ContainerPort //nolint
- servicePorts []v1.ServicePort //nolint
+ ports []v1.ContainerPort
+ servicePorts []v1.ServicePort
)
allContainers, err := p.allContainers()
@@ -52,11 +53,11 @@ func (p *Pod) GenerateForKube(ctx context.Context) (*v1.Pod, []v1.ServicePort, e
}
// If the pod has no containers, no sense to generate YAML
if len(allContainers) == 0 {
- return nil, servicePorts, errors.Errorf("pod %s has no containers", p.ID())
+ return nil, servicePorts, fmt.Errorf("pod %s has no containers", p.ID())
}
// If only an infra container is present, makes no sense to generate YAML
if len(allContainers) == 1 && p.HasInfraContainer() {
- return nil, servicePorts, errors.Errorf("pod %s only has an infra container", p.ID())
+ return nil, servicePorts, fmt.Errorf("pod %s only has an infra container", p.ID())
}
extraHost := make([]v1.HostAlias, 0)
@@ -515,7 +516,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod,
podDNS.Nameservers = make([]string, 0)
}
for _, s := range servers {
- if !util.StringInSlice(s, podDNS.Nameservers) { // only append if it does not exist
+ if !cutil.StringInSlice(s, podDNS.Nameservers) { // only append if it does not exist
podDNS.Nameservers = append(podDNS.Nameservers, s)
}
}
@@ -526,7 +527,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod,
podDNS.Searches = make([]string, 0)
}
for _, d := range domains {
- if !util.StringInSlice(d, podDNS.Searches) { // only append if it does not exist
+ if !cutil.StringInSlice(d, podDNS.Searches) { // only append if it does not exist
podDNS.Searches = append(podDNS.Searches, d)
}
}
@@ -543,7 +544,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod,
podName := removeUnderscores(ctrs[0].Name())
// Check if the pod name and container name will end up conflicting
// Append -pod if so
- if util.StringInSlice(podName, ctrNames) {
+ if cutil.StringInSlice(podName, ctrNames) {
podName += "-pod"
}
@@ -572,7 +573,7 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []
if !c.Privileged() && len(c.config.Spec.Linux.Devices) > 0 {
// TODO Enable when we can support devices and their names
kubeContainer.VolumeDevices = generateKubeVolumeDeviceFromLinuxDevice(c.config.Spec.Linux.Devices)
- return kubeContainer, kubeVolumes, nil, annotations, errors.Wrapf(define.ErrNotImplemented, "linux devices")
+ return kubeContainer, kubeVolumes, nil, annotations, fmt.Errorf("linux devices: %w", define.ErrNotImplemented)
}
if len(c.config.UserVolumes) > 0 {
@@ -742,7 +743,7 @@ func portMappingToContainerPort(portMappings []types.PortMapping) ([]v1.Containe
case "SCTP":
protocol = v1.ProtocolSCTP
default:
- return containerPorts, errors.Errorf("unknown network protocol %s", p.Protocol)
+ return containerPorts, fmt.Errorf("unknown network protocol %s", p.Protocol)
}
for i := uint16(0); i < p.Range; i++ {
cp := v1.ContainerPort{
@@ -771,7 +772,7 @@ func libpodEnvVarsToKubeEnvVars(envs []string, imageEnvs []string) ([]v1.EnvVar,
for _, e := range envs {
split := strings.SplitN(e, "=", 2)
if len(split) != 2 {
- return envVars, errors.Errorf("environment variable %s is malformed; should be key=value", e)
+ return envVars, fmt.Errorf("environment variable %s is malformed; should be key=value", e)
}
if defaultEnv[split[0]] == split[1] {
continue
@@ -824,7 +825,7 @@ func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume
// generateKubePersistentVolumeClaim converts a ContainerNamedVolume to a Kubernetes PersistentVolumeClaim
func generateKubePersistentVolumeClaim(v *ContainerNamedVolume) (v1.VolumeMount, v1.Volume) {
- ro := util.StringInSlice("ro", v.Options)
+ ro := cutil.StringInSlice("ro", v.Options)
// To avoid naming conflicts with any host path mounts, add a unique suffix to the volume's name.
name := v.Name + "-pvc"
@@ -857,7 +858,7 @@ func generateKubeVolumeMount(m specs.Mount) (v1.VolumeMount, v1.Volume, error) {
name += "-host"
vm.Name = name
vm.MountPath = m.Destination
- if util.StringInSlice("ro", m.Options) {
+ if cutil.StringInSlice("ro", m.Options) {
vm.ReadOnly = true
}
@@ -891,11 +892,11 @@ func isHostPathDirectory(hostPathSource string) (bool, error) {
func convertVolumePathToName(hostSourcePath string) (string, error) {
if len(hostSourcePath) == 0 {
- return "", errors.Errorf("hostSourcePath must be specified to generate volume name")
+ return "", errors.New("hostSourcePath must be specified to generate volume name")
}
if len(hostSourcePath) == 1 {
if hostSourcePath != "/" {
- return "", errors.Errorf("hostSourcePath malformatted: %s", hostSourcePath)
+ return "", fmt.Errorf("hostSourcePath malformatted: %s", hostSourcePath)
}
// add special case name
return "root", nil
@@ -915,7 +916,7 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v
// Find caps in the defaultCaps but not in the container's
// those indicate a dropped cap
for _, capability := range defaultCaps {
- if !util.StringInSlice(capability, containerCaps) {
+ if !cutil.StringInSlice(capability, containerCaps) {
if _, ok := dedupDrop[capability]; !ok {
drop = append(drop, v1.Capability(capability))
dedupDrop[capability] = true
@@ -925,7 +926,7 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v
// Find caps in the container but not in the defaults; those indicate
// an added cap
for _, capability := range containerCaps {
- if !util.StringInSlice(capability, defaultCaps) {
+ if !cutil.StringInSlice(capability, defaultCaps) {
if _, ok := dedupAdd[capability]; !ok {
add = append(add, v1.Capability(capability))
dedupAdd[capability] = true
@@ -1024,7 +1025,7 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
defer c.lock.Unlock()
}
if err := c.syncContainer(); err != nil {
- return nil, errors.Wrapf(err, "unable to sync container during YAML generation")
+ return nil, fmt.Errorf("unable to sync container during YAML generation: %w", err)
}
mountpoint := c.state.Mountpoint
@@ -1032,7 +1033,7 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
var err error
mountpoint, err = c.mount()
if err != nil {
- return nil, errors.Wrapf(err, "failed to mount %s mountpoint", c.ID())
+ return nil, fmt.Errorf("failed to mount %s mountpoint: %w", c.ID(), err)
}
defer func() {
if err := c.unmount(false); err != nil {
diff --git a/libpod/lock/file/file_lock.go b/libpod/lock/file/file_lock.go
index 4685872b6..1379e690a 100644
--- a/libpod/lock/file/file_lock.go
+++ b/libpod/lock/file/file_lock.go
@@ -1,6 +1,7 @@
package file
import (
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -8,13 +9,12 @@ import (
"syscall"
"github.com/containers/storage"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// FileLocks is a struct enabling POSIX lock locking in a shared memory
// segment.
-type FileLocks struct { // nolint
+type FileLocks struct { //nolint:revive // struct name stutters
lockPath string
valid bool
}
@@ -23,7 +23,7 @@ type FileLocks struct { // nolint
func CreateFileLock(path string) (*FileLocks, error) {
_, err := os.Stat(path)
if err == nil {
- return nil, errors.Wrapf(syscall.EEXIST, "directory %s exists", path)
+ return nil, fmt.Errorf("directory %s exists: %w", path, syscall.EEXIST)
}
if err := os.MkdirAll(path, 0711); err != nil {
return nil, err
@@ -57,11 +57,11 @@ func OpenFileLock(path string) (*FileLocks, error) {
// Close() is only intended to be used while testing the locks.
func (locks *FileLocks) Close() error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
err := os.RemoveAll(locks.lockPath)
if err != nil {
- return errors.Wrapf(err, "deleting directory %s", locks.lockPath)
+ return fmt.Errorf("deleting directory %s: %w", locks.lockPath, err)
}
return nil
}
@@ -73,7 +73,7 @@ func (locks *FileLocks) getLockPath(lck uint32) string {
// AllocateLock allocates a lock and returns the index of the lock that was allocated.
func (locks *FileLocks) AllocateLock() (uint32, error) {
if !locks.valid {
- return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return 0, fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
id := uint32(0)
@@ -84,7 +84,7 @@ func (locks *FileLocks) AllocateLock() (uint32, error) {
if os.IsExist(err) {
continue
}
- return 0, errors.Wrap(err, "creating lock file")
+ return 0, fmt.Errorf("creating lock file: %w", err)
}
f.Close()
break
@@ -98,12 +98,12 @@ func (locks *FileLocks) AllocateLock() (uint32, error) {
// returned.
func (locks *FileLocks) AllocateGivenLock(lck uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
f, err := os.OpenFile(locks.getLockPath(lck), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
if err != nil {
- return errors.Wrapf(err, "error creating lock %d", lck)
+ return fmt.Errorf("error creating lock %d: %w", lck, err)
}
f.Close()
@@ -115,10 +115,10 @@ func (locks *FileLocks) AllocateGivenLock(lck uint32) error {
// The given lock must be already allocated, or an error will be returned.
func (locks *FileLocks) DeallocateLock(lck uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
if err := os.Remove(locks.getLockPath(lck)); err != nil {
- return errors.Wrapf(err, "deallocating lock %d", lck)
+ return fmt.Errorf("deallocating lock %d: %w", lck, err)
}
return nil
}
@@ -127,11 +127,11 @@ func (locks *FileLocks) DeallocateLock(lck uint32) error {
// other containers and pods.
func (locks *FileLocks) DeallocateAllLocks() error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
files, err := ioutil.ReadDir(locks.lockPath)
if err != nil {
- return errors.Wrapf(err, "error reading directory %s", locks.lockPath)
+ return fmt.Errorf("error reading directory %s: %w", locks.lockPath, err)
}
var lastErr error
for _, f := range files {
@@ -148,12 +148,12 @@ func (locks *FileLocks) DeallocateAllLocks() error {
// LockFileLock locks the given lock.
func (locks *FileLocks) LockFileLock(lck uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
l, err := storage.GetLockfile(locks.getLockPath(lck))
if err != nil {
- return errors.Wrapf(err, "error acquiring lock")
+ return fmt.Errorf("error acquiring lock: %w", err)
}
l.Lock()
@@ -163,11 +163,11 @@ func (locks *FileLocks) LockFileLock(lck uint32) error {
// UnlockFileLock unlocks the given lock.
func (locks *FileLocks) UnlockFileLock(lck uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
l, err := storage.GetLockfile(locks.getLockPath(lck))
if err != nil {
- return errors.Wrapf(err, "error acquiring lock")
+ return fmt.Errorf("error acquiring lock: %w", err)
}
l.Unlock()
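For context on the file-lock scheme above: allocation relies entirely on O_CREATE|O_EXCL, so creating the per-index lock file is the atomic claim on that index. A hedged sketch of the allocation loop (package and helper names are illustrative, not this package's API):

package filelock // illustrative

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
)

// allocateLockFile claims the first free index by creating its file with
// O_EXCL; os.IsExist simply means the index is taken, so try the next one.
func allocateLockFile(dir string) (uint32, error) {
	for id := uint32(0); ; id++ {
		p := filepath.Join(dir, strconv.FormatUint(uint64(id), 10))
		f, err := os.OpenFile(p, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o666)
		if err != nil {
			if os.IsExist(err) {
				continue
			}
			return 0, fmt.Errorf("creating lock file: %w", err)
		}
		f.Close()
		return id, nil
	}
}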
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
index f7f47760c..f00f01032 100644
--- a/libpod/lock/in_memory_locks.go
+++ b/libpod/lock/in_memory_locks.go
@@ -1,9 +1,9 @@
package lock
import (
+ "errors"
+ "fmt"
"sync"
-
- "github.com/pkg/errors"
)
// Mutex holds a single mutex and whether it has been allocated.
@@ -49,7 +49,7 @@ type InMemoryManager struct {
// of locks.
func NewInMemoryManager(numLocks uint32) (Manager, error) {
if numLocks == 0 {
- return nil, errors.Errorf("must provide a non-zero number of locks")
+ return nil, errors.New("must provide a non-zero number of locks")
}
manager := new(InMemoryManager)
@@ -78,13 +78,13 @@ func (m *InMemoryManager) AllocateLock() (Locker, error) {
}
}
- return nil, errors.Errorf("all locks have been allocated")
+ return nil, errors.New("all locks have been allocated")
}
// RetrieveLock retrieves a lock from the manager.
func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
if id >= m.numLocks {
- return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks-1)
+ return nil, fmt.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks-1)
}
return m.locks[id], nil
@@ -94,11 +94,11 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
// use) and returns it.
func (m *InMemoryManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
if id >= m.numLocks {
- return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks)
+ return nil, fmt.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks)
}
if m.locks[id].allocated {
- return nil, errors.Errorf("given lock ID %d is already in use, cannot reallocate", id)
+ return nil, fmt.Errorf("given lock ID %d is already in use, cannot reallocate", id)
}
m.locks[id].allocated = true
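The in-memory manager enforces one invariant in both retrieval paths: valid IDs run from 0 to numLocks-1. A tiny stand-in sketch of that bounds check (types are illustrative, not the package's):

package lock // illustrative

import (
	"fmt"
	"sync"
)

// retrieve mirrors the bounds check above: valid IDs are 0..len(locks)-1.
func retrieve(locks []*sync.Mutex, id uint32) (*sync.Mutex, error) {
	if id >= uint32(len(locks)) {
		return nil, fmt.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, len(locks)-1)
	}
	return locks[id], nil
}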
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index c7f4d1bc5..3334a4018 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -11,11 +11,12 @@ package shm
import "C"
import (
+ "errors"
+ "fmt"
"runtime"
"syscall"
"unsafe"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -28,7 +29,7 @@ var (
// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
// segment.
-type SHMLocks struct { // nolint
+type SHMLocks struct {
lockStruct *C.shm_struct_t
maxLocks uint32
valid bool
@@ -40,7 +41,7 @@ type SHMLocks struct { // nolint
// size used by the underlying implementation.
func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
if numLocks == 0 {
- return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
+ return nil, fmt.Errorf("number of locks must be greater than 0: %w", syscall.EINVAL)
}
locks := new(SHMLocks)
@@ -52,7 +53,7 @@ func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
lockStruct := C.setup_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
if lockStruct == nil {
// We got a null pointer, so something errored
- return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to create %d locks in %s", numLocks, path)
+ return nil, fmt.Errorf("failed to create %d locks in %s: %w", numLocks, path, syscall.Errno(-1*errCode))
}
locks.lockStruct = lockStruct
@@ -69,7 +70,7 @@ func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
// segment was created with.
func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
if numLocks == 0 {
- return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
+ return nil, fmt.Errorf("number of locks must be greater than 0: %w", syscall.EINVAL)
}
locks := new(SHMLocks)
@@ -81,7 +82,7 @@ func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
lockStruct := C.open_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
if lockStruct == nil {
// We got a null pointer, so something errored
- return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to open %d locks in %s", numLocks, path)
+ return nil, fmt.Errorf("failed to open %d locks in %s: %w", numLocks, path, syscall.Errno(-1*errCode))
}
locks.lockStruct = lockStruct
@@ -103,7 +104,7 @@ func (locks *SHMLocks) GetMaxLocks() uint32 {
// Close() is only intended to be used while testing the locks.
func (locks *SHMLocks) Close() error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
locks.valid = false
@@ -124,7 +125,7 @@ func (locks *SHMLocks) Close() error {
// created will result in an error, and no semaphore will be allocated.
func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
if !locks.valid {
- return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return 0, fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
// This returns a U64, so we have the full u32 range available for
@@ -138,7 +139,7 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
// that there's no room in the SHM inn for this lock, this tends to send normal people
// down the path of checking disk-space which is not actually their problem.
// Give a clue that it's actually due to num_locks filling up.
- var errFull = errors.Errorf("allocation failed; exceeded num_locks (%d)", locks.maxLocks)
+ errFull := fmt.Errorf("allocation failed; exceeded num_locks (%d)", locks.maxLocks)
return uint32(retCode), errFull
}
return uint32(retCode), syscall.Errno(-1 * retCode)
@@ -153,7 +154,7 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
// returned.
func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
retCode := C.allocate_given_semaphore(locks.lockStruct, C.uint32_t(sem))
@@ -169,11 +170,11 @@ func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
// The given semaphore must be already allocated, or an error will be returned.
func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
if sem > locks.maxLocks {
- return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+ return fmt.Errorf("given semaphore %d is higher than maximum locks count %d: %w", sem, locks.maxLocks, syscall.EINVAL)
}
retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem))
@@ -189,7 +190,7 @@ func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
// other containers and pods.
func (locks *SHMLocks) DeallocateAllSemaphores() error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
retCode := C.deallocate_all_semaphores(locks.lockStruct)
@@ -210,11 +211,11 @@ func (locks *SHMLocks) DeallocateAllSemaphores() error {
// succeed.
func (locks *SHMLocks) LockSemaphore(sem uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
if sem > locks.maxLocks {
- return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+ return fmt.Errorf("given semaphore %d is higher than maximum locks count %d: %w", sem, locks.maxLocks, syscall.EINVAL)
}
// For pthread mutexes, we have to guarantee lock and unlock happen in
@@ -238,11 +239,11 @@ func (locks *SHMLocks) LockSemaphore(sem uint32) error {
// succeed.
func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
if !locks.valid {
- return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}
if sem > locks.maxLocks {
- return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+ return fmt.Errorf("given semaphore %d is higher than maximum locks count %d: %w", sem, locks.maxLocks, syscall.EINVAL)
}
retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem))
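All the cgo call sites in this file share one convention: the C shim returns 0 on success and a negated errno on failure, which the Go side converts back with syscall.Errno and now wraps with %w. A hedged sketch of that translation (errnoFromC is an illustrative helper):

package shm // illustrative

import (
	"fmt"
	"syscall"
)

// errnoFromC maps the negated-errno convention of the C shim to a wrapped
// Go error; errors.Is(err, syscall.EINVAL) and friends still match.
func errnoFromC(retCode int64, op string) error {
	if retCode < 0 {
		return fmt.Errorf("%s: %w", op, syscall.Errno(-retCode))
	}
	return nil
}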
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 3076cd864..fa20bc353 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -4,10 +4,10 @@
package lock
import (
+ "fmt"
"syscall"
"github.com/containers/podman/v4/libpod/lock/shm"
- "github.com/pkg/errors"
)
// SHMLockManager manages shared memory locks.
@@ -66,8 +66,8 @@ func (m *SHMLockManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
lock.manager = m
if id >= m.locks.GetMaxLocks() {
- return nil, errors.Wrapf(syscall.EINVAL, "lock ID %d is too large - max lock size is %d",
- id, m.locks.GetMaxLocks()-1)
+ return nil, fmt.Errorf("lock ID %d is too large - max lock size is %d: %w",
+ id, m.locks.GetMaxLocks()-1, syscall.EINVAL)
}
if err := m.locks.AllocateGivenSemaphore(id); err != nil {
@@ -84,8 +84,8 @@ func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
lock.manager = m
if id >= m.locks.GetMaxLocks() {
- return nil, errors.Wrapf(syscall.EINVAL, "lock ID %d is too large - max lock size is %d",
- id, m.locks.GetMaxLocks()-1)
+ return nil, fmt.Errorf("lock ID %d is too large - max lock size is %d: %w",
+ id, m.locks.GetMaxLocks()-1, syscall.EINVAL)
}
return lock, nil
diff --git a/libpod/logs/log.go b/libpod/logs/log.go
index 4d7d5ac58..43da8d904 100644
--- a/libpod/logs/log.go
+++ b/libpod/logs/log.go
@@ -1,6 +1,7 @@
package logs
import (
+ "errors"
"fmt"
"io"
"os"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/libpod/logs/reversereader"
"github.com/nxadm/tail"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -105,7 +105,7 @@ func getTailLog(path string, tail int) ([]*LogLine, error) {
for {
s, err := rr.Read()
if err != nil {
- if errors.Cause(err) == io.EOF {
+ if errors.Is(err, io.EOF) {
inputs <- []string{leftover}
} else {
logrus.Error(err)
@@ -228,11 +228,11 @@ func (l *LogLine) Until(until time.Time) bool {
func NewLogLine(line string) (*LogLine, error) {
splitLine := strings.Split(line, " ")
if len(splitLine) < 4 {
- return nil, errors.Errorf("'%s' is not a valid container log line", line)
+ return nil, fmt.Errorf("'%s' is not a valid container log line", line)
}
logTime, err := time.Parse(LogTimeFormat, splitLine[0])
if err != nil {
- return nil, errors.Wrapf(err, "unable to convert time %s from container log", splitLine[0])
+ return nil, fmt.Errorf("unable to convert time %s from container log: %w", splitLine[0], err)
}
l := LogLine{
Time: logTime,
@@ -249,11 +249,11 @@ func NewLogLine(line string) (*LogLine, error) {
func NewJournaldLogLine(line string, withID bool) (*LogLine, error) {
splitLine := strings.Split(line, " ")
if len(splitLine) < 4 {
- return nil, errors.Errorf("'%s' is not a valid container log line", line)
+ return nil, fmt.Errorf("'%s' is not a valid container log line", line)
}
logTime, err := time.Parse(LogTimeFormat, splitLine[0])
if err != nil {
- return nil, errors.Wrapf(err, "unable to convert time %s from container log", splitLine[0])
+ return nil, fmt.Errorf("unable to convert time %s from container log: %w", splitLine[0], err)
}
var msg, id string
if withID {
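Both constructors above parse the same four-field line shape: a timestamp, the stream name, a partial/full flag, and the message. A self-contained sketch of that parse (time.RFC3339Nano stands in for the package's LogTimeFormat constant, which may differ):

package logs // illustrative

import (
	"fmt"
	"strings"
	"time"
)

// parseLogLine splits "<time> <stream> <flag> <message>" and parses the
// timestamp, returning wrapped errors in the same style as above.
func parseLogLine(line string) (time.Time, string, error) {
	parts := strings.SplitN(line, " ", 4)
	if len(parts) < 4 {
		return time.Time{}, "", fmt.Errorf("'%s' is not a valid container log line", line)
	}
	ts, err := time.Parse(time.RFC3339Nano, parts[0])
	if err != nil {
		return time.Time{}, "", fmt.Errorf("unable to convert time %s from container log: %w", parts[0], err)
	}
	return ts, parts[3], nil
}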
diff --git a/libpod/logs/reversereader/reversereader.go b/libpod/logs/reversereader/reversereader.go
index 4fa1a3f88..f2e71fb61 100644
--- a/libpod/logs/reversereader/reversereader.go
+++ b/libpod/logs/reversereader/reversereader.go
@@ -1,10 +1,10 @@
package reversereader
import (
+ "errors"
+ "fmt"
"io"
"os"
-
- "github.com/pkg/errors"
)
// ReverseReader structure for reading a file backwards
@@ -49,12 +49,12 @@ func NewReverseReader(reader *os.File) (*ReverseReader, error) {
// then sets the new offset one pagesize less than the previous read.
func (r *ReverseReader) Read() (string, error) {
if r.offset < 0 {
- return "", errors.Wrap(io.EOF, "at beginning of file")
+ return "", fmt.Errorf("at beginning of file: %w", io.EOF)
}
// Read from given offset
b := make([]byte, r.readSize)
n, err := r.reader.ReadAt(b, r.offset)
- if err != nil && errors.Cause(err) != io.EOF {
+ if err != nil && !errors.Is(err, io.EOF) {
return "", err
}
if int64(n) < r.readSize {
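The reverse reader walks the file from the end in fixed-size chunks via ReadAt, and with the conversion above a short final read is detected with errors.Is(err, io.EOF) instead of errors.Cause. A hedged, self-contained sketch of the same idea (readBackwards is illustrative):

package reversereader // illustrative sketch, not the real implementation

import (
	"errors"
	"io"
	"os"
)

// readBackwards returns the file's contents chunk by chunk, last chunk
// first, tolerating io.EOF on a short read at the end of the file.
func readBackwards(f *os.File, pagesize int64) ([]string, error) {
	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	var chunks []string
	for end := fi.Size(); end > 0; end -= pagesize {
		start := end - pagesize
		if start < 0 {
			start = 0
		}
		b := make([]byte, end-start)
		if _, err := f.ReadAt(b, start); err != nil && !errors.Is(err, io.EOF) {
			return nil, err
		}
		chunks = append(chunks, string(b))
	}
	return chunks, nil
}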
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 0c124cf0b..c05796768 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -6,6 +6,7 @@ package libpod
import (
"crypto/rand"
"crypto/sha256"
+ "errors"
"fmt"
"io/ioutil"
"net"
@@ -21,22 +22,21 @@ import (
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/common/libnetwork/etchosts"
+ "github.com/containers/common/libnetwork/resolvconf"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/machine"
"github.com/containers/common/pkg/netns"
+ "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/pkg/namespaces"
- "github.com/containers/podman/v4/pkg/resolvconf"
"github.com/containers/podman/v4/pkg/rootless"
- "github.com/containers/podman/v4/pkg/util"
"github.com/containers/podman/v4/utils"
"github.com/containers/storage/pkg/lockfile"
- spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
@@ -109,7 +109,7 @@ func (r *RootlessNetNS) getPath(path string) string {
func (r *RootlessNetNS) Do(toRun func() error) error {
err := r.ns.Do(func(_ ns.NetNS) error {
// Before we can run the given function,
- // we have to setup all mounts correctly.
+ // we have to set up all mounts correctly.
// The order of the mounts is IMPORTANT.
// The idea of the extra mount ns is to make /run and /var/lib/cni writeable
@@ -127,19 +127,19 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
// this must happen inside the netns thread.
err := unix.Unshare(unix.CLONE_NEWNS)
if err != nil {
- return errors.Wrapf(err, "cannot create a new mount namespace")
+ return fmt.Errorf("cannot create a new mount namespace: %w", err)
}
xdgRuntimeDir, err := util.GetRuntimeDir()
if err != nil {
- return errors.Wrap(err, "could not get runtime directory")
+ return fmt.Errorf("could not get runtime directory: %w", err)
}
newXDGRuntimeDir := r.getPath(xdgRuntimeDir)
// 1. Mount the netns into the new run to keep them accessible.
// Otherwise cni setup will fail because it cannot access the netns files.
err = unix.Mount(xdgRuntimeDir, newXDGRuntimeDir, "none", unix.MS_BIND|unix.MS_SHARED|unix.MS_REC, "")
if err != nil {
- return errors.Wrap(err, "failed to mount runtime directory for rootless netns")
+ return fmt.Errorf("failed to mount runtime directory for rootless netns: %w", err)
}
// 2. Also keep /run/systemd if it exists.
@@ -150,7 +150,7 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
newRunSystemd := r.getPath(runSystemd)
err = unix.Mount(runSystemd, newRunSystemd, "none", unix.MS_BIND|unix.MS_REC, "")
if err != nil {
- return errors.Wrap(err, "failed to mount /run/systemd directory for rootless netns")
+ return fmt.Errorf("failed to mount /run/systemd directory for rootless netns: %w", err)
}
}
@@ -185,7 +185,7 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
fi, err := os.Lstat(path)
if err != nil {
- return errors.Wrap(err, "failed to stat resolv.conf path")
+ return fmt.Errorf("failed to stat resolv.conf path: %w", err)
}
// no link, just continue
@@ -195,7 +195,7 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
link, err := os.Readlink(path)
if err != nil {
- return errors.Wrap(err, "failed to read resolv.conf symlink")
+ return fmt.Errorf("failed to read resolv.conf symlink: %w", err)
}
linkCount++
if filepath.IsAbs(link) {
@@ -231,25 +231,25 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
rsr := r.getPath("/run/systemd/resolve")
err = unix.Mount("", rsr, "tmpfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV, "")
if err != nil {
- return errors.Wrapf(err, "failed to mount tmpfs on %q for rootless netns", rsr)
+ return fmt.Errorf("failed to mount tmpfs on %q for rootless netns: %w", rsr, err)
}
}
if strings.HasPrefix(resolvePath, "/run/") {
resolvePath = r.getPath(resolvePath)
err = os.MkdirAll(filepath.Dir(resolvePath), 0700)
if err != nil {
- return errors.Wrap(err, "failed to create rootless-netns resolv.conf directory")
+ return fmt.Errorf("failed to create rootless-netns resolv.conf directory: %w", err)
}
// we want to bind mount on this file so we have to create the file first
_, err = os.OpenFile(resolvePath, os.O_CREATE|os.O_RDONLY, 0700)
if err != nil {
- return errors.Wrap(err, "failed to create rootless-netns resolv.conf file")
+ return fmt.Errorf("failed to create rootless-netns resolv.conf file: %w", err)
}
}
// mount resolv.conf to make use of the host dns
err = unix.Mount(r.getPath("resolv.conf"), resolvePath, "none", unix.MS_BIND, "")
if err != nil {
- return errors.Wrap(err, "failed to mount resolv.conf for rootless netns")
+ return fmt.Errorf("failed to mount resolv.conf for rootless netns: %w", err)
}
// 4. CNI plugins need access to /var/lib/cni and /run
@@ -274,14 +274,14 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
// make sure to mount var first
err = unix.Mount(varDir, varTarget, "none", unix.MS_BIND, "")
if err != nil {
- return errors.Wrapf(err, "failed to mount %s for rootless netns", varTarget)
+ return fmt.Errorf("failed to mount %s for rootless netns: %w", varTarget, err)
}
// 5. Mount the newly prepared run dir to /run; it has to be recursive to keep the other bind mounts.
runDir := r.getPath("run")
err = unix.Mount(runDir, "/run", "none", unix.MS_BIND|unix.MS_REC, "")
if err != nil {
- return errors.Wrap(err, "failed to mount /run for rootless netns")
+ return fmt.Errorf("failed to mount /run for rootless netns: %w", err)
}
// run the given function in the correct namespace
@@ -291,7 +291,7 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
return err
}
-// Cleanup the rootless network namespace if needed.
+// Clean up the rootless network namespace if needed.
// It checks if we have running containers with the bridge network mode.
// Cleanup() expects that r.Lock is locked
func (r *RootlessNetNS) Cleanup(runtime *Runtime) error {
@@ -377,7 +377,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
lfile := filepath.Join(runDir, "rootless-netns.lock")
lock, err := lockfile.GetLockfile(lfile)
if err != nil {
- return nil, errors.Wrap(err, "failed to get rootless-netns lockfile")
+ return nil, fmt.Errorf("failed to get rootless-netns lockfile: %w", err)
}
lock.Lock()
defer func() {
@@ -391,7 +391,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
rootlessNetNsDir := filepath.Join(runDir, rootlessNetNsName)
err = os.MkdirAll(rootlessNetNsDir, 0700)
if err != nil {
- return nil, errors.Wrap(err, "could not create rootless-netns directory")
+ return nil, fmt.Errorf("could not create rootless-netns directory: %w", err)
}
nsDir, err := netns.GetNSRunDir()
@@ -411,15 +411,15 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
if err != nil {
if !new {
// return an error if we could not get the namespace and should not create one
- return nil, errors.Wrap(err, "error getting rootless network namespace")
+ return nil, fmt.Errorf("error getting rootless network namespace: %w", err)
}
// create a new namespace
logrus.Debugf("creating rootless network namespace with name %q", netnsName)
ns, err = netns.NewNSWithName(netnsName)
if err != nil {
- return nil, errors.Wrap(err, "error creating rootless network namespace")
+ return nil, fmt.Errorf("error creating rootless network namespace: %w", err)
}
- // setup slirp4netns here
+ // set up slirp4netns here
path := r.config.Engine.NetworkCmdPath
if path == "" {
var err error
@@ -431,7 +431,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
syncR, syncW, err := os.Pipe()
if err != nil {
- return nil, errors.Wrapf(err, "failed to open pipe")
+ return nil, fmt.Errorf("failed to open pipe: %w", err)
}
defer errorhandling.CloseQuiet(syncR)
defer errorhandling.CloseQuiet(syncW)
@@ -442,7 +442,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
}
slirpFeatures, err := checkSlirpFlags(path)
if err != nil {
- return nil, errors.Wrapf(err, "error checking slirp4netns binary %s: %q", path, err)
+ return nil, fmt.Errorf("error checking slirp4netns binary %s: %q: %w", path, err, err)
}
cmdArgs, err := createBasicSlirp4netnsCmdArgs(netOptions, slirpFeatures)
if err != nil {
@@ -470,25 +470,25 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
logPath := filepath.Join(r.config.Engine.TmpDir, "slirp4netns-rootless-netns.log")
logFile, err := os.Create(logPath)
if err != nil {
- return nil, errors.Wrapf(err, "failed to open slirp4netns log file %s", logPath)
+ return nil, fmt.Errorf("failed to open slirp4netns log file %s: %w", logPath, err)
}
defer logFile.Close()
// Immediately unlink the file so we won't need to worry about cleaning it up later.
// It is still accessible through the open fd logFile.
if err := os.Remove(logPath); err != nil {
- return nil, errors.Wrapf(err, "delete file %s", logPath)
+ return nil, fmt.Errorf("delete file %s: %w", logPath, err)
}
cmd.Stdout = logFile
cmd.Stderr = logFile
if err := cmd.Start(); err != nil {
- return nil, errors.Wrapf(err, "failed to start slirp4netns process")
+ return nil, fmt.Errorf("failed to start slirp4netns process: %w", err)
}
// create pid file for the slirp4netns process
// this is needed to kill the process in the cleanup
pid := strconv.Itoa(cmd.Process.Pid)
err = ioutil.WriteFile(filepath.Join(rootlessNetNsDir, rootlessNetNsSilrp4netnsPidFile), []byte(pid), 0700)
if err != nil {
- return nil, errors.Wrap(err, "unable to write rootless-netns slirp4netns pid file")
+ return nil, fmt.Errorf("unable to write rootless-netns slirp4netns pid file: %w", err)
}
defer func() {
@@ -513,63 +513,59 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
// build a new resolv.conf file which uses the slirp4netns dns server address
resolveIP, err := GetSlirp4netnsDNS(nil)
if err != nil {
- return nil, errors.Wrap(err, "failed to determine default slirp4netns DNS address")
+ return nil, fmt.Errorf("failed to determine default slirp4netns DNS address: %w", err)
}
if netOptions.cidr != "" {
_, cidr, err := net.ParseCIDR(netOptions.cidr)
if err != nil {
- return nil, errors.Wrap(err, "failed to parse slirp4netns cidr")
+ return nil, fmt.Errorf("failed to parse slirp4netns cidr: %w", err)
}
resolveIP, err = GetSlirp4netnsDNS(cidr)
if err != nil {
- return nil, errors.Wrapf(err, "failed to determine slirp4netns DNS address from cidr: %s", cidr.String())
+ return nil, fmt.Errorf("failed to determine slirp4netns DNS address from cidr: %s: %w", cidr.String(), err)
}
}
- conf, err := resolvconf.Get()
- if err != nil {
- return nil, err
- }
- conf, err = resolvconf.FilterResolvDNS(conf.Content, netOptions.enableIPv6, true)
- if err != nil {
- return nil, err
- }
- searchDomains := resolvconf.GetSearchDomains(conf.Content)
- dnsOptions := resolvconf.GetOptions(conf.Content)
- nameServers := resolvconf.GetNameservers(conf.Content)
- _, err = resolvconf.Build(filepath.Join(rootlessNetNsDir, "resolv.conf"), append([]string{resolveIP.String()}, nameServers...), searchDomains, dnsOptions)
- if err != nil {
- return nil, errors.Wrap(err, "failed to create rootless netns resolv.conf")
+ if err := resolvconf.New(&resolvconf.Params{
+ Path: filepath.Join(rootlessNetNsDir, "resolv.conf"),
+ // fake the netns since we want to filter localhost
+ Namespaces: []specs.LinuxNamespace{
+ {Type: specs.NetworkNamespace},
+ },
+ IPv6Enabled: netOptions.enableIPv6,
+ KeepHostServers: true,
+ Nameservers: []string{resolveIP.String()},
+ }); err != nil {
+ return nil, fmt.Errorf("failed to create rootless netns resolv.conf: %w", err)
}
-
// create cni directories to store files
// they will be bind mounted to the correct location in an extra mount ns
err = os.MkdirAll(filepath.Join(rootlessNetNsDir, persistentCNIDir), 0700)
if err != nil {
- return nil, errors.Wrap(err, "could not create rootless-netns var directory")
+ return nil, fmt.Errorf("could not create rootless-netns var directory: %w", err)
}
runDir := filepath.Join(rootlessNetNsDir, "run")
err = os.MkdirAll(runDir, 0700)
if err != nil {
- return nil, errors.Wrap(err, "could not create rootless-netns run directory")
+ return nil, fmt.Errorf("could not create rootless-netns run directory: %w", err)
}
// relabel the new run directory to the iptables /run label
// this is important, otherwise the iptables command will fail
err = label.Relabel(runDir, "system_u:object_r:iptables_var_run_t:s0", false)
if err != nil {
- return nil, errors.Wrap(err, "could not create relabel rootless-netns run directory")
+ return nil, fmt.Errorf("could not create relabel rootless-netns run directory: %w", err)
}
// create systemd run directory
err = os.MkdirAll(filepath.Join(runDir, "systemd"), 0700)
if err != nil {
- return nil, errors.Wrap(err, "could not create rootless-netns systemd directory")
+ return nil, fmt.Errorf("could not create rootless-netns systemd directory: %w", err)
}
// create the directory for the netns files at the same location
// relative to the rootless-netns location
err = os.MkdirAll(filepath.Join(rootlessNetNsDir, nsDir), 0700)
if err != nil {
- return nil, errors.Wrap(err, "could not create rootless-netns netns directory")
+ return nil, fmt.Errorf("could not create rootless-netns netns directory: %w", err)
}
}
@@ -660,9 +656,9 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) (status map[str
return nil, err
}
- // setup rootless port forwarder when rootless with ports and the network status is empty,
+ // set up rootless port forwarder when rootless with ports and the network status is empty,
// if this is called from network reload the network status will not be empty and we should
- // not setup port because they are still active
+ // not set up port because they are still active
if rootless.IsRootless() && len(ctr.config.PortMappings) > 0 && ctr.getNetworkStatus() == nil {
// set up port forwarder for rootless netns
netnsPath := ctrNS.Path()
@@ -679,7 +675,7 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) (status map[str
func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q map[string]types.StatusBlock, retErr error) {
ctrNS, err := netns.NewNS()
if err != nil {
- return nil, nil, errors.Wrapf(err, "error creating network namespace for container %s", ctr.ID())
+ return nil, nil, fmt.Errorf("error creating network namespace for container %s: %w", ctr.ID(), err)
}
defer func() {
if retErr != nil {
@@ -706,7 +702,7 @@ func (r *Runtime) setupNetNS(ctr *Container) error {
b := make([]byte, 16)
if _, err := rand.Reader.Read(b); err != nil {
- return errors.Wrapf(err, "failed to generate random netns name")
+ return fmt.Errorf("failed to generate random netns name: %w", err)
}
nsPath, err := netns.GetNSRunDir()
if err != nil {
@@ -727,7 +723,7 @@ func (r *Runtime) setupNetNS(ctr *Container) error {
}
if err := unix.Mount(nsProcess, nsPath, "none", unix.MS_BIND, ""); err != nil {
- return errors.Wrapf(err, "cannot mount %s", nsPath)
+ return fmt.Errorf("cannot mount %s: %w", nsPath, err)
}
netNS, err := ns.GetNS(nsPath)
@@ -746,7 +742,7 @@ func (r *Runtime) setupNetNS(ctr *Container) error {
func joinNetNS(path string) (ns.NetNS, error) {
netNS, err := ns.GetNS(path)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving network namespace at %s", path)
+ return nil, fmt.Errorf("error retrieving network namespace at %s: %w", path, err)
}
return netNS, nil
@@ -762,7 +758,7 @@ func (r *Runtime) closeNetNS(ctr *Container) error {
}
if err := ctr.state.NetNS.Close(); err != nil {
- return errors.Wrapf(err, "error closing network namespace for container %s", ctr.ID())
+ return fmt.Errorf("error closing network namespace for container %s: %w", ctr.ID(), err)
}
ctr.state.NetNS = nil
@@ -778,8 +774,10 @@ func (r *Runtime) teardownNetwork(ns string, opts types.NetworkOptions) error {
return err
}
tearDownPod := func() error {
- err := r.network.Teardown(ns, types.TeardownOptions{NetworkOptions: opts})
- return errors.Wrapf(err, "error tearing down network namespace configuration for container %s", opts.ContainerID)
+ if err := r.network.Teardown(ns, types.TeardownOptions{NetworkOptions: opts}); err != nil {
+ return fmt.Errorf("error tearing down network namespace configuration for container %s: %w", opts.ContainerID, err)
+ }
+ return nil
}
// rootlessNetNS is nil if we are root
@@ -787,7 +785,7 @@ func (r *Runtime) teardownNetwork(ns string, opts types.NetworkOptions) error {
// execute the cni setup in the rootless net ns
err = rootlessNetNS.Do(tearDownPod)
if cerr := rootlessNetNS.Cleanup(r); cerr != nil {
- logrus.WithError(err).Error("failed to cleanup rootless netns")
+ logrus.WithError(err).Error("failed to clean up rootless netns")
}
rootlessNetNS.Lock.Unlock()
} else {
@@ -830,12 +828,12 @@ func (r *Runtime) teardownNetNS(ctr *Container) error {
// First unmount the namespace
if err := netns.UnmountNS(ctr.state.NetNS); err != nil {
- return errors.Wrapf(err, "error unmounting network namespace for container %s", ctr.ID())
+ return fmt.Errorf("error unmounting network namespace for container %s: %w", ctr.ID(), err)
}
// Now close the open file descriptor
if err := ctr.state.NetNS.Close(); err != nil {
- return errors.Wrapf(err, "error closing network namespace for container %s", ctr.ID())
+ return fmt.Errorf("error closing network namespace for container %s: %w", ctr.ID(), err)
}
ctr.state.NetNS = nil
@@ -868,7 +866,7 @@ func getContainerNetNS(ctr *Container) (string, *Container, error) {
// It returns nil when it is set to bridge and an error otherwise.
func isBridgeNetMode(n namespaces.NetworkMode) error {
if !n.IsBridge() {
- return errors.Wrapf(define.ErrNetworkModeInvalid, "%q is not supported", n)
+ return fmt.Errorf("%q is not supported: %w", n, define.ErrNetworkModeInvalid)
}
return nil
}
@@ -884,7 +882,7 @@ func isBridgeNetMode(n namespaces.NetworkMode) error {
// extend this to stop + restart slirp4netns
func (r *Runtime) reloadContainerNetwork(ctr *Container) (map[string]types.StatusBlock, error) {
if ctr.state.NetNS == nil {
- return nil, errors.Wrapf(define.ErrCtrStateInvalid, "container %s network is not configured, refusing to reload", ctr.ID())
+ return nil, fmt.Errorf("container %s network is not configured, refusing to reload: %w", ctr.ID(), define.ErrCtrStateInvalid)
}
if err := isBridgeNetMode(ctr.config.NetMode); err != nil {
return nil, err
@@ -930,6 +928,8 @@ func (r *Runtime) reloadContainerNetwork(ctr *Container) (map[string]types.Statu
return r.configureNetNS(ctr, ctr.state.NetNS)
}
+// TODO (5.0): return the statistics per network interface
+// This would allow better compat with docker.
func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) {
var netStats *netlink.LinkStatistics
@@ -943,21 +943,39 @@ func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) {
return nil, nil
}
- // FIXME get the interface from the container netstatus
- dev := "eth0"
netMode := ctr.config.NetMode
+ netStatus := ctr.getNetworkStatus()
if otherCtr != nil {
netMode = otherCtr.config.NetMode
+ netStatus = otherCtr.getNetworkStatus()
}
if netMode.IsSlirp4netns() {
- dev = "tap0"
+ // create a fake status with correct interface name for the logic below
+ netStatus = map[string]types.StatusBlock{
+ "slirp4netns": {
+ Interfaces: map[string]types.NetInterface{"tap0": {}},
+ },
+ }
}
err := ns.WithNetNSPath(netNSPath, func(_ ns.NetNS) error {
- link, err := netlink.LinkByName(dev)
- if err != nil {
- return err
+ for _, status := range netStatus {
+ for dev := range status.Interfaces {
+ link, err := netlink.LinkByName(dev)
+ if err != nil {
+ return err
+ }
+ if netStats == nil {
+ netStats = link.Attrs().Statistics
+ continue
+ }
+ // Currently only Tx/RxBytes are used.
+ // In the future we should return all stats per interface so that
+ // API users have better options.
+ stats := link.Attrs().Statistics
+ netStats.TxBytes += stats.TxBytes
+ netStats.RxBytes += stats.RxBytes
+ }
}
- netStats = link.Attrs().Statistics
return nil
})
return netStats, err
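With the rewrite above, getContainerNetIO no longer hardcodes eth0 (or tap0 for slirp4netns); it iterates every interface recorded in the network status and sums the byte counters. Stripped of the netns plumbing, the aggregation reduces to the following (ifaceStats and statusBlock are stand-ins for netlink.LinkStatistics and types.StatusBlock):

package netio // illustrative

// ifaceStats carries the two counters the loop above actually sums;
// netlink.LinkStatistics has many more fields.
type ifaceStats struct{ TxBytes, RxBytes uint64 }

// statusBlock stands in for types.StatusBlock: interfaces per network.
type statusBlock struct{ Interfaces map[string]ifaceStats }

// sumNetIO folds all interfaces of all networks into one total.
func sumNetIO(netStatus map[string]statusBlock) ifaceStats {
	var total ifaceStats
	for _, status := range netStatus {
		for _, s := range status.Interfaces {
			total.TxBytes += s.TxBytes
			total.RxBytes += s.RxBytes
		}
	}
	return total
}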
@@ -1031,7 +1049,7 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
// If we have networks - handle that here
if len(networks) > 0 {
if len(networks) != len(netStatus) {
- return nil, errors.Wrapf(define.ErrInternal, "network inspection mismatch: asked to join %d network(s) %v, but have information on %d network(s)", len(networks), networks, len(netStatus))
+ return nil, fmt.Errorf("network inspection mismatch: asked to join %d network(s) %v, but have information on %d network(s): %w", len(networks), networks, len(netStatus), define.ErrInternal)
}
settings.Networks = make(map[string]*define.InspectAdditionalNetwork)
@@ -1056,7 +1074,7 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
// If not joining networks, we should have at most 1 result
if len(netStatus) > 1 {
- return nil, errors.Wrapf(define.ErrInternal, "should have at most 1 network status result if not joining networks, instead got %d", len(netStatus))
+ return nil, fmt.Errorf("should have at most 1 network status result if not joining networks, instead got %d: %w", len(netStatus), define.ErrInternal)
}
if len(netStatus) == 1 {
@@ -1069,7 +1087,7 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
func (c *Container) joinedNetworkNSPath() string {
for _, namespace := range c.config.Spec.Linux.Namespaces {
- if namespace.Type == spec.NetworkNamespace {
+ if namespace.Type == specs.NetworkNamespace {
return namespace.Path
}
}
@@ -1209,7 +1227,7 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro
_, nameExists := networks[netName]
if !nameExists && len(networks) > 0 {
- return errors.Errorf("container %s is not connected to network %s", nameOrID, netName)
+ return fmt.Errorf("container %s is not connected to network %s", nameOrID, netName)
}
if err := c.syncContainer(); err != nil {
@@ -1228,7 +1246,7 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro
}
if c.state.NetNS == nil {
- return errors.Wrapf(define.ErrNoNetwork, "unable to disconnect %s from %s", nameOrID, netName)
+ return fmt.Errorf("unable to disconnect %s from %s: %w", nameOrID, netName, define.ErrNoNetwork)
}
opts := types.NetworkOptions{
@@ -1346,7 +1364,7 @@ func (c *Container) NetworkConnect(nameOrID, netName string, netOpts types.PerNe
return nil
}
if c.state.NetNS == nil {
- return errors.Wrapf(define.ErrNoNetwork, "unable to connect %s to %s", nameOrID, netName)
+ return fmt.Errorf("unable to connect %s to %s: %w", nameOrID, netName, define.ErrNoNetwork)
}
opts := types.NetworkOptions{
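Earlier in this file, RootlessNetNS.Do has to chase /etc/resolv.conf through its symlink chain (with a link counter as a cycle guard) before deciding what to bind-mount. A hedged sketch of that walk (the function name and the 255-link cap are illustrative):

package netns // illustrative

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// followResolvConf resolves /etc/resolv.conf link by link; relative link
// targets are interpreted against the directory of the current path.
func followResolvConf() (string, error) {
	path := "/etc/resolv.conf"
	for i := 0; i < 255; i++ {
		fi, err := os.Lstat(path)
		if err != nil {
			return "", fmt.Errorf("failed to stat resolv.conf path: %w", err)
		}
		if fi.Mode()&os.ModeSymlink == 0 {
			return path, nil // not a link, we are done
		}
		link, err := os.Readlink(path)
		if err != nil {
			return "", fmt.Errorf("failed to read resolv.conf symlink: %w", err)
		}
		if filepath.IsAbs(link) {
			path = link
		} else {
			path = filepath.Join(filepath.Dir(path), link)
		}
	}
	return "", errors.New("too many symlinks while resolving resolv.conf")
}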
diff --git a/libpod/networking_slirp4netns.go b/libpod/networking_slirp4netns.go
index 788834435..4a6462d46 100644
--- a/libpod/networking_slirp4netns.go
+++ b/libpod/networking_slirp4netns.go
@@ -5,6 +5,7 @@ package libpod
import (
"bytes"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -24,7 +25,6 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/rootlessport"
"github.com/containers/podman/v4/pkg/servicereaper"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -68,7 +68,7 @@ func checkSlirpFlags(path string) (*slirpFeatures, error) {
cmd := exec.Command(path, "--help")
out, err := cmd.CombinedOutput()
if err != nil {
- return nil, errors.Wrapf(err, "slirp4netns %q", out)
+ return nil, fmt.Errorf("slirp4netns %q: %w", out, err)
}
return &slirpFeatures{
HasDisableHostLoopback: strings.Contains(string(out), "--disable-host-loopback"),
@@ -95,14 +95,14 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
for _, o := range slirpOptions {
parts := strings.SplitN(o, "=", 2)
if len(parts) < 2 {
- return nil, errors.Errorf("unknown option for slirp4netns: %q", o)
+ return nil, fmt.Errorf("unknown option for slirp4netns: %q", o)
}
option, value := parts[0], parts[1]
switch option {
case "cidr":
ipv4, _, err := net.ParseCIDR(value)
if err != nil || ipv4.To4() == nil {
- return nil, errors.Errorf("invalid cidr %q", value)
+ return nil, fmt.Errorf("invalid cidr %q", value)
}
slirp4netnsOpts.cidr = value
case "port_handler":
@@ -112,7 +112,7 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
case "rootlesskit":
slirp4netnsOpts.isSlirpHostForward = false
default:
- return nil, errors.Errorf("unknown port_handler for slirp4netns: %q", value)
+ return nil, fmt.Errorf("unknown port_handler for slirp4netns: %q", value)
}
case "allow_host_loopback":
switch value {
@@ -121,7 +121,7 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
case "false":
slirp4netnsOpts.disableHostLoopback = true
default:
- return nil, errors.Errorf("invalid value of allow_host_loopback for slirp4netns: %q", value)
+ return nil, fmt.Errorf("invalid value of allow_host_loopback for slirp4netns: %q", value)
}
case "enable_ipv6":
switch value {
@@ -130,14 +130,14 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
case "false":
slirp4netnsOpts.enableIPv6 = false
default:
- return nil, errors.Errorf("invalid value of enable_ipv6 for slirp4netns: %q", value)
+ return nil, fmt.Errorf("invalid value of enable_ipv6 for slirp4netns: %q", value)
}
case "outbound_addr":
ipv4 := net.ParseIP(value)
if ipv4 == nil || ipv4.To4() == nil {
_, err := net.InterfaceByName(value)
if err != nil {
- return nil, errors.Errorf("invalid outbound_addr %q", value)
+ return nil, fmt.Errorf("invalid outbound_addr %q", value)
}
}
slirp4netnsOpts.outboundAddr = value
@@ -146,7 +146,7 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
if ipv6 == nil || ipv6.To4() != nil {
_, err := net.InterfaceByName(value)
if err != nil {
- return nil, errors.Errorf("invalid outbound_addr6: %q", value)
+ return nil, fmt.Errorf("invalid outbound_addr6: %q", value)
}
}
slirp4netnsOpts.outboundAddr6 = value
@@ -154,10 +154,10 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
var err error
slirp4netnsOpts.mtu, err = strconv.Atoi(value)
if slirp4netnsOpts.mtu < 68 || err != nil {
- return nil, errors.Errorf("invalid mtu %q", value)
+ return nil, fmt.Errorf("invalid mtu %q", value)
}
default:
- return nil, errors.Errorf("unknown option for slirp4netns: %q", o)
+ return nil, fmt.Errorf("unknown option for slirp4netns: %q", o)
}
}
return slirp4netnsOpts, nil
@@ -180,31 +180,31 @@ func createBasicSlirp4netnsCmdArgs(options *slirp4netnsNetworkOptions, features
if options.cidr != "" {
if !features.HasCIDR {
- return nil, errors.Errorf("cidr not supported")
+ return nil, fmt.Errorf("cidr not supported")
}
cmdArgs = append(cmdArgs, fmt.Sprintf("--cidr=%s", options.cidr))
}
if options.enableIPv6 {
if !features.HasIPv6 {
- return nil, errors.Errorf("enable_ipv6 not supported")
+ return nil, fmt.Errorf("enable_ipv6 not supported")
}
cmdArgs = append(cmdArgs, "--enable-ipv6")
}
if options.outboundAddr != "" {
if !features.HasOutboundAddr {
- return nil, errors.Errorf("outbound_addr not supported")
+ return nil, fmt.Errorf("outbound_addr not supported")
}
cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr=%s", options.outboundAddr))
}
if options.outboundAddr6 != "" {
if !features.HasOutboundAddr || !features.HasIPv6 {
- return nil, errors.Errorf("outbound_addr6 not supported")
+ return nil, fmt.Errorf("outbound_addr6 not supported")
}
if !options.enableIPv6 {
- return nil, errors.Errorf("enable_ipv6=true is required for outbound_addr6")
+ return nil, fmt.Errorf("enable_ipv6=true is required for outbound_addr6")
}
cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr6=%s", options.outboundAddr6))
}
@@ -225,7 +225,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
syncR, syncW, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "failed to open pipe")
+ return fmt.Errorf("failed to open pipe: %w", err)
}
defer errorhandling.CloseQuiet(syncR)
defer errorhandling.CloseQuiet(syncW)
@@ -243,7 +243,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
}
slirpFeatures, err := checkSlirpFlags(path)
if err != nil {
- return errors.Wrapf(err, "error checking slirp4netns binary %s: %q", path, err)
+ return fmt.Errorf("error checking slirp4netns binary %s: %q: %w", path, err, err)
}
cmdArgs, err := createBasicSlirp4netnsCmdArgs(netOptions, slirpFeatures)
if err != nil {
@@ -266,7 +266,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
if !ctr.config.PostConfigureNetNS {
ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
if err != nil {
- return errors.Wrapf(err, "failed to create rootless network sync pipe")
+ return fmt.Errorf("failed to create rootless network sync pipe: %w", err)
}
netnsPath = netns.Path()
cmdArgs = append(cmdArgs, "--netns-type=path", netnsPath, "tap0")
@@ -295,13 +295,13 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
logFile, err := os.Create(logPath)
if err != nil {
- return errors.Wrapf(err, "failed to open slirp4netns log file %s", logPath)
+ return fmt.Errorf("failed to open slirp4netns log file %s: %w", logPath, err)
}
defer logFile.Close()
// Immediately unlink the file so we won't need to worry about cleaning it up later.
// It is still accessible through the open fd logFile.
if err := os.Remove(logPath); err != nil {
- return errors.Wrapf(err, "delete file %s", logPath)
+ return fmt.Errorf("delete file %s: %w", logPath, err)
}
cmd.Stdout = logFile
cmd.Stderr = logFile
@@ -357,7 +357,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
if netOptions.enableIPv6 {
slirpReadyWg.Done()
}
- return errors.Wrapf(err, "failed to start slirp4netns process")
+ return fmt.Errorf("failed to start slirp4netns process: %w", err)
}
defer func() {
servicereaper.AddPID(cmd.Process.Pid)
@@ -381,7 +381,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
if netOptions.cidr != "" {
ipv4, ipv4network, err := net.ParseCIDR(netOptions.cidr)
if err != nil || ipv4.To4() == nil {
- return errors.Errorf("invalid cidr %q", netOptions.cidr)
+ return fmt.Errorf("invalid cidr %q", netOptions.cidr)
}
ctr.slirp4netnsSubnet = ipv4network
}
@@ -405,7 +405,7 @@ func GetSlirp4netnsIP(subnet *net.IPNet) (*net.IP, error) {
}
expectedIP, err := addToIP(slirpSubnet, uint32(100))
if err != nil {
- return nil, errors.Wrapf(err, "error calculating expected ip for slirp4netns")
+ return nil, fmt.Errorf("error calculating expected ip for slirp4netns: %w", err)
}
return expectedIP, nil
}
@@ -419,7 +419,7 @@ func GetSlirp4netnsGateway(subnet *net.IPNet) (*net.IP, error) {
}
expectedGatewayIP, err := addToIP(slirpSubnet, uint32(2))
if err != nil {
- return nil, errors.Wrapf(err, "error calculating expected gateway ip for slirp4netns")
+ return nil, fmt.Errorf("error calculating expected gateway ip for slirp4netns: %w", err)
}
return expectedGatewayIP, nil
}
@@ -433,7 +433,7 @@ func GetSlirp4netnsDNS(subnet *net.IPNet) (*net.IP, error) {
}
expectedDNSIP, err := addToIP(slirpSubnet, uint32(3))
if err != nil {
- return nil, errors.Wrapf(err, "error calculating expected dns ip for slirp4netns")
+ return nil, fmt.Errorf("error calculating expected dns ip for slirp4netns: %w", err)
}
return expectedDNSIP, nil
}
@@ -448,11 +448,11 @@ func addToIP(subnet *net.IPNet, offset uint32) (*net.IP, error) {
ipNewRaw := ipInteger + offset
// Avoid overflows
if ipNewRaw < ipInteger {
- return nil, errors.Errorf("integer overflow while calculating ip address offset, %s + %d", ipFixed, offset)
+ return nil, fmt.Errorf("integer overflow while calculating ip address offset, %s + %d", ipFixed, offset)
}
ipNew := net.IPv4(byte(ipNewRaw>>24), byte(ipNewRaw>>16&0xFF), byte(ipNewRaw>>8)&0xFF, byte(ipNewRaw&0xFF))
if !subnet.Contains(ipNew) {
- return nil, errors.Errorf("calculated ip address %s is not within given subnet %s", ipNew.String(), subnet.String())
+ return nil, fmt.Errorf("calculated ip address %s is not within given subnet %s", ipNew.String(), subnet.String())
}
return &ipNew, nil
}
@@ -465,7 +465,7 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
b := make([]byte, 16)
for {
if err := syncR.SetDeadline(time.Now().Add(timeout)); err != nil {
- return errors.Wrapf(err, "error setting %s pipe timeout", prog)
+ return fmt.Errorf("error setting %s pipe timeout: %w", prog, err)
}
// FIXME: return err as soon as proc exits, without waiting for timeout
if _, err := syncR.Read(b); err == nil {
@@ -476,7 +476,7 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
var status syscall.WaitStatus
pid, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil)
if err != nil {
- return errors.Wrapf(err, "failed to read %s process status", prog)
+ return fmt.Errorf("failed to read %s process status: %w", prog, err)
}
if pid != cmd.Process.Pid {
continue
@@ -488,16 +488,16 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
}
logContent, err := ioutil.ReadAll(logFile)
if err != nil {
- return errors.Wrapf(err, "%s failed", prog)
+ return fmt.Errorf("%s failed: %w", prog, err)
}
- return errors.Errorf("%s failed: %q", prog, logContent)
+ return fmt.Errorf("%s failed: %q", prog, logContent)
}
if status.Signaled() {
- return errors.Errorf("%s killed by signal", prog)
+ return fmt.Errorf("%s killed by signal", prog)
}
continue
}
- return errors.Wrapf(err, "failed to read from %s sync pipe", prog)
+ return fmt.Errorf("failed to read from %s sync pipe: %w", prog, err)
}
}
return nil
@@ -506,7 +506,7 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath string, netStatus map[string]types.StatusBlock) error {
syncR, syncW, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "failed to open pipe")
+ return fmt.Errorf("failed to open pipe: %w", err)
}
defer errorhandling.CloseQuiet(syncR)
defer errorhandling.CloseQuiet(syncW)
@@ -514,19 +514,19 @@ func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath strin
logPath := filepath.Join(ctr.runtime.config.Engine.TmpDir, fmt.Sprintf("rootlessport-%s.log", ctr.config.ID))
logFile, err := os.Create(logPath)
if err != nil {
- return errors.Wrapf(err, "failed to open rootlessport log file %s", logPath)
+ return fmt.Errorf("failed to open rootlessport log file %s: %w", logPath, err)
}
defer logFile.Close()
// Immediately unlink the file so we won't need to worry about cleaning it up later.
// It is still accessible through the open fd logFile.
if err := os.Remove(logPath); err != nil {
- return errors.Wrapf(err, "delete file %s", logPath)
+ return fmt.Errorf("delete file %s: %w", logPath, err)
}
if !ctr.config.PostConfigureNetNS {
ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe()
if err != nil {
- return errors.Wrapf(err, "failed to create rootless port sync pipe")
+ return fmt.Errorf("failed to create rootless port sync pipe: %w", err)
}
}
@@ -566,7 +566,7 @@ func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath strin
Setpgid: true,
}
if err := cmd.Start(); err != nil {
- return errors.Wrapf(err, "failed to start rootlessport process")
+ return fmt.Errorf("failed to start rootlessport process: %w", err)
}
defer func() {
servicereaper.AddPID(cmd.Process.Pid)
@@ -579,7 +579,7 @@ func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath strin
if stdoutStr != "" {
// err contains the full debug log and is too verbose, so return stdoutStr
logrus.Debug(err)
- return errors.Errorf("rootlessport " + strings.TrimSuffix(stdoutStr, "\n"))
+ return fmt.Errorf("rootlessport " + strings.TrimSuffix(stdoutStr, "\n"))
}
return err
}
@@ -612,7 +612,7 @@ func (r *Runtime) setupRootlessPortMappingViaSlirp(ctr *Container, cmd *exec.Cmd
// wait until the API socket file appears before trying to use it.
if _, err := WaitForFile(apiSocket, chWait, pidWaitTimeout); err != nil {
- return errors.Wrapf(err, "waiting for slirp4nets to create the api socket file %s", apiSocket)
+ return fmt.Errorf("waiting for slirp4nets to create the api socket file %s: %w", apiSocket, err)
}
// for each port we want to add we need to open a connection to the slirp4netns control socket
@@ -639,7 +639,7 @@ func (r *Runtime) setupRootlessPortMappingViaSlirp(ctr *Container, cmd *exec.Cmd
func openSlirp4netnsPort(apiSocket, proto, hostip string, hostport, guestport uint16) error {
conn, err := net.Dial("unix", apiSocket)
if err != nil {
- return errors.Wrapf(err, "cannot open connection to %s", apiSocket)
+ return fmt.Errorf("cannot open connection to %s: %w", apiSocket, err)
}
defer func() {
if err := conn.Close(); err != nil {
@@ -659,27 +659,27 @@ func openSlirp4netnsPort(apiSocket, proto, hostip string, hostport, guestport ui
// to the socket, as requested by slirp4netns.
data, err := json.Marshal(&apiCmd)
if err != nil {
- return errors.Wrapf(err, "cannot marshal JSON for slirp4netns")
+ return fmt.Errorf("cannot marshal JSON for slirp4netns: %w", err)
}
if _, err := conn.Write([]byte(fmt.Sprintf("%s\n", data))); err != nil {
- return errors.Wrapf(err, "cannot write to control socket %s", apiSocket)
+ return fmt.Errorf("cannot write to control socket %s: %w", apiSocket, err)
}
if err := conn.(*net.UnixConn).CloseWrite(); err != nil {
- return errors.Wrapf(err, "cannot shutdown the socket %s", apiSocket)
+ return fmt.Errorf("cannot shutdown the socket %s: %w", apiSocket, err)
}
buf := make([]byte, 2048)
readLength, err := conn.Read(buf)
if err != nil {
- return errors.Wrapf(err, "cannot read from control socket %s", apiSocket)
+ return fmt.Errorf("cannot read from control socket %s: %w", apiSocket, err)
}
// if there is no 'error' key in the received JSON data, then the operation was
// successful.
var y map[string]interface{}
if err := json.Unmarshal(buf[0:readLength], &y); err != nil {
- return errors.Wrapf(err, "error parsing error status from slirp4netns")
+ return fmt.Errorf("error parsing error status from slirp4netns: %w", err)
}
if e, found := y["error"]; found {
- return errors.Errorf("from slirp4netns while setting up port redirection: %v", e)
+ return fmt.Errorf("from slirp4netns while setting up port redirection: %v", e)
}
return nil
}
@@ -722,21 +722,21 @@ func (c *Container) reloadRootlessRLKPortMapping() error {
conn, err := openUnixSocket(filepath.Join(c.runtime.config.Engine.TmpDir, "rp", c.config.ID))
if err != nil {
- return errors.Wrap(err, "could not reload rootless port mappings, port forwarding may no longer work correctly")
+ return fmt.Errorf("could not reload rootless port mappings, port forwarding may no longer work correctly: %w", err)
}
defer conn.Close()
enc := json.NewEncoder(conn)
err = enc.Encode(childIP)
if err != nil {
- return errors.Wrap(err, "port reloading failed")
+ return fmt.Errorf("port reloading failed: %w", err)
}
b, err := ioutil.ReadAll(conn)
if err != nil {
- return errors.Wrap(err, "port reloading failed")
+ return fmt.Errorf("port reloading failed: %w", err)
}
data := string(b)
if data != "OK" {
- return errors.Errorf("port reloading failed: %s", data)
+ return fmt.Errorf("port reloading failed: %s", data)
}
return nil
}
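The code above follows the slirp4netns control-socket protocol: marshal a JSON command, write it plus a newline, half-close the write side, then read one JSON reply and look for an "error" key. A minimal standalone sketch, assuming slirp4netns's add_hostfwd command; the struct shapes here are illustrative, not libpod's own definitions:

package main

import (
	"encoding/json"
	"fmt"
	"net"
)

// Illustrative request shape for slirp4netns's add_hostfwd command.
type apiArgs struct {
	Proto     string `json:"proto"`
	HostAddr  string `json:"host_addr"`
	HostPort  uint16 `json:"host_port"`
	GuestPort uint16 `json:"guest_port"`
}

type apiCmd struct {
	Execute   string  `json:"execute"`
	Arguments apiArgs `json:"arguments"`
}

func addHostFwd(apiSocket, proto, hostIP string, hostPort, guestPort uint16) error {
	conn, err := net.Dial("unix", apiSocket)
	if err != nil {
		return fmt.Errorf("cannot open connection to %s: %w", apiSocket, err)
	}
	defer conn.Close()

	data, err := json.Marshal(&apiCmd{
		Execute:   "add_hostfwd",
		Arguments: apiArgs{Proto: proto, HostAddr: hostIP, HostPort: hostPort, GuestPort: guestPort},
	})
	if err != nil {
		return fmt.Errorf("cannot marshal JSON for slirp4netns: %w", err)
	}
	if _, err := conn.Write(append(data, '\n')); err != nil {
		return fmt.Errorf("cannot write to control socket %s: %w", apiSocket, err)
	}
	// Half-close the write side so slirp4netns sees the end of the request.
	if err := conn.(*net.UnixConn).CloseWrite(); err != nil {
		return fmt.Errorf("cannot shutdown the socket %s: %w", apiSocket, err)
	}

	buf := make([]byte, 2048)
	n, err := conn.Read(buf)
	if err != nil {
		return fmt.Errorf("cannot read from control socket %s: %w", apiSocket, err)
	}
	// No "error" key in the reply means the operation succeeded.
	var reply map[string]interface{}
	if err := json.Unmarshal(buf[:n], &reply); err != nil {
		return fmt.Errorf("parsing error status from slirp4netns: %w", err)
	}
	if e, found := reply["error"]; found {
		return fmt.Errorf("from slirp4netns while setting up port redirection: %v", e)
	}
	return nil
}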
diff --git a/libpod/oci.go b/libpod/oci.go
index 09f856ac7..70053db1b 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -3,6 +3,7 @@ package libpod
import (
"net/http"
+ "github.com/containers/common/pkg/resize"
"github.com/containers/podman/v4/libpod/define"
)
@@ -12,9 +13,7 @@ import (
// management logic - e.g., we do not expect it to determine on its own that
// calling 'UnpauseContainer()' on a container that is not paused is an error.
// The code calling the OCIRuntime will manage this.
-// TODO: May want to move the Attach() code under this umbrella. It's highly OCI
-// runtime dependent.
-// TODO: May want to move the conmon cleanup code here too - it depends on
+// TODO: May want to move the conmon cleanup code here - it depends on
// Conmon being in use.
type OCIRuntime interface {
// Name returns the name of the runtime.
@@ -52,6 +51,8 @@ type OCIRuntime interface {
// UnpauseContainer unpauses the given container.
UnpauseContainer(ctr *Container) error
+ // Attach to a container.
+ Attach(ctr *Container, params *AttachOptions) error
// HTTPAttach performs an attach intended to be transported over HTTP.
// For terminal attach, the container's output will be directly streamed
// to output; otherwise, STDOUT and STDERR will be multiplexed, with
@@ -66,7 +67,7 @@ type OCIRuntime interface {
// client.
HTTPAttach(ctr *Container, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, streamAttach, streamLogs bool) error
// AttachResize resizes the terminal in use by the given container.
- AttachResize(ctr *Container, newSize define.TerminalSize) error
+ AttachResize(ctr *Container, newSize resize.TerminalSize) error
// ExecContainer executes a command in a running container.
// Returns an int (PID of exec session), error channel (errors from
@@ -76,7 +77,7 @@ type OCIRuntime interface {
// running, in a goroutine that will return via the chan error in the
// return signature.
// newSize resizes the tty to this size before the process is started, must be nil if the exec session has no tty
- ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams, newSize *define.TerminalSize) (int, chan error, error)
+ ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams, newSize *resize.TerminalSize) (int, chan error, error)
// ExecContainerHTTP executes a command in a running container and
// attaches its standard streams to a provided hijacked HTTP session.
// Maintains the same invariants as ExecContainer (returns on session
@@ -84,14 +85,14 @@ type OCIRuntime interface {
// The HTTP attach itself maintains the same invariants as HTTPAttach.
// newSize resizes the tty to this size before the process is started, must be nil if the exec session has no tty
ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, r *http.Request, w http.ResponseWriter,
- streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, newSize *define.TerminalSize) (int, chan error, error)
+ streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, newSize *resize.TerminalSize) (int, chan error, error)
// ExecContainerDetached executes a command in a running container, but
// does not attach to it. Returns the PID of the exec session and an
// error (if starting the exec session failed)
ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error)
// ExecAttachResize resizes the terminal of a running exec session. Only
// allowed with sessions that were created with a TTY.
- ExecAttachResize(ctr *Container, sessionID string, newSize define.TerminalSize) error
+ ExecAttachResize(ctr *Container, sessionID string, newSize resize.TerminalSize) error
// ExecStopContainer stops a given exec session in a running container.
// SIGTERM with be sent initially, then SIGKILL after the given timeout.
// If timeout is 0, SIGKILL will be sent immediately, and SIGTERM will
@@ -149,6 +150,30 @@ type OCIRuntime interface {
RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntimeInfo, error)
}
+// AttachOptions are options used when attached to a container or an exec
+// session.
+type AttachOptions struct {
+ // Streams are the streams to attach to.
+ Streams *define.AttachStreams
+ // DetachKeys contains the key combination that will detach from the
+ // attach session. An empty string means no detach keys, so user
+ // detach is impossible. If unset, defaults from containers.conf will be
+ // used.
+ DetachKeys *string
+ // InitialSize is the initial size of the terminal. Set before the
+ // attach begins.
+ InitialSize *resize.TerminalSize
+ // AttachReady signals when the attach has successfully completed and
+ // streaming has begun.
+ AttachReady chan<- bool
+ // Start indicates that the container should be started if it is not
+ // already running.
+ Start bool
+ // Started signals when the container has been successfully started.
+ // Required if Start is true, unused otherwise.
+ Started chan<- bool
+}
+
// ExecOptions are options passed into ExecContainer. They control the command
// that will be executed and how the exec will proceed.
type ExecOptions struct {
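For callers, the single AttachOptions parameter replaces the old positional attach arguments. A hedged sketch of driving the new entry point from inside package libpod, assuming an OCIRuntime and *Container in scope (the define.AttachStreams field names are taken from the existing libpod type; bufio and os imports assumed):

// Sketch only: attach to stdio and start the container in one call.
func attachAndStart(r OCIRuntime, ctr *Container) error {
	streams := &define.AttachStreams{
		OutputStream: os.Stdout,
		ErrorStream:  os.Stderr,
		InputStream:  bufio.NewReader(os.Stdin),
		AttachOutput: true,
		AttachError:  true,
		AttachInput:  true,
	}
	// Buffered so the runtime's single send on Started cannot block.
	started := make(chan bool, 1)
	return r.Attach(ctr, &AttachOptions{
		Streams: streams,
		Start:   true,    // start the container once the attach is wired up
		Started: started, // required whenever Start is true
	})
}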
diff --git a/libpod/oci_attach_linux.go b/libpod/oci_conmon_attach_linux.go
index 06f8f8719..aa55aa6f5 100644
--- a/libpod/oci_attach_linux.go
+++ b/libpod/oci_conmon_attach_linux.go
@@ -4,6 +4,7 @@
package libpod
import (
+ "errors"
"fmt"
"io"
"net"
@@ -12,12 +13,11 @@ import (
"syscall"
"github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/resize"
+ "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/errorhandling"
- "github.com/containers/podman/v4/pkg/kubeutils"
- "github.com/containers/podman/v4/utils"
"github.com/moby/term"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -38,17 +38,26 @@ func openUnixSocket(path string) (*net.UnixConn, error) {
return net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: fmt.Sprintf("/proc/self/fd/%d", fd), Net: "unixpacket"})
}
-// Attach to the given container
-// Does not check if state is appropriate
-// started is only required if startContainer is true
-func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-chan define.TerminalSize, startContainer bool, started chan bool, attachRdy chan<- bool) error {
+// Attach to the given container.
+// Does not check if state is appropriate.
+// The Started channel in params is only required if params.Start is true.
+func (r *ConmonOCIRuntime) Attach(c *Container, params *AttachOptions) error {
passthrough := c.LogDriver() == define.PassthroughLogging
- if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput && !passthrough {
- return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
+ if params == nil || params.Streams == nil {
+ return fmt.Errorf("must provide parameters to Attach: %w", define.ErrInternal)
}
- if startContainer && started == nil {
- return errors.Wrapf(define.ErrInternal, "started chan not passed when startContainer set")
+
+ if !params.Streams.AttachOutput && !params.Streams.AttachError && !params.Streams.AttachInput && !passthrough {
+ return fmt.Errorf("must provide at least one stream to attach to: %w", define.ErrInvalidArg)
+ }
+ if params.Start && params.Started == nil {
+ return fmt.Errorf("started chan not passed when startContainer set: %w", define.ErrInternal)
+ }
+
+ keys := config.DefaultDetachKeys
+ if params.DetachKeys != nil {
+ keys = *params.DetachKeys
}
detachKeys, err := processDetachKeys(keys)
@@ -60,7 +69,12 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-
if !passthrough {
logrus.Debugf("Attaching to container %s", c.ID())
- registerResizeFunc(resize, c.bundlePath())
+ // If we have a resize, do it.
+ if params.InitialSize != nil {
+ if err := r.AttachResize(c, *params.InitialSize); err != nil {
+ return err
+ }
+ }
attachSock, err := c.AttachSocketPath()
if err != nil {
@@ -69,7 +83,7 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-
conn, err = openUnixSocket(attachSock)
if err != nil {
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", attachSock)
+ return fmt.Errorf("failed to connect to container's attach socket: %v: %w", attachSock, err)
}
defer func() {
if err := conn.Close(); err != nil {
@@ -80,22 +94,22 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-
// If starting was requested, start the container and notify when that's
// done.
- if startContainer {
+ if params.Start {
if err := c.start(); err != nil {
return err
}
- started <- true
+ params.Started <- true
}
if passthrough {
return nil
}
- receiveStdoutError, stdinDone := setupStdioChannels(streams, conn, detachKeys)
- if attachRdy != nil {
- attachRdy <- true
+ receiveStdoutError, stdinDone := setupStdioChannels(params.Streams, conn, detachKeys)
+ if params.AttachReady != nil {
+ params.AttachReady <- true
}
- return readStdio(conn, streams, receiveStdoutError, stdinDone)
+ return readStdio(conn, params.Streams, receiveStdoutError, stdinDone)
}
// Attach to the given container's exec session
@@ -106,7 +120,7 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-
// conmon will then send the exit code of the exec process, or an error in the exec session
// startFd must be the input side of the fd.
// newSize resizes the tty to this size before the process is started, must be nil if the exec session has no tty
-// conmon will wait to start the exec session until the parent process has setup the console socket.
+// conmon will wait to start the exec session until the parent process has set up the console socket.
// Once attachToExec successfully attaches to the console socket, the child conmon process responsible for calling runtime exec
// will read from the output side of start fd, thus learning to start the child process.
// Thus, the order goes as follows:
@@ -116,12 +130,12 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-
// 4. attachToExec sends on startFd, signalling it has attached to the socket and child is ready to go
// 5. child receives on startFd, runs the runtime exec command
// attachToExec is responsible for closing startFd and attachFd
-func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, sessionID string, startFd, attachFd *os.File, newSize *define.TerminalSize) error {
+func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, sessionID string, startFd, attachFd *os.File, newSize *resize.TerminalSize) error {
if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput {
- return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
+ return fmt.Errorf("must provide at least one stream to attach to: %w", define.ErrInvalidArg)
}
if startFd == nil || attachFd == nil {
- return errors.Wrapf(define.ErrInvalidArg, "start sync pipe and attach sync pipe must be defined for exec attach")
+ return fmt.Errorf("start sync pipe and attach sync pipe must be defined for exec attach: %w", define.ErrInvalidArg)
}
defer errorhandling.CloseQuiet(startFd)
@@ -160,7 +174,7 @@ func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, se
// 2: then attach
conn, err := openUnixSocket(sockPath)
if err != nil {
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", sockPath)
+ return fmt.Errorf("failed to connect to container's attach socket: %v: %w", sockPath, err)
}
defer func() {
if err := conn.Close(); err != nil {
@@ -186,13 +200,13 @@ func processDetachKeys(keys string) ([]byte, error) {
}
detachKeys, err := term.ToBytes(keys)
if err != nil {
- return nil, errors.Wrapf(err, "invalid detach keys")
+ return nil, fmt.Errorf("invalid detach keys: %w", err)
}
return detachKeys, nil
}
-func registerResizeFunc(resize <-chan define.TerminalSize, bundlePath string) {
- kubeutils.HandleResizing(resize, func(size define.TerminalSize) {
+func registerResizeFunc(r <-chan resize.TerminalSize, bundlePath string) {
+ resize.HandleResizing(r, func(size resize.TerminalSize) {
controlPath := filepath.Join(bundlePath, "ctl")
controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY, 0)
if err != nil {
@@ -218,7 +232,7 @@ func setupStdioChannels(streams *define.AttachStreams, conn *net.UnixConn, detac
go func() {
var err error
if streams.AttachInput {
- _, err = utils.CopyDetachable(conn, streams.InputStream, detachKeys)
+ _, err = util.CopyDetachable(conn, streams.InputStream, detachKeys)
}
stdinDone <- err
}()
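Both registerResizeFunc and AttachResize funnel into the same conmon control-file protocol: a single text line of three integers written to the "ctl" FIFO in the bundle directory, where message type 1 means resize. A minimal sketch of that write, under the same assumptions as the code above (fmt, os, path/filepath, resize, and x/sys/unix imports):

// Sketch: push a terminal size to conmon's ctl FIFO (message type 1 = resize).
func writeResizeToCtl(bundlePath string, size resize.TerminalSize) error {
	controlPath := filepath.Join(bundlePath, "ctl")
	controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY, 0)
	if err != nil {
		return fmt.Errorf("could not open ctl file %s: %w", controlPath, err)
	}
	defer controlFile.Close()
	_, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, size.Height, size.Width)
	return err
}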
diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go
index 70124bec1..16cd7ef9f 100644
--- a/libpod/oci_conmon_exec_linux.go
+++ b/libpod/oci_conmon_exec_linux.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "errors"
"fmt"
"io/ioutil"
"net/http"
@@ -13,28 +14,28 @@ import (
"github.com/containers/common/pkg/capabilities"
"github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/resize"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/pkg/lookup"
"github.com/containers/podman/v4/pkg/util"
- "github.com/containers/podman/v4/utils"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// ExecContainer executes a command in a running container
-func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams, newSize *define.TerminalSize) (int, chan error, error) {
+func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams, newSize *resize.TerminalSize) (int, chan error, error) {
if options == nil {
- return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
+ return -1, nil, fmt.Errorf("must provide an ExecOptions struct to ExecContainer: %w", define.ErrInvalidArg)
}
if len(options.Cmd) == 0 {
- return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
+ return -1, nil, fmt.Errorf("must provide a command to execute: %w", define.ErrInvalidArg)
}
if sessionID == "" {
- return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
+ return -1, nil, fmt.Errorf("must provide a session ID for exec: %w", define.ErrEmptyID)
}
// TODO: Should we default this to false?
@@ -73,7 +74,7 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options
}()
if err := execCmd.Wait(); err != nil {
- return -1, nil, errors.Wrapf(err, "cannot run conmon")
+ return -1, nil, fmt.Errorf("cannot run conmon: %w", err)
}
pid, err := readConmonPipeData(r.name, pipes.syncPipe, ociLog)
@@ -84,15 +85,15 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options
// ExecContainerHTTP executes a new command in an existing container and
// forwards its standard streams over an attach
func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, req *http.Request, w http.ResponseWriter,
- streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, newSize *define.TerminalSize) (int, chan error, error) {
+ streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, newSize *resize.TerminalSize) (int, chan error, error) {
if streams != nil {
if !streams.Stdin && !streams.Stdout && !streams.Stderr {
- return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
+ return -1, nil, fmt.Errorf("must provide at least one stream to attach to: %w", define.ErrInvalidArg)
}
}
if options == nil {
- return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP")
+ return -1, nil, fmt.Errorf("must provide exec options to ExecContainerHTTP: %w", define.ErrInvalidArg)
}
detachString := config.DefaultDetachKeys
@@ -156,7 +157,7 @@ type conmonPipeData struct {
// not attach to it.
func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error) {
if options == nil {
- return -1, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP")
+ return -1, fmt.Errorf("must provide exec options to ExecContainerHTTP: %w", define.ErrInvalidArg)
}
var ociLog string
@@ -187,7 +188,7 @@ func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID strin
// Wait for conmon to exit successfully, then return.
if err := execCmd.Wait(); err != nil {
- return -1, errors.Wrapf(err, "cannot run conmon")
+ return -1, fmt.Errorf("cannot run conmon: %w", err)
}
pid, err := readConmonPipeData(r.name, pipes.syncPipe, ociLog)
@@ -196,7 +197,7 @@ func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID strin
}
// ExecAttachResize resizes the TTY of the given exec session.
-func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize define.TerminalSize) error {
+func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize resize.TerminalSize) error {
controlFile, err := openControlFile(ctr, ctr.execBundlePath(sessionID))
if err != nil {
return err
@@ -204,7 +205,7 @@ func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, ne
defer controlFile.Close()
if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
- return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
+ return fmt.Errorf("failed to write to ctl file to resize terminal: %w", err)
}
return nil
@@ -225,7 +226,7 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
if err == unix.ESRCH {
return nil
}
- return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
+ return fmt.Errorf("error pinging container %s exec session %s PID %d with signal 0: %w", ctr.ID(), sessionID, pid, err)
}
if timeout > 0 {
@@ -235,7 +236,7 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
if err == unix.ESRCH {
return nil
}
- return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid)
+ return fmt.Errorf("error killing container %s exec session %s PID %d with SIGTERM: %w", ctr.ID(), sessionID, pid, err)
}
// Wait for the PID to stop
@@ -253,12 +254,12 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
if err == unix.ESRCH {
return nil
}
- return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid)
+ return fmt.Errorf("error killing container %s exec session %s PID %d with SIGKILL: %w", ctr.ID(), sessionID, pid, err)
}
// Wait for the PID to stop
if err := waitPidStop(pid, killContainerTimeout); err != nil {
- return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid)
+ return fmt.Errorf("timed out waiting for container %s exec session %s PID %d to stop after SIGKILL: %w", ctr.ID(), sessionID, pid, err)
}
return nil
@@ -279,7 +280,7 @@ func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (b
if err == unix.ESRCH {
return false, nil
}
- return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
+ return false, fmt.Errorf("error pinging container %s exec session %s PID %d with signal 0: %w", ctr.ID(), sessionID, pid, err)
}
return true, nil
@@ -289,7 +290,7 @@ func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (b
func (r *ConmonOCIRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) {
// We don't even use the container, so we don't validate it
if sessionID == "" {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid session ID to get attach socket path")
+ return "", fmt.Errorf("must provide a valid session ID to get attach socket path: %w", define.ErrInvalidArg)
}
return filepath.Join(ctr.execBundlePath(sessionID), "attach"), nil
@@ -325,20 +326,20 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
pipes := new(execPipes)
if options == nil {
- return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
+ return nil, nil, fmt.Errorf("must provide an ExecOptions struct to ExecContainer: %w", define.ErrInvalidArg)
}
if len(options.Cmd) == 0 {
- return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
+ return nil, nil, fmt.Errorf("must provide a command to execute: %w", define.ErrInvalidArg)
}
if sessionID == "" {
- return nil, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
+ return nil, nil, fmt.Errorf("must provide a session ID for exec: %w", define.ErrEmptyID)
}
// create sync pipe to receive the pid
parentSyncPipe, childSyncPipe, err := newPipe()
if err != nil {
- return nil, nil, errors.Wrapf(err, "error creating socket pair")
+ return nil, nil, fmt.Errorf("error creating socket pair: %w", err)
}
pipes.syncPipe = parentSyncPipe
@@ -352,7 +353,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
// attachToExec is responsible for closing parentStartPipe
childStartPipe, parentStartPipe, err := newPipe()
if err != nil {
- return nil, nil, errors.Wrapf(err, "error creating socket pair")
+ return nil, nil, fmt.Errorf("error creating socket pair: %w", err)
}
pipes.startPipe = parentStartPipe
@@ -362,7 +363,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
// attachToExec is responsible for closing parentAttachPipe
parentAttachPipe, childAttachPipe, err := newPipe()
if err != nil {
- return nil, nil, errors.Wrapf(err, "error creating socket pair")
+ return nil, nil, fmt.Errorf("error creating socket pair: %w", err)
}
pipes.attachPipe = parentAttachPipe
@@ -471,7 +472,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
childrenClosed = true
if err != nil {
- return nil, nil, errors.Wrapf(err, "cannot start container %s", c.ID())
+ return nil, nil, fmt.Errorf("cannot start container %s: %w", c.ID(), err)
}
if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil {
return nil, nil, err
@@ -487,14 +488,14 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
}
// Attach to a container over HTTP
-func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, execCmd *exec.Cmd, conmonPipeDataChan chan<- conmonPipeData, ociLog string, newSize *define.TerminalSize, runtimeName string) (deferredErr error) {
+func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, execCmd *exec.Cmd, conmonPipeDataChan chan<- conmonPipeData, ociLog string, newSize *resize.TerminalSize, runtimeName string) (deferredErr error) {
// NOTE: As you may notice, the attach code is quite complex.
// Many things happen concurrently and yet are interdependent.
// If you ever change this function, make sure to write to the
// conmonPipeDataChan in case of an error.
if pipes == nil || pipes.startPipe == nil || pipes.attachPipe == nil {
- err := errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach")
+ err := fmt.Errorf("must provide a start and attach pipe to finish an exec attach: %w", define.ErrInvalidArg)
conmonPipeDataChan <- conmonPipeData{-1, err}
return err
}
@@ -537,7 +538,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
conn, err := openUnixSocket(sockPath)
if err != nil {
conmonPipeDataChan <- conmonPipeData{-1, err}
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", sockPath)
+ return fmt.Errorf("failed to connect to container's attach socket: %v: %w", sockPath, err)
}
defer func() {
if err := conn.Close(); err != nil {
@@ -558,13 +559,13 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
hijacker, ok := w.(http.Hijacker)
if !ok {
conmonPipeDataChan <- conmonPipeData{-1, err}
- return errors.Errorf("unable to hijack connection")
+ return errors.New("unable to hijack connection")
}
httpCon, httpBuf, err := hijacker.Hijack()
if err != nil {
conmonPipeDataChan <- conmonPipeData{-1, err}
- return errors.Wrapf(err, "error hijacking connection")
+ return fmt.Errorf("error hijacking connection: %w", err)
}
hijackDone <- true
@@ -575,7 +576,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
// Force a flush after the header is written.
if err := httpBuf.Flush(); err != nil {
conmonPipeDataChan <- conmonPipeData{-1, err}
- return errors.Wrapf(err, "error flushing HTTP hijack header")
+ return fmt.Errorf("error flushing HTTP hijack header: %w", err)
}
go func() {
@@ -607,7 +608,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
if attachStdin {
go func() {
logrus.Debugf("Beginning STDIN copy")
- _, err := utils.CopyDetachable(conn, httpBuf, detachKeys)
+ _, err := cutil.CopyDetachable(conn, httpBuf, detachKeys)
logrus.Debugf("STDIN copy completed")
stdinChan <- err
}()
@@ -723,7 +724,7 @@ func prepareProcessExec(c *Container, options *ExecOptions, env []string, sessio
if len(addGroups) > 0 {
sgids, err = lookup.GetContainerGroups(addGroups, c.state.Mountpoint, overrides)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up supplemental groups for container %s exec session %s", c.ID(), sessionID)
+ return nil, fmt.Errorf("error looking up supplemental groups for container %s exec session %s: %w", c.ID(), sessionID, err)
}
}
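ExecStopContainer's escalation logic is worth spelling out: probe the PID with signal 0, try SIGTERM within the timeout, then fall back to SIGKILL, treating ESRCH ("no such process") as already stopped at every step. A condensed sketch, assuming libpod's waitPidStop helper and killContainerTimeout constant shown elsewhere in this commit:

func stopSessionPid(pid int, timeout uint) error {
	// Signal 0 only probes for existence; ESRCH means the PID is gone.
	if err := unix.Kill(pid, 0); err != nil {
		if err == unix.ESRCH {
			return nil
		}
		return fmt.Errorf("pinging PID %d with signal 0: %w", pid, err)
	}
	if timeout > 0 {
		if err := unix.Kill(pid, unix.SIGTERM); err != nil && err != unix.ESRCH {
			return fmt.Errorf("killing PID %d with SIGTERM: %w", pid, err)
		}
		if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err == nil {
			return nil // graceful stop worked
		}
	}
	if err := unix.Kill(pid, unix.SIGKILL); err != nil && err != unix.ESRCH {
		return fmt.Errorf("killing PID %d with SIGKILL: %w", pid, err)
	}
	return waitPidStop(pid, killContainerTimeout)
}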
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 6aa7ce6dc..0cdfe90e9 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -7,6 +7,7 @@ import (
"bufio"
"bytes"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -23,8 +24,13 @@ import (
"text/template"
"time"
+ runcconfig "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/devices"
+
"github.com/containers/common/pkg/cgroups"
"github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/resize"
+ cutil "github.com/containers/common/pkg/util"
conmonConfig "github.com/containers/conmon/runner/config"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/logs"
@@ -38,7 +44,6 @@ import (
pmount "github.com/containers/storage/pkg/mount"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -78,7 +83,7 @@ type ConmonOCIRuntime struct {
// libpod.
func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeFlags []string, runtimeCfg *config.Config) (OCIRuntime, error) {
if name == "" {
- return nil, errors.Wrapf(define.ErrInvalidArg, "the OCI runtime must be provided a non-empty name")
+ return nil, fmt.Errorf("the OCI runtime must be provided a non-empty name: %w", define.ErrInvalidArg)
}
// Make lookup tables for runtime support
@@ -122,7 +127,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
if os.IsNotExist(err) {
continue
}
- return nil, errors.Wrapf(err, "cannot stat OCI runtime %s path", name)
+ return nil, fmt.Errorf("cannot stat OCI runtime %s path: %w", name, err)
}
if !stat.Mode().IsRegular() {
continue
@@ -143,7 +148,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
}
if !foundPath {
- return nil, errors.Wrapf(define.ErrInvalidArg, "no valid executable found for OCI runtime %s", name)
+ return nil, fmt.Errorf("no valid executable found for OCI runtime %s: %w", name, define.ErrInvalidArg)
}
runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits")
@@ -152,7 +157,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
if err := os.MkdirAll(runtime.exitsDir, 0750); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
- return nil, errors.Wrapf(err, "error creating OCI runtime exit files directory")
+ return nil, fmt.Errorf("error creating OCI runtime exit files directory: %w", err)
}
}
return runtime, nil
@@ -228,7 +233,7 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta
// changes are propagated to the host.
err = unix.Mount("/sys", "/sys", "none", unix.MS_REC|unix.MS_SLAVE, "")
if err != nil {
- return 0, errors.Wrapf(err, "cannot make /sys slave")
+ return 0, fmt.Errorf("cannot make /sys slave: %w", err)
}
mounts, err := pmount.GetMounts()
@@ -241,7 +246,7 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta
}
err = unix.Unmount(m.Mountpoint, 0)
if err != nil && !os.IsNotExist(err) {
- return 0, errors.Wrapf(err, "cannot unmount %s", m.Mountpoint)
+ return 0, fmt.Errorf("cannot unmount %s: %w", m.Mountpoint, err)
}
}
return r.createOCIContainer(ctr, restoreOptions)
@@ -264,11 +269,6 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta
// status, but will instead only check for the existence of the conmon exit file
// and update state to stopped if it exists.
func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
- exitFile, err := r.ExitFilePath(ctr)
- if err != nil {
- return err
- }
-
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return err
@@ -284,17 +284,17 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
outPipe, err := cmd.StdoutPipe()
if err != nil {
- return errors.Wrapf(err, "getting stdout pipe")
+ return fmt.Errorf("getting stdout pipe: %w", err)
}
errPipe, err := cmd.StderrPipe()
if err != nil {
- return errors.Wrapf(err, "getting stderr pipe")
+ return fmt.Errorf("getting stderr pipe: %w", err)
}
if err := cmd.Start(); err != nil {
out, err2 := ioutil.ReadAll(errPipe)
if err2 != nil {
- return errors.Wrapf(err, "error getting container %s state", ctr.ID())
+ return fmt.Errorf("error getting container %s state: %w", ctr.ID(), err)
}
if strings.Contains(string(out), "does not exist") || strings.Contains(string(out), "No such file") {
if err := ctr.removeConmonFiles(); err != nil {
@@ -305,7 +305,7 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
ctr.state.State = define.ContainerStateExited
return nil
}
- return errors.Wrapf(err, "error getting container %s state. stderr/out: %s", ctr.ID(), out)
+ return fmt.Errorf("error getting container %s state. stderr/out: %s: %w", ctr.ID(), out, err)
}
defer func() {
_ = cmd.Wait()
@@ -316,10 +316,10 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
}
out, err := ioutil.ReadAll(outPipe)
if err != nil {
- return errors.Wrapf(err, "error reading stdout: %s", ctr.ID())
+ return fmt.Errorf("error reading stdout: %s: %w", ctr.ID(), err)
}
if err := json.NewDecoder(bytes.NewBuffer(out)).Decode(state); err != nil {
- return errors.Wrapf(err, "error decoding container status for container %s", ctr.ID())
+ return fmt.Errorf("error decoding container status for container %s: %w", ctr.ID(), err)
}
ctr.state.PID = state.Pid
@@ -333,29 +333,17 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
case "stopped":
ctr.state.State = define.ContainerStateStopped
default:
- return errors.Wrapf(define.ErrInternal, "unrecognized status returned by runtime for container %s: %s",
- ctr.ID(), state.Status)
+ return fmt.Errorf("unrecognized status returned by runtime for container %s: %s: %w",
+ ctr.ID(), state.Status, define.ErrInternal)
}
// Only grab exit status if we were not already stopped
// If we were, it should already be in the database
if ctr.state.State == define.ContainerStateStopped && oldState != define.ContainerStateStopped {
- var fi os.FileInfo
- chWait := make(chan error)
- defer close(chWait)
-
- _, err := WaitForFile(exitFile, chWait, time.Second*5)
- if err == nil {
- fi, err = os.Stat(exitFile)
- }
- if err != nil {
- ctr.state.ExitCode = -1
- ctr.state.FinishedTime = time.Now()
- logrus.Errorf("No exit file for container %s found: %v", ctr.ID(), err)
- return nil
+ if _, err := ctr.Wait(context.Background()); err != nil {
+ logrus.Errorf("Waiting for container %s to exit: %v", ctr.ID(), err)
}
-
- return ctr.handleExitFile(exitFile, fi)
+ return nil
}
// Handle ContainerStateStopping - keep it unless the container
@@ -411,10 +399,10 @@ func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool)
if err2 := r.UpdateContainerStatus(ctr); err2 != nil {
logrus.Infof("Error updating status for container %s: %v", ctr.ID(), err2)
}
- if ctr.state.State == define.ContainerStateExited {
- return nil
+ if ctr.ensureState(define.ContainerStateStopped, define.ContainerStateExited) {
+ return define.ErrCtrStateInvalid
}
- return errors.Wrapf(err, "error sending signal to container %s", ctr.ID())
+ return fmt.Errorf("error sending signal to container %s: %w", ctr.ID(), err)
}
return nil
@@ -471,7 +459,7 @@ func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool)
return nil
}
- return errors.Wrapf(err, "error sending SIGKILL to container %s", ctr.ID())
+ return fmt.Errorf("error sending SIGKILL to container %s: %w", ctr.ID(), err)
}
// Give runtime a few seconds to make it happen
@@ -528,7 +516,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
if streams != nil {
if !streams.Stdin && !streams.Stdout && !streams.Stderr {
- return errors.Wrapf(define.ErrInvalidArg, "must specify at least one stream to attach to")
+ return fmt.Errorf("must specify at least one stream to attach to: %w", define.ErrInvalidArg)
}
}
@@ -541,7 +529,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
if streamAttach {
newConn, err := openUnixSocket(attachSock)
if err != nil {
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", attachSock)
+ return fmt.Errorf("failed to connect to container's attach socket: %v: %w", attachSock, err)
}
conn = newConn
defer func() {
@@ -576,12 +564,12 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
// Alright, let's hijack.
hijacker, ok := w.(http.Hijacker)
if !ok {
- return errors.Errorf("unable to hijack connection")
+ return fmt.Errorf("unable to hijack connection")
}
httpCon, httpBuf, err := hijacker.Hijack()
if err != nil {
- return errors.Wrapf(err, "error hijacking connection")
+ return fmt.Errorf("error hijacking connection: %w", err)
}
hijackDone <- true
@@ -590,7 +578,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
// Force a flush after the header is written.
if err := httpBuf.Flush(); err != nil {
- return errors.Wrapf(err, "error flushing HTTP hijack header")
+ return fmt.Errorf("error flushing HTTP hijack header: %w", err)
}
defer func() {
@@ -705,7 +693,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
// Next, STDIN. Avoid entirely if attachStdin unset.
if attachStdin {
go func() {
- _, err := utils.CopyDetachable(conn, httpBuf, detach)
+ _, err := cutil.CopyDetachable(conn, httpBuf, detach)
logrus.Debugf("STDIN copy completed")
stdinChan <- err
}()
@@ -736,7 +724,8 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
// isRetryable returns whether the error was caused by a blocked syscall or the
// specified operation on a non blocking file descriptor wasn't ready for completion.
func isRetryable(err error) bool {
- if errno, isErrno := errors.Cause(err).(syscall.Errno); isErrno {
+ var errno syscall.Errno
+ if errors.As(err, &errno) {
return errno == syscall.EINTR || errno == syscall.EAGAIN
}
return false
@@ -751,15 +740,15 @@ func openControlFile(ctr *Container, parentDir string) (*os.File, error) {
return controlFile, nil
}
if !isRetryable(err) {
- return nil, errors.Wrapf(err, "could not open ctl file for terminal resize for container %s", ctr.ID())
+ return nil, fmt.Errorf("could not open ctl file for terminal resize for container %s: %w", ctr.ID(), err)
}
time.Sleep(time.Second / 10)
}
- return nil, errors.Errorf("timeout waiting for %q", controlPath)
+ return nil, fmt.Errorf("timeout waiting for %q", controlPath)
}
// AttachResize resizes the terminal used by the given container.
-func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize define.TerminalSize) error {
+func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize resize.TerminalSize) error {
controlFile, err := openControlFile(ctr, ctr.bundlePath())
if err != nil {
return err
@@ -768,7 +757,7 @@ func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize define.TerminalS
logrus.Debugf("Received a resize event for container %s: %+v", ctr.ID(), newSize)
if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
- return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
+ return fmt.Errorf("failed to write to ctl file to resize terminal: %w", err)
}
return nil
@@ -876,7 +865,7 @@ func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) {
if err == unix.ESRCH {
return false, nil
}
- return false, errors.Wrapf(err, "error pinging container %s conmon with signal 0", ctr.ID())
+ return false, fmt.Errorf("error pinging container %s conmon with signal 0: %w", ctr.ID(), err)
}
return true, nil
}
@@ -908,7 +897,7 @@ func (r *ConmonOCIRuntime) SupportsKVM() bool {
// AttachSocketPath is the path to a single container's attach socket.
func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) {
if ctr == nil {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get attach socket path")
+ return "", fmt.Errorf("must provide a valid container to get attach socket path: %w", define.ErrInvalidArg)
}
return filepath.Join(ctr.bundlePath(), "attach"), nil
@@ -917,7 +906,7 @@ func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) {
// ExitFilePath is the path to a container's exit file.
func (r *ConmonOCIRuntime) ExitFilePath(ctr *Container) (string, error) {
if ctr == nil {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get exit file path")
+ return "", fmt.Errorf("must provide a valid container to get exit file path: %w", define.ErrInvalidArg)
}
return filepath.Join(r.exitsDir, ctr.ID()), nil
}
@@ -928,11 +917,11 @@ func (r *ConmonOCIRuntime) RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntime
conmonPackage := packageVersion(r.conmonPath)
runtimeVersion, err := r.getOCIRuntimeVersion()
if err != nil {
- return nil, nil, errors.Wrapf(err, "error getting version of OCI runtime %s", r.name)
+ return nil, nil, fmt.Errorf("error getting version of OCI runtime %s: %w", r.name, err)
}
conmonVersion, err := r.getConmonVersion()
if err != nil {
- return nil, nil, errors.Wrapf(err, "error getting conmon version")
+ return nil, nil, fmt.Errorf("error getting conmon version: %w", err)
}
conmon := define.ConmonInfo{
@@ -1002,7 +991,7 @@ func waitPidStop(pid int, timeout time.Duration) error {
return nil
case <-time.After(timeout):
close(chControl)
- return errors.Errorf("given PIDs did not die within timeout")
+ return fmt.Errorf("given PIDs did not die within timeout")
}
}
@@ -1014,11 +1003,11 @@ func (r *ConmonOCIRuntime) getLogTag(ctr *Container) (string, error) {
data, err := ctr.inspectLocked(false)
if err != nil {
// FIXME: this error should probably be returned
- return "", nil // nolint: nilerr
+ return "", nil //nolint: nilerr
}
tmpl, err := template.New("container").Parse(logTag)
if err != nil {
- return "", errors.Wrapf(err, "template parsing error %s", logTag)
+ return "", fmt.Errorf("template parsing error %s: %w", logTag, err)
}
var b bytes.Buffer
err = tmpl.Execute(&b, data)
@@ -1039,13 +1028,13 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
parentSyncPipe, childSyncPipe, err := newPipe()
if err != nil {
- return 0, errors.Wrapf(err, "error creating socket pair")
+ return 0, fmt.Errorf("error creating socket pair: %w", err)
}
defer errorhandling.CloseQuiet(parentSyncPipe)
childStartPipe, parentStartPipe, err := newPipe()
if err != nil {
- return 0, errors.Wrapf(err, "error creating socket pair for start pipe")
+ return 0, fmt.Errorf("error creating socket pair for start pipe: %w", err)
}
defer errorhandling.CloseQuiet(parentStartPipe)
@@ -1166,7 +1155,6 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
}).Debugf("running conmon: %s", r.conmonPath)
cmd := exec.Command(r.conmonPath, args...)
- cmd.Dir = ctr.bundlePath()
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
@@ -1217,12 +1205,12 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
if havePortMapping {
ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe()
if err != nil {
- return 0, errors.Wrapf(err, "failed to create rootless port sync pipe")
+ return 0, fmt.Errorf("failed to create rootless port sync pipe: %w", err)
}
}
ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
if err != nil {
- return 0, errors.Wrapf(err, "failed to create rootless network sync pipe")
+ return 0, fmt.Errorf("failed to create rootless network sync pipe: %w", err)
}
} else {
if ctr.rootlessSlirpSyncR != nil {
@@ -1354,8 +1342,6 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p
logDriverArg = define.NoLogging
case define.PassthroughLogging:
logDriverArg = define.PassthroughLogging
- case define.JSONLogging:
- fallthrough
//lint:ignore ST1015 the default case has to be here
default: //nolint:stylecheck,gocritic
// No case here should happen except JSONLogging, but keep this here in case the options are extended
@@ -1365,6 +1351,8 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p
// to get here, either a user would specify `--log-driver ""`, or this came from another place in libpod
// since the former case is obscure, and the latter case isn't an error, let's silently fallthrough
fallthrough
+ case define.JSONLogging:
+ fallthrough
case define.KubernetesLogging:
logDriverArg = fmt.Sprintf("%s:%s", define.KubernetesLogging, logPath)
}
@@ -1435,7 +1423,7 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
}
// $INVOCATION_ID is set by systemd when running as a service.
- if os.Getenv("INVOCATION_ID") != "" {
+ if ctr.runtime.RemoteURI() == "" && os.Getenv("INVOCATION_ID") != "" {
mustCreateCgroup = false
}
@@ -1451,9 +1439,14 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
// TODO: This should be a switch - we are not guaranteed that
// there are only 2 valid cgroup managers
cgroupParent := ctr.CgroupParent()
+ cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon")
+ resources := ctr.Spec().Linux.Resources
+ cgroupResources, err := GetLimits(resources)
+ if err != nil {
+ logrus.StandardLogger().Logf(logLevel, "Could not get container resources: %v", err)
+ }
if ctr.CgroupManager() == config.SystemdCgroupsManager {
unitName := createUnitName("libpod-conmon", ctr.ID())
-
realCgroupParent := cgroupParent
splitParent := strings.Split(cgroupParent, "/")
if strings.HasSuffix(cgroupParent, ".slice") && len(splitParent) > 1 {
@@ -1465,8 +1458,7 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
logrus.StandardLogger().Logf(logLevel, "Failed to add conmon to systemd sandbox cgroup: %v", err)
}
} else {
- cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon")
- control, err := cgroups.New(cgroupPath, &spec.LinuxResources{})
+ control, err := cgroups.New(cgroupPath, &cgroupResources)
if err != nil {
logrus.StandardLogger().Logf(logLevel, "Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
} else if err := control.AddPid(cmd.Process.Pid); err != nil {
@@ -1555,7 +1547,7 @@ func readConmonPipeData(runtimeName string, pipe *os.File, ociLog string) (int,
}
}
}
- return -1, errors.Wrapf(ss.err, "container create failed (no logs from conmon)")
+ return -1, fmt.Errorf("container create failed (no logs from conmon): %w", ss.err)
}
logrus.Debugf("Received: %d", ss.si.Data)
if ss.si.Data < 0 {
@@ -1572,11 +1564,11 @@ func readConmonPipeData(runtimeName string, pipe *os.File, ociLog string) (int,
if ss.si.Message != "" {
return ss.si.Data, getOCIRuntimeError(runtimeName, ss.si.Message)
}
- return ss.si.Data, errors.Wrapf(define.ErrInternal, "container create failed")
+ return ss.si.Data, fmt.Errorf("container create failed: %w", define.ErrInternal)
}
data = ss.si.Data
case <-time.After(define.ContainerCreateTimeout):
- return -1, errors.Wrapf(define.ErrInternal, "container creation timeout")
+ return -1, fmt.Errorf("container creation timeout: %w", define.ErrInternal)
}
return data, nil
}
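readConmonPipeData's contract with conmon is a single JSON object on the sync pipe carrying a data integer (a PID, or a negative value on failure) and an optional message. A hedged sketch of the decode side; the syncInfo field names mirror what the code above consumes, and define.ContainerCreateTimeout is the constant referenced in this hunk:

type syncInfo struct {
	Data    int    `json:"data"`
	Message string `json:"message"`
}

// Sketch: block on conmon's sync pipe with a creation timeout.
func readSyncPipe(pipe *os.File) (int, error) {
	ch := make(chan syncInfo, 1)
	errCh := make(chan error, 1)
	go func() {
		var si syncInfo
		if err := json.NewDecoder(pipe).Decode(&si); err != nil {
			errCh <- err
			return
		}
		ch <- si
	}()
	select {
	case err := <-errCh:
		return -1, fmt.Errorf("container create failed (no logs from conmon): %w", err)
	case si := <-ch:
		if si.Data < 0 {
			// Negative data signals an error; Message may hold runtime output.
			return -1, fmt.Errorf("container create failed: %w", define.ErrInternal)
		}
		return si.Data, nil
	case <-time.After(define.ContainerCreateTimeout):
		return -1, fmt.Errorf("container creation timeout: %w", define.ErrInternal)
	}
}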
@@ -1748,3 +1740,191 @@ func httpAttachNonTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter,
}
}
}
+
+// GetLimits converts spec resource limits to cgroup consumable limits
+func GetLimits(resource *spec.LinuxResources) (runcconfig.Resources, error) {
+ if resource == nil {
+ resource = &spec.LinuxResources{}
+ }
+ final := &runcconfig.Resources{}
+ devs := []*devices.Rule{}
+
+ // Devices
+ for _, entry := range resource.Devices {
+ if entry.Major == nil || entry.Minor == nil {
+ continue
+ }
+ runeType := 'a'
+ switch entry.Type {
+ case "b":
+ runeType = 'b'
+ case "c":
+ runeType = 'c'
+ }
+
+ devs = append(devs, &devices.Rule{
+ Type: devices.Type(runeType),
+ Major: *entry.Major,
+ Minor: *entry.Minor,
+ Permissions: devices.Permissions(entry.Access),
+ Allow: entry.Allow,
+ })
+ }
+ final.Devices = devs
+
+ // HugepageLimits
+ pageLimits := []*runcconfig.HugepageLimit{}
+ for _, entry := range resource.HugepageLimits {
+ pageLimits = append(pageLimits, &runcconfig.HugepageLimit{
+ Pagesize: entry.Pagesize,
+ Limit: entry.Limit,
+ })
+ }
+ final.HugetlbLimit = pageLimits
+
+ // Networking
+ netPriorities := []*runcconfig.IfPrioMap{}
+ if resource.Network != nil {
+ for _, entry := range resource.Network.Priorities {
+ netPriorities = append(netPriorities, &runcconfig.IfPrioMap{
+ Interface: entry.Name,
+ Priority: int64(entry.Priority),
+ })
+ }
+ }
+ final.NetPrioIfpriomap = netPriorities
+ rdma := make(map[string]runcconfig.LinuxRdma)
+ for name, entry := range resource.Rdma {
+ rdma[name] = runcconfig.LinuxRdma{HcaHandles: entry.HcaHandles, HcaObjects: entry.HcaObjects}
+ }
+ final.Rdma = rdma
+
+ // Memory
+ if resource.Memory != nil {
+ if resource.Memory.Limit != nil {
+ final.Memory = *resource.Memory.Limit
+ }
+ if resource.Memory.Reservation != nil {
+ final.MemoryReservation = *resource.Memory.Reservation
+ }
+ if resource.Memory.Swap != nil {
+ final.MemorySwap = *resource.Memory.Swap
+ }
+ if resource.Memory.Swappiness != nil {
+ final.MemorySwappiness = resource.Memory.Swappiness
+ }
+ }
+
+ // CPU
+ if resource.CPU != nil {
+ if resource.CPU.Period != nil {
+ final.CpuPeriod = *resource.CPU.Period
+ }
+ if resource.CPU.Quota != nil {
+ final.CpuQuota = *resource.CPU.Quota
+ }
+ if resource.CPU.RealtimePeriod != nil {
+ final.CpuRtPeriod = *resource.CPU.RealtimePeriod
+ }
+ if resource.CPU.RealtimeRuntime != nil {
+ final.CpuRtRuntime = *resource.CPU.RealtimeRuntime
+ }
+ if resource.CPU.Shares != nil {
+ final.CpuShares = *resource.CPU.Shares
+ }
+ final.CpusetCpus = resource.CPU.Cpus
+ final.CpusetMems = resource.CPU.Mems
+ }
+
+ // BlkIO
+ if resource.BlockIO != nil {
+ if len(resource.BlockIO.ThrottleReadBpsDevice) > 0 {
+ for _, entry := range resource.BlockIO.ThrottleReadBpsDevice {
+ throttle := &runcconfig.ThrottleDevice{}
+ dev := &runcconfig.BlockIODevice{
+ Major: entry.Major,
+ Minor: entry.Minor,
+ }
+ throttle.BlockIODevice = *dev
+ throttle.Rate = entry.Rate
+ final.BlkioThrottleReadBpsDevice = append(final.BlkioThrottleReadBpsDevice, throttle)
+ }
+ }
+ if len(resource.BlockIO.ThrottleWriteBpsDevice) > 0 {
+ for _, entry := range resource.BlockIO.ThrottleWriteBpsDevice {
+ throttle := &runcconfig.ThrottleDevice{}
+ dev := &runcconfig.BlockIODevice{
+ Major: entry.Major,
+ Minor: entry.Minor,
+ }
+ throttle.BlockIODevice = *dev
+ throttle.Rate = entry.Rate
+ final.BlkioThrottleWriteBpsDevice = append(final.BlkioThrottleWriteBpsDevice, throttle)
+ }
+ }
+ if len(resource.BlockIO.ThrottleReadIOPSDevice) > 0 {
+ for _, entry := range resource.BlockIO.ThrottleReadIOPSDevice {
+ throttle := &runcconfig.ThrottleDevice{}
+ dev := &runcconfig.BlockIODevice{
+ Major: entry.Major,
+ Minor: entry.Minor,
+ }
+ throttle.BlockIODevice = *dev
+ throttle.Rate = entry.Rate
+ final.BlkioThrottleReadIOPSDevice = append(final.BlkioThrottleReadIOPSDevice, throttle)
+ }
+ }
+ if len(resource.BlockIO.ThrottleWriteIOPSDevice) > 0 {
+ for _, entry := range resource.BlockIO.ThrottleWriteIOPSDevice {
+ throttle := &runcconfig.ThrottleDevice{}
+ dev := &runcconfig.BlockIODevice{
+ Major: entry.Major,
+ Minor: entry.Minor,
+ }
+ throttle.BlockIODevice = *dev
+ throttle.Rate = entry.Rate
+ final.BlkioThrottleWriteIOPSDevice = append(final.BlkioThrottleWriteIOPSDevice, throttle)
+ }
+ }
+ if resource.BlockIO.LeafWeight != nil {
+ final.BlkioLeafWeight = *resource.BlockIO.LeafWeight
+ }
+ if resource.BlockIO.Weight != nil {
+ final.BlkioWeight = *resource.BlockIO.Weight
+ }
+ if len(resource.BlockIO.WeightDevice) > 0 {
+ for _, entry := range resource.BlockIO.WeightDevice {
+ weight := &runcconfig.WeightDevice{}
+ dev := &runcconfig.BlockIODevice{
+ Major: entry.Major,
+ Minor: entry.Minor,
+ }
+ if entry.Weight != nil {
+ weight.Weight = *entry.Weight
+ }
+ if entry.LeafWeight != nil {
+ weight.LeafWeight = *entry.LeafWeight
+ }
+ weight.BlockIODevice = *dev
+ final.BlkioWeightDevice = append(final.BlkioWeightDevice, weight)
+ }
+ }
+ }
+
+ // Pids
+ if resource.Pids != nil {
+ final.PidsLimit = resource.Pids.Limit
+ }
+
+ // Networking
+ if resource.Network != nil {
+ if resource.Network.ClassID != nil {
+ final.NetClsClassid = *resource.Network.ClassID
+ }
+ }
+
+ // Unified state
+ final.Unified = resource.Unified
+
+ return *final, nil
+}
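GetLimits is what lets the conmon cgroup above inherit the container's resource limits instead of an empty spec.LinuxResources. A small usage sketch with an illustrative memory limit; the cgroup path is hypothetical:

func exampleConmonCgroup(conmonPid int) error {
	limit := int64(256 << 20) // illustrative 256 MiB memory limit
	res := &spec.LinuxResources{Memory: &spec.LinuxMemory{Limit: &limit}}
	cgroupResources, err := GetLimits(res)
	if err != nil {
		return err
	}
	// cgroups.New accepts the converted runc resources, as in the diff above.
	control, err := cgroups.New("/libpod_parent/conmon", &cgroupResources)
	if err != nil {
		return err
	}
	return control.AddPid(conmonPid)
}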
diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go
index 86f54c02e..2ab2b4577 100644
--- a/libpod/oci_missing.go
+++ b/libpod/oci_missing.go
@@ -6,8 +6,8 @@ import (
"path/filepath"
"sync"
+ "github.com/containers/common/pkg/resize"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -108,24 +108,29 @@ func (r *MissingRuntime) UnpauseContainer(ctr *Container) error {
return r.printError()
}
+// Attach is not available as the runtime is missing
+func (r *MissingRuntime) Attach(ctr *Container, params *AttachOptions) error {
+ return r.printError()
+}
+
// HTTPAttach is not available as the runtime is missing
func (r *MissingRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, streamAttach, streamLogs bool) error {
return r.printError()
}
// AttachResize is not available as the runtime is missing
-func (r *MissingRuntime) AttachResize(ctr *Container, newSize define.TerminalSize) error {
+func (r *MissingRuntime) AttachResize(ctr *Container, newSize resize.TerminalSize) error {
return r.printError()
}
// ExecContainer is not available as the runtime is missing
-func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams, newSize *define.TerminalSize) (int, chan error, error) {
+func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams, newSize *resize.TerminalSize) (int, chan error, error) {
return -1, nil, r.printError()
}
// ExecContainerHTTP is not available as the runtime is missing
func (r *MissingRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, req *http.Request, w http.ResponseWriter,
- streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, newSize *define.TerminalSize) (int, chan error, error) {
+ streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, newSize *resize.TerminalSize) (int, chan error, error) {
return -1, nil, r.printError()
}
@@ -135,7 +140,7 @@ func (r *MissingRuntime) ExecContainerDetached(ctr *Container, sessionID string,
}
// ExecAttachResize is not available as the runtime is missing.
-func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize define.TerminalSize) error {
+func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize resize.TerminalSize) error {
return r.printError()
}
@@ -204,7 +209,7 @@ func (r *MissingRuntime) ExecAttachSocketPath(ctr *Container, sessionID string)
// the container, but Conmon should still place an exit file for it.
func (r *MissingRuntime) ExitFilePath(ctr *Container) (string, error) {
if ctr == nil {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get exit file path")
+ return "", fmt.Errorf("must provide a valid container to get exit file path: %w", define.ErrInvalidArg)
}
return filepath.Join(r.exitsDir, ctr.ID()), nil
}
@@ -222,5 +227,5 @@ func (r *MissingRuntime) RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntimeIn
// Return an error indicating the runtime is missing
func (r *MissingRuntime) printError() error {
- return errors.Wrapf(define.ErrOCIRuntimeNotFound, "runtime %s is missing", r.name)
+ return fmt.Errorf("runtime %s is missing: %w", r.name, define.ErrOCIRuntimeNotFound)
}
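The payoff of the %w migration throughout this commit is that sentinel checks keep working via the standard library instead of errors.Cause. For instance, the wrapped define.ErrInvalidArg above still matches (missing here stands for any *MissingRuntime):

if _, err := missing.ExitFilePath(nil); errors.Is(err, define.ErrInvalidArg) {
	// fmt.Errorf("...: %w", define.ErrInvalidArg) is transparent to
	// errors.Is, exactly as errors.Wrapf was to errors.Cause.
}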
diff --git a/libpod/oci_util.go b/libpod/oci_util.go
index 64edfdef2..e118348fa 100644
--- a/libpod/oci_util.go
+++ b/libpod/oci_util.go
@@ -10,7 +10,6 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -70,7 +69,7 @@ func bindPort(protocol, hostIP string, port uint16, isV6 bool, sctpWarning *bool
addr, err = net.ResolveUDPAddr("udp4", fmt.Sprintf("%s:%d", hostIP, port))
}
if err != nil {
- return nil, errors.Wrapf(err, "cannot resolve the UDP address")
+ return nil, fmt.Errorf("cannot resolve the UDP address: %w", err)
}
proto := "udp4"
@@ -79,11 +78,11 @@ func bindPort(protocol, hostIP string, port uint16, isV6 bool, sctpWarning *bool
}
server, err := net.ListenUDP(proto, addr)
if err != nil {
- return nil, errors.Wrapf(err, "cannot listen on the UDP port")
+ return nil, fmt.Errorf("cannot listen on the UDP port: %w", err)
}
file, err = server.File()
if err != nil {
- return nil, errors.Wrapf(err, "cannot get file for UDP socket")
+ return nil, fmt.Errorf("cannot get file for UDP socket: %w", err)
}
// close the listener
// note that this does not affect the fd, see the godoc for server.File()
@@ -103,7 +102,7 @@ func bindPort(protocol, hostIP string, port uint16, isV6 bool, sctpWarning *bool
addr, err = net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", hostIP, port))
}
if err != nil {
- return nil, errors.Wrapf(err, "cannot resolve the TCP address")
+ return nil, fmt.Errorf("cannot resolve the TCP address: %w", err)
}
proto := "tcp4"
@@ -112,11 +111,11 @@ func bindPort(protocol, hostIP string, port uint16, isV6 bool, sctpWarning *bool
}
server, err := net.ListenTCP(proto, addr)
if err != nil {
- return nil, errors.Wrapf(err, "cannot listen on the TCP port")
+ return nil, fmt.Errorf("cannot listen on the TCP port: %w", err)
}
file, err = server.File()
if err != nil {
- return nil, errors.Wrapf(err, "cannot get file for TCP socket")
+ return nil, fmt.Errorf("cannot get file for TCP socket: %w", err)
}
// close the listener
// note that this does not affect the fd, see the godoc for server.File()
@@ -144,14 +143,14 @@ func getOCIRuntimeError(name, runtimeMsg string) error {
if includeFullOutput {
errStr = runtimeMsg
}
- return errors.Wrapf(define.ErrOCIRuntimePermissionDenied, "%s: %s", name, strings.Trim(errStr, "\n"))
+ return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrOCIRuntimePermissionDenied)
}
if match := regexp.MustCompile("(?i).*executable file not found in.*|.*no such file or directory.*").FindString(runtimeMsg); match != "" {
errStr := match
if includeFullOutput {
errStr = runtimeMsg
}
- return errors.Wrapf(define.ErrOCIRuntimeNotFound, "%s: %s", name, strings.Trim(errStr, "\n"))
+ return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrOCIRuntimeNotFound)
}
if match := regexp.MustCompile("`/proc/[a-z0-9-].+/attr.*`").FindString(runtimeMsg); match != "" {
errStr := match
@@ -159,11 +158,11 @@ func getOCIRuntimeError(name, runtimeMsg string) error {
errStr = runtimeMsg
}
if strings.HasSuffix(match, "/exec`") {
- return errors.Wrapf(define.ErrSetSecurityAttribute, "%s: %s", name, strings.Trim(errStr, "\n"))
+ return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrSetSecurityAttribute)
} else if strings.HasSuffix(match, "/current`") {
- return errors.Wrapf(define.ErrGetSecurityAttribute, "%s: %s", name, strings.Trim(errStr, "\n"))
+ return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrGetSecurityAttribute)
}
- return errors.Wrapf(define.ErrSecurityAttribute, "%s: %s", name, strings.Trim(errStr, "\n"))
+ return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrSecurityAttribute)
}
- return errors.Wrapf(define.ErrOCIRuntime, "%s: %s", name, strings.Trim(runtimeMsg, "\n"))
+ return fmt.Errorf("%s: %s: %w", name, strings.Trim(runtimeMsg, "\n"), define.ErrOCIRuntime)
}
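
The pattern throughout this change set: errors.Wrapf(sentinel, msg, args...) becomes fmt.Errorf(msg+": %w", args..., sentinel), so sentinel matching keeps working through the stdlib unwrap chain. A minimal sketch of a hypothetical caller (not part of this patch):

	// The %w verb records define.ErrOCIRuntime in the unwrap chain.
	err := fmt.Errorf("%s: %s: %w", name, runtimeMsg, define.ErrOCIRuntime)
	// errors.Is replaces the old errors.Cause(err) == define.ErrOCIRuntime check.
	if errors.Is(err, define.ErrOCIRuntime) {
		logrus.Warn("OCI runtime reported an error")
	}
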
diff --git a/libpod/options.go b/libpod/options.go
index feb89510f..f03980017 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "errors"
"fmt"
"net"
"os"
@@ -12,6 +13,7 @@ import (
nettypes "github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/secrets"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v4/libpod/define"
@@ -24,7 +26,6 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -140,7 +141,7 @@ func WithOCIRuntime(runtime string) RuntimeOption {
}
if runtime == "" {
- return errors.Wrapf(define.ErrInvalidArg, "must provide a valid path")
+ return fmt.Errorf("must provide a valid path: %w", define.ErrInvalidArg)
}
rt.config.Engine.OCIRuntime = runtime
@@ -158,7 +159,7 @@ func WithConmonPath(path string) RuntimeOption {
}
if path == "" {
- return errors.Wrapf(define.ErrInvalidArg, "must provide a valid path")
+ return fmt.Errorf("must provide a valid path: %w", define.ErrInvalidArg)
}
rt.config.Engine.ConmonPath = []string{path}
@@ -218,8 +219,8 @@ func WithCgroupManager(manager string) RuntimeOption {
}
if manager != config.CgroupfsCgroupsManager && manager != config.SystemdCgroupsManager {
- return errors.Wrapf(define.ErrInvalidArg, "Cgroup manager must be one of %s and %s",
- config.CgroupfsCgroupsManager, config.SystemdCgroupsManager)
+ return fmt.Errorf("cgroup manager must be one of %s and %s: %w",
+ config.CgroupfsCgroupsManager, config.SystemdCgroupsManager, define.ErrInvalidArg)
}
rt.config.Engine.CgroupManager = manager
@@ -249,7 +250,7 @@ func WithRegistriesConf(path string) RuntimeOption {
logrus.Debugf("Setting custom registries.conf: %q", path)
return func(rt *Runtime) error {
if _, err := os.Stat(path); err != nil {
- return errors.Wrap(err, "locating specified registries.conf")
+ return fmt.Errorf("locating specified registries.conf: %w", err)
}
if rt.imageContext == nil {
rt.imageContext = &types.SystemContext{
@@ -271,7 +272,7 @@ func WithHooksDir(hooksDirs ...string) RuntimeOption {
for _, hooksDir := range hooksDirs {
if hooksDir == "" {
- return errors.Wrap(define.ErrInvalidArg, "empty-string hook directories are not supported")
+ return fmt.Errorf("empty-string hook directories are not supported: %w", define.ErrInvalidArg)
}
}
@@ -434,6 +435,21 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption {
}
}
+// WithReset instructs libpod to reset all storage to factory defaults.
+// All containers, pods, volumes, images, and networks will be removed.
+// All directories created by Libpod will be removed.
+func WithReset() RuntimeOption {
+ return func(rt *Runtime) error {
+ if rt.valid {
+ return define.ErrRuntimeFinalized
+ }
+
+ rt.doReset = true
+
+ return nil
+ }
+}
+
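A hedged usage sketch (the NewRuntime entry point and the fatal-error handling are assumed, not shown in this patch); note the runtime.go hunk later in this diff makes newRuntimeFromConfig return a nil runtime once the reset has run:

	// Reset all Podman storage; no usable runtime is returned afterwards.
	if _, err := libpod.NewRuntime(context.Background(), libpod.WithReset()); err != nil {
		logrus.Fatalf("system reset: %v", err)
	}
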
// WithRenumber instructs libpod to perform a lock renumbering while
// initializing. This will handle migrations from early versions of libpod with
// file locks to newer versions with SHM locking, as well as changes in the
@@ -478,7 +494,7 @@ func WithMigrateRuntime(requestedRuntime string) RuntimeOption {
}
if requestedRuntime == "" {
- return errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty name for new runtime")
+ return fmt.Errorf("must provide a non-empty name for new runtime: %w", define.ErrInvalidArg)
}
rt.migrateRuntime = requestedRuntime
@@ -497,7 +513,7 @@ func WithEventsLogger(logger string) RuntimeOption {
}
if !events.IsValidEventer(logger) {
- return errors.Wrapf(define.ErrInvalidArg, "%q is not a valid events backend", logger)
+ return fmt.Errorf("%q is not a valid events backend: %w", logger, define.ErrInvalidArg)
}
rt.config.Engine.EventsLogger = logger
@@ -605,8 +621,8 @@ func WithSdNotifyMode(mode string) CtrCreateOption {
}
// verify values
- if len(mode) > 0 && !util.StringInSlice(strings.ToLower(mode), SdNotifyModeValues) {
- return errors.Wrapf(define.ErrInvalidArg, "--sdnotify values must be one of %q", strings.Join(SdNotifyModeValues, ", "))
+ if len(mode) > 0 && !cutil.StringInSlice(strings.ToLower(mode), SdNotifyModeValues) {
+ return fmt.Errorf("--sdnotify values must be one of %q: %w", strings.Join(SdNotifyModeValues, ", "), define.ErrInvalidArg)
}
ctr.config.SdNotifyMode = mode
@@ -754,9 +770,9 @@ func WithStopSignal(signal syscall.Signal) CtrCreateOption {
}
if signal == 0 {
- return errors.Wrapf(define.ErrInvalidArg, "stop signal cannot be 0")
+ return fmt.Errorf("stop signal cannot be 0: %w", define.ErrInvalidArg)
} else if signal > 64 {
- return errors.Wrapf(define.ErrInvalidArg, "stop signal cannot be greater than 64 (SIGRTMAX)")
+ return fmt.Errorf("stop signal cannot be greater than 64 (SIGRTMAX): %w", define.ErrInvalidArg)
}
ctr.config.StopSignal = uint(signal)
@@ -1064,11 +1080,11 @@ func WithLogDriver(driver string) CtrCreateOption {
}
switch driver {
case "":
- return errors.Wrapf(define.ErrInvalidArg, "log driver must be set")
+ return fmt.Errorf("log driver must be set: %w", define.ErrInvalidArg)
case define.JournaldLogging, define.KubernetesLogging, define.JSONLogging, define.NoLogging, define.PassthroughLogging:
break
default:
- return errors.Wrapf(define.ErrInvalidArg, "invalid log driver")
+ return fmt.Errorf("invalid log driver: %w", define.ErrInvalidArg)
}
ctr.config.LogDriver = driver
@@ -1084,7 +1100,7 @@ func WithLogPath(path string) CtrCreateOption {
return define.ErrCtrFinalized
}
if path == "" {
- return errors.Wrapf(define.ErrInvalidArg, "log path must be set")
+ return fmt.Errorf("log path must be set: %w", define.ErrInvalidArg)
}
ctr.config.LogPath = path
@@ -1100,7 +1116,7 @@ func WithLogTag(tag string) CtrCreateOption {
return define.ErrCtrFinalized
}
if tag == "" {
- return errors.Wrapf(define.ErrInvalidArg, "log tag must be set")
+ return fmt.Errorf("log tag must be set: %w", define.ErrInvalidArg)
}
ctr.config.LogTag = tag
@@ -1123,7 +1139,7 @@ func WithCgroupsMode(mode string) CtrCreateOption {
case "enabled", "no-conmon", cgroupSplit:
ctr.config.CgroupsMode = mode
default:
- return errors.Wrapf(define.ErrInvalidArg, "Invalid cgroup mode %q", mode)
+ return fmt.Errorf("invalid cgroup mode %q: %w", mode, define.ErrInvalidArg)
}
return nil
@@ -1138,7 +1154,7 @@ func WithCgroupParent(parent string) CtrCreateOption {
}
if parent == "" {
- return errors.Wrapf(define.ErrInvalidArg, "cgroup parent cannot be empty")
+ return fmt.Errorf("cgroup parent cannot be empty: %w", define.ErrInvalidArg)
}
ctr.config.CgroupParent = parent
@@ -1168,7 +1184,7 @@ func WithDNS(dnsServers []string) CtrCreateOption {
for _, i := range dnsServers {
result := net.ParseIP(i)
if result == nil {
- return errors.Wrapf(define.ErrInvalidArg, "invalid IP address %s", i)
+ return fmt.Errorf("invalid IP address %s: %w", i, define.ErrInvalidArg)
}
dns = append(dns, result)
}
@@ -1185,7 +1201,7 @@ func WithDNSOption(dnsOptions []string) CtrCreateOption {
return define.ErrCtrFinalized
}
if ctr.config.UseImageResolvConf {
- return errors.Wrapf(define.ErrInvalidArg, "cannot add DNS options if container will not create /etc/resolv.conf")
+ return fmt.Errorf("cannot add DNS options if container will not create /etc/resolv.conf: %w", define.ErrInvalidArg)
}
ctr.config.DNSOption = append(ctr.config.DNSOption, dnsOptions...)
return nil
@@ -1359,7 +1375,7 @@ func WithRestartPolicy(policy string) CtrCreateOption {
case define.RestartPolicyNone, define.RestartPolicyNo, define.RestartPolicyOnFailure, define.RestartPolicyAlways, define.RestartPolicyUnlessStopped:
ctr.config.RestartPolicy = policy
default:
- return errors.Wrapf(define.ErrInvalidArg, "%q is not a valid restart policy", policy)
+ return fmt.Errorf("%q is not a valid restart policy: %w", policy, define.ErrInvalidArg)
}
return nil
@@ -1391,7 +1407,7 @@ func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption {
for _, vol := range volumes {
mountOpts, err := util.ProcessOptions(vol.Options, false, "")
if err != nil {
- return errors.Wrapf(err, "processing options for named volume %q mounted at %q", vol.Name, vol.Dest)
+ return fmt.Errorf("processing options for named volume %q mounted at %q: %w", vol.Name, vol.Dest, err)
}
ctr.config.NamedVolumes = append(ctr.config.NamedVolumes, &ContainerNamedVolume{
@@ -1677,6 +1693,18 @@ func withSetAnon() VolumeCreateOption {
}
}
+// WithVolumeDriverTimeout sets the volume creation timeout period
+func WithVolumeDriverTimeout(timeout int) VolumeCreateOption {
+ return func(volume *Volume) error {
+ if volume.valid {
+ return define.ErrVolumeFinalized
+ }
+
+ volume.config.Timeout = timeout
+ return nil
+ }
+}
+
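A hedged sketch of the intended wiring, assuming the usual Runtime.NewVolume entry point and the existing WithVolumeDriver option; the stored config.Timeout is later handed to GetVolumePlugin (see plugin/volume_api.go below):

	// Give a slow volume driver 30 seconds instead of the package default.
	vol, err := rt.NewVolume(ctx,
		libpod.WithVolumeDriver("my-driver"), // hypothetical driver name
		libpod.WithVolumeDriverTimeout(30),   // seconds; 0 keeps the default
	)
	if err != nil {
		return err
	}
	logrus.Infof("created volume %s", vol.Name())
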
// WithTimezone sets the timezone in the container
func WithTimezone(path string) CtrCreateOption {
return func(ctr *Container) error {
@@ -1692,7 +1720,7 @@ func WithTimezone(path string) CtrCreateOption {
}
// We don't want to mount a timezone directory
if file.IsDir() {
- return errors.New("Invalid timezone: is a directory")
+ return errors.New("invalid timezone: is a directory")
}
}
@@ -1708,7 +1736,7 @@ func WithUmask(umask string) CtrCreateOption {
return define.ErrCtrFinalized
}
if !define.UmaskRegex.MatchString(umask) {
- return errors.Wrapf(define.ErrInvalidArg, "Invalid umask string %s", umask)
+ return fmt.Errorf("invalid umask string %s: %w", umask, define.ErrInvalidArg)
}
ctr.config.Umask = umask
return nil
@@ -1781,7 +1809,7 @@ func WithInitCtrType(containerType string) CtrCreateOption {
ctr.config.InitContainerType = containerType
return nil
}
- return errors.Errorf("%s is invalid init container type", containerType)
+		return fmt.Errorf("%s is not a valid init container type", containerType)
}
}
@@ -1796,7 +1824,7 @@ func WithHostDevice(dev []specs.LinuxDevice) CtrCreateOption {
}
}
-// WithSelectedPasswordManagement makes it so that the container either does or does not setup /etc/passwd or /etc/group
+// WithSelectedPasswordManagement makes it so that the container either does or does not set up /etc/passwd or /etc/group
func WithSelectedPasswordManagement(passwd *bool) CtrCreateOption {
return func(c *Container) error {
if c.valid {
@@ -1815,12 +1843,12 @@ func WithInfraConfig(compatibleOptions InfraInherit) CtrCreateOption {
}
compatMarshal, err := json.Marshal(compatibleOptions)
if err != nil {
- return errors.New("Could not marshal compatible options")
+ return errors.New("could not marshal compatible options")
}
err = json.Unmarshal(compatMarshal, ctr.config)
if err != nil {
- return errors.New("Could not unmarshal compatible options into contrainer config")
+		return errors.New("could not unmarshal compatible options into container config")
}
return nil
}
@@ -2158,3 +2186,17 @@ func WithPasswdEntry(passwdEntry string) CtrCreateOption {
return nil
}
}
+
+// WithMountAllDevices sets the option to mount all of a privileged container's
+// host devices
+func WithMountAllDevices() CtrCreateOption {
+ return func(ctr *Container) error {
+ if ctr.valid {
+ return define.ErrCtrFinalized
+ }
+
+ ctr.config.MountAllDevices = true
+
+ return nil
+ }
+}
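A minimal, hedged sketch of option composition; the option only flips ctr.config.MountAllDevices, and the device enumeration that honors it for privileged containers lives outside this patch:

	// Passed alongside whatever privileged-container options the caller uses.
	opts := []libpod.CtrCreateOption{libpod.WithMountAllDevices()}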
diff --git a/libpod/plugin/volume_api.go b/libpod/plugin/volume_api.go
index a6d66a034..0a5eaae53 100644
--- a/libpod/plugin/volume_api.go
+++ b/libpod/plugin/volume_api.go
@@ -3,6 +3,7 @@ package plugin
import (
"bytes"
"context"
+ "fmt"
"io/ioutil"
"net"
"net/http"
@@ -12,19 +13,17 @@ import (
"sync"
"time"
+ "errors"
+
"github.com/containers/podman/v4/libpod/define"
"github.com/docker/go-plugins-helpers/sdk"
"github.com/docker/go-plugins-helpers/volume"
jsoniter "github.com/json-iterator/go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
-// TODO: We should add syntax for specifying plugins to containers.conf, and
-// support for loading based on that.
-
// Copied from docker/go-plugins-helpers/volume/api.go - not exported, so we
// need to do this to get at them.
// These are well-established paths that should not change unless the plugin API
@@ -38,8 +37,6 @@ var (
hostVirtualPath = "/VolumeDriver.Path"
mountPath = "/VolumeDriver.Mount"
unmountPath = "/VolumeDriver.Unmount"
- // nolint
- capabilitiesPath = "/VolumeDriver.Capabilities"
)
const (
@@ -80,7 +77,7 @@ func validatePlugin(newPlugin *VolumePlugin) error {
// Hit the Activate endpoint to find out if it is, and if so what kind
req, err := http.NewRequest("POST", "http://plugin"+activatePath, nil)
if err != nil {
- return errors.Wrapf(err, "error making request to volume plugin %s activation endpoint", newPlugin.Name)
+ return fmt.Errorf("error making request to volume plugin %s activation endpoint: %w", newPlugin.Name, err)
}
req.Header.Set("Host", newPlugin.getURI())
@@ -88,25 +85,25 @@ func validatePlugin(newPlugin *VolumePlugin) error {
resp, err := newPlugin.Client.Do(req)
if err != nil {
- return errors.Wrapf(err, "error sending request to plugin %s activation endpoint", newPlugin.Name)
+ return fmt.Errorf("error sending request to plugin %s activation endpoint: %w", newPlugin.Name, err)
}
defer resp.Body.Close()
// Response code MUST be 200. Anything else, we have to assume it's not
// a valid plugin.
if resp.StatusCode != 200 {
- return errors.Wrapf(ErrNotPlugin, "got status code %d from activation endpoint for plugin %s", resp.StatusCode, newPlugin.Name)
+ return fmt.Errorf("got status code %d from activation endpoint for plugin %s: %w", resp.StatusCode, newPlugin.Name, ErrNotPlugin)
}
// Read and decode the body so we can tell if this is a volume plugin.
respBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return errors.Wrapf(err, "error reading activation response body from plugin %s", newPlugin.Name)
+ return fmt.Errorf("error reading activation response body from plugin %s: %w", newPlugin.Name, err)
}
respStruct := new(activateResponse)
if err := json.Unmarshal(respBytes, respStruct); err != nil {
- return errors.Wrapf(err, "error unmarshalling plugin %s activation response", newPlugin.Name)
+ return fmt.Errorf("error unmarshalling plugin %s activation response: %w", newPlugin.Name, err)
}
foundVolume := false
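
For reference, this validates the standard docker plugin activation handshake: POST to /Plugin.Activate must return 200 with a JSON body listing the implemented subsystems. A sketch of a payload that would satisfy the check, decoded with the package's existing types:

	// An activation payload that marks this socket as a volume plugin.
	payload := []byte(`{"Implements": ["VolumeDriver"]}`)
	respStruct := new(activateResponse)
	_ = json.Unmarshal(payload, respStruct) // respStruct.Implements == ["VolumeDriver"]
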
@@ -118,7 +115,7 @@ func validatePlugin(newPlugin *VolumePlugin) error {
}
if !foundVolume {
- return errors.Wrapf(ErrNotVolumePlugin, "plugin %s does not implement volume plugin, instead provides %s", newPlugin.Name, strings.Join(respStruct.Implements, ", "))
+ return fmt.Errorf("plugin %s does not implement volume plugin, instead provides %s: %w", newPlugin.Name, strings.Join(respStruct.Implements, ", "), ErrNotVolumePlugin)
}
if plugins == nil {
@@ -132,7 +129,7 @@ func validatePlugin(newPlugin *VolumePlugin) error {
// GetVolumePlugin gets a single volume plugin, with the given name, at the
// given path.
-func GetVolumePlugin(name string, path string) (*VolumePlugin, error) {
+func GetVolumePlugin(name string, path string, timeout int) (*VolumePlugin, error) {
pluginsLock.Lock()
defer pluginsLock.Unlock()
@@ -140,7 +137,7 @@ func GetVolumePlugin(name string, path string) (*VolumePlugin, error) {
if exists {
// This shouldn't be possible, but just in case...
if plugin.SocketPath != filepath.Clean(path) {
- return nil, errors.Wrapf(define.ErrInvalidArg, "requested path %q for volume plugin %s does not match pre-existing path for plugin, %q", path, name, plugin.SocketPath)
+ return nil, fmt.Errorf("requested path %q for volume plugin %s does not match pre-existing path for plugin, %q: %w", path, name, plugin.SocketPath, define.ErrInvalidArg)
}
return plugin, nil
@@ -156,6 +153,13 @@ func GetVolumePlugin(name string, path string) (*VolumePlugin, error) {
// And since we can reuse it, might as well cache it.
client := new(http.Client)
client.Timeout = defaultTimeout
+	// If the user specified a non-zero timeout, use their value. Otherwise, keep the default.
+	if timeout != 0 {
+		if time.Duration(timeout)*time.Second < defaultTimeout {
+			logrus.Warnf("The default timeout for volume creation is %d seconds, setting a time less than that may break this feature", defaultTimeout/time.Second)
+		}
+		client.Timeout = time.Duration(timeout) * time.Second
+	}
// This bit borrowed from pkg/bindings/connection.go
client.Transport = &http.Transport{
DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
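
A hedged usage sketch of the new three-argument form (socket path hypothetical): the timeout is in seconds, 0 keeps the package default, and values below the default only warn rather than fail:

	// Validate, cache, and return a plugin handle with a 30s HTTP timeout.
	p, err := plugin.GetVolumePlugin("local", "/run/docker/plugins/local.sock", 30)
	if err != nil {
		return err
	}
	_ = p // ready for CreateVolume / MountVolume / etc.
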
@@ -167,10 +171,10 @@ func GetVolumePlugin(name string, path string) (*VolumePlugin, error) {
stat, err := os.Stat(newPlugin.SocketPath)
if err != nil {
- return nil, errors.Wrapf(err, "cannot access plugin %s socket %q", name, newPlugin.SocketPath)
+ return nil, fmt.Errorf("cannot access plugin %s socket %q: %w", name, newPlugin.SocketPath, err)
}
if stat.Mode()&os.ModeSocket == 0 {
- return nil, errors.Wrapf(ErrNotPlugin, "volume %s path %q is not a unix socket", name, newPlugin.SocketPath)
+ return nil, fmt.Errorf("volume %s path %q is not a unix socket: %w", name, newPlugin.SocketPath, ErrNotPlugin)
}
if err := validatePlugin(newPlugin); err != nil {
@@ -185,40 +189,39 @@ func (p *VolumePlugin) getURI() string {
}
// Verify the plugin is still available.
-// TODO: Do we want to ping with an HTTP request? There's no ping endpoint so
-// we'd need to hit Activate or Capabilities?
+// Does not actually ping the API, just verifies that the socket still exists.
func (p *VolumePlugin) verifyReachable() error {
if _, err := os.Stat(p.SocketPath); err != nil {
if os.IsNotExist(err) {
pluginsLock.Lock()
defer pluginsLock.Unlock()
delete(plugins, p.Name)
- return errors.Wrapf(ErrPluginRemoved, p.Name)
+ return fmt.Errorf("%s: %w", p.Name, ErrPluginRemoved)
}
- return errors.Wrapf(err, "error accessing plugin %s", p.Name)
+ return fmt.Errorf("error accessing plugin %s: %w", p.Name, err)
}
return nil
}
// Send a request to the volume plugin for handling.
// Callers *MUST* close the response when they are done.
-func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint string) (*http.Response, error) {
+func (p *VolumePlugin) sendRequest(toJSON interface{}, endpoint string) (*http.Response, error) {
var (
reqJSON []byte
err error
)
- if hasBody {
+ if toJSON != nil {
reqJSON, err = json.Marshal(toJSON)
if err != nil {
- return nil, errors.Wrapf(err, "error marshalling request JSON for volume plugin %s endpoint %s", p.Name, endpoint)
+ return nil, fmt.Errorf("error marshalling request JSON for volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
}
}
req, err := http.NewRequest("POST", "http://plugin"+endpoint, bytes.NewReader(reqJSON))
if err != nil {
- return nil, errors.Wrapf(err, "error making request to volume plugin %s endpoint %s", p.Name, endpoint)
+ return nil, fmt.Errorf("error making request to volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
}
req.Header.Set("Host", p.getURI())
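
The hasBody flag is gone; a nil payload now simply means "send an empty body", as the updated call sites below show. In sketch form:

	// With a payload, the request struct is JSON-encoded into the POST body.
	resp, err := p.sendRequest(req, createPath)
	// With nil, toJSON == nil and reqJSON stays empty.
	listResp, lerr := p.sendRequest(nil, listPath)
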
@@ -226,7 +229,7 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, hasBody bool, endpoint st
resp, err := p.Client.Do(req)
if err != nil {
- return nil, errors.Wrapf(err, "error sending request to volume plugin %s endpoint %s", p.Name, endpoint)
+ return nil, fmt.Errorf("error sending request to volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
}
// We are *deliberately not closing* response here. It is the
// responsibility of the caller to do so after reading the response.
@@ -240,9 +243,9 @@ func (p *VolumePlugin) makeErrorResponse(err, endpoint, volName string) error {
err = "empty error from plugin"
}
if volName != "" {
- return errors.Wrapf(errors.New(err), "error on %s on volume %s in volume plugin %s", endpoint, volName, p.Name)
+ return fmt.Errorf("error on %s on volume %s in volume plugin %s: %w", endpoint, volName, p.Name, errors.New(err))
}
- return errors.Wrapf(errors.New(err), "error on %s in volume plugin %s", endpoint, p.Name)
+ return fmt.Errorf("error on %s in volume plugin %s: %w", endpoint, p.Name, errors.New(err))
}
// Handle error responses from plugin
@@ -254,12 +257,12 @@ func (p *VolumePlugin) handleErrorResponse(resp *http.Response, endpoint, volNam
if resp.StatusCode != 200 {
errResp, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
+ return fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
}
errStruct := new(volume.ErrorResponse)
if err := json.Unmarshal(errResp, errStruct); err != nil {
- return errors.Wrapf(err, "error unmarshalling JSON response from volume plugin %s", p.Name)
+ return fmt.Errorf("error unmarshalling JSON response from volume plugin %s: %w", p.Name, err)
}
return p.makeErrorResponse(errStruct.Err, endpoint, volName)
@@ -271,7 +274,7 @@ func (p *VolumePlugin) handleErrorResponse(resp *http.Response, endpoint, volNam
// CreateVolume creates a volume in the plugin.
func (p *VolumePlugin) CreateVolume(req *volume.CreateRequest) error {
if req == nil {
- return errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to CreateVolume")
+ return fmt.Errorf("must provide non-nil request to CreateVolume: %w", define.ErrInvalidArg)
}
if err := p.verifyReachable(); err != nil {
@@ -280,7 +283,7 @@ func (p *VolumePlugin) CreateVolume(req *volume.CreateRequest) error {
logrus.Infof("Creating volume %s using plugin %s", req.Name, p.Name)
- resp, err := p.sendRequest(req, true, createPath)
+ resp, err := p.sendRequest(req, createPath)
if err != nil {
return err
}
@@ -297,7 +300,7 @@ func (p *VolumePlugin) ListVolumes() ([]*volume.Volume, error) {
logrus.Infof("Listing volumes using plugin %s", p.Name)
- resp, err := p.sendRequest(nil, false, listPath)
+ resp, err := p.sendRequest(nil, listPath)
if err != nil {
return nil, err
}
@@ -307,15 +310,14 @@ func (p *VolumePlugin) ListVolumes() ([]*volume.Volume, error) {
return nil, err
}
- // TODO: Can probably unify response reading under a helper
volumeRespBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return nil, errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
+ return nil, fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
}
volumeResp := new(volume.ListResponse)
if err := json.Unmarshal(volumeRespBytes, volumeResp); err != nil {
- return nil, errors.Wrapf(err, "error unmarshalling volume plugin %s list response", p.Name)
+ return nil, fmt.Errorf("error unmarshalling volume plugin %s list response: %w", p.Name, err)
}
return volumeResp.Volumes, nil
@@ -324,7 +326,7 @@ func (p *VolumePlugin) ListVolumes() ([]*volume.Volume, error) {
// GetVolume gets a single volume from the plugin.
func (p *VolumePlugin) GetVolume(req *volume.GetRequest) (*volume.Volume, error) {
if req == nil {
- return nil, errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to GetVolume")
+ return nil, fmt.Errorf("must provide non-nil request to GetVolume: %w", define.ErrInvalidArg)
}
if err := p.verifyReachable(); err != nil {
@@ -333,7 +335,7 @@ func (p *VolumePlugin) GetVolume(req *volume.GetRequest) (*volume.Volume, error)
logrus.Infof("Getting volume %s using plugin %s", req.Name, p.Name)
- resp, err := p.sendRequest(req, true, getPath)
+ resp, err := p.sendRequest(req, getPath)
if err != nil {
return nil, err
}
@@ -345,12 +347,12 @@ func (p *VolumePlugin) GetVolume(req *volume.GetRequest) (*volume.Volume, error)
getRespBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return nil, errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
+ return nil, fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
}
getResp := new(volume.GetResponse)
if err := json.Unmarshal(getRespBytes, getResp); err != nil {
- return nil, errors.Wrapf(err, "error unmarshalling volume plugin %s get response", p.Name)
+ return nil, fmt.Errorf("error unmarshalling volume plugin %s get response: %w", p.Name, err)
}
return getResp.Volume, nil
@@ -359,7 +361,7 @@ func (p *VolumePlugin) GetVolume(req *volume.GetRequest) (*volume.Volume, error)
// RemoveVolume removes a single volume from the plugin.
func (p *VolumePlugin) RemoveVolume(req *volume.RemoveRequest) error {
if req == nil {
- return errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to RemoveVolume")
+ return fmt.Errorf("must provide non-nil request to RemoveVolume: %w", define.ErrInvalidArg)
}
if err := p.verifyReachable(); err != nil {
@@ -368,7 +370,7 @@ func (p *VolumePlugin) RemoveVolume(req *volume.RemoveRequest) error {
logrus.Infof("Removing volume %s using plugin %s", req.Name, p.Name)
- resp, err := p.sendRequest(req, true, removePath)
+ resp, err := p.sendRequest(req, removePath)
if err != nil {
return err
}
@@ -380,7 +382,7 @@ func (p *VolumePlugin) RemoveVolume(req *volume.RemoveRequest) error {
// GetVolumePath gets the path the given volume is mounted at.
func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {
if req == nil {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to GetVolumePath")
+ return "", fmt.Errorf("must provide non-nil request to GetVolumePath: %w", define.ErrInvalidArg)
}
if err := p.verifyReachable(); err != nil {
@@ -389,7 +391,7 @@ func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {
logrus.Infof("Getting volume %s path using plugin %s", req.Name, p.Name)
- resp, err := p.sendRequest(req, true, hostVirtualPath)
+ resp, err := p.sendRequest(req, hostVirtualPath)
if err != nil {
return "", err
}
@@ -401,12 +403,12 @@ func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {
pathRespBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return "", errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
+ return "", fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
}
pathResp := new(volume.PathResponse)
if err := json.Unmarshal(pathRespBytes, pathResp); err != nil {
- return "", errors.Wrapf(err, "error unmarshalling volume plugin %s path response", p.Name)
+ return "", fmt.Errorf("error unmarshalling volume plugin %s path response: %w", p.Name, err)
}
return pathResp.Mountpoint, nil
@@ -417,7 +419,7 @@ func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {
// the path the volume has been mounted at.
func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {
if req == nil {
- return "", errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to MountVolume")
+ return "", fmt.Errorf("must provide non-nil request to MountVolume: %w", define.ErrInvalidArg)
}
if err := p.verifyReachable(); err != nil {
@@ -426,7 +428,7 @@ func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {
logrus.Infof("Mounting volume %s using plugin %s for container %s", req.Name, p.Name, req.ID)
- resp, err := p.sendRequest(req, true, mountPath)
+ resp, err := p.sendRequest(req, mountPath)
if err != nil {
return "", err
}
@@ -438,12 +440,12 @@ func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {
mountRespBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return "", errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
+ return "", fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
}
mountResp := new(volume.MountResponse)
if err := json.Unmarshal(mountRespBytes, mountResp); err != nil {
- return "", errors.Wrapf(err, "error unmarshalling volume plugin %s path response", p.Name)
+ return "", fmt.Errorf("error unmarshalling volume plugin %s path response: %w", p.Name, err)
}
return mountResp.Mountpoint, nil
@@ -453,7 +455,7 @@ func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {
// container that is unmounting, used for internal record-keeping by the plugin.
func (p *VolumePlugin) UnmountVolume(req *volume.UnmountRequest) error {
if req == nil {
- return errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to UnmountVolume")
+ return fmt.Errorf("must provide non-nil request to UnmountVolume: %w", define.ErrInvalidArg)
}
if err := p.verifyReachable(); err != nil {
@@ -462,7 +464,7 @@ func (p *VolumePlugin) UnmountVolume(req *volume.UnmountRequest) error {
logrus.Infof("Unmounting volume %s using plugin %s for container %s", req.Name, p.Name, req.ID)
- resp, err := p.sendRequest(req, true, unmountPath)
+ resp, err := p.sendRequest(req, unmountPath)
if err != nil {
return err
}
diff --git a/libpod/pod.go b/libpod/pod.go
index 3c8dc43d4..e059c9416 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "errors"
"fmt"
"sort"
"strings"
@@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/lock"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
// Pod represents a group of containers that are managed together.
@@ -169,46 +169,42 @@ func (p *Pod) CPUQuota() int64 {
return 0
}
-// NetworkMode returns the Network mode given by the user ex: pod, private...
-func (p *Pod) NetworkMode() string {
+// MemoryLimit returns the pod memory limit
+func (p *Pod) MemoryLimit() uint64 {
+ if p.state.InfraContainerID == "" {
+ return 0
+ }
infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
if err != nil {
- return ""
+ return 0
}
- return infra.NetworkMode()
+ conf := infra.config.Spec
+ if conf != nil && conf.Linux != nil && conf.Linux.Resources != nil && conf.Linux.Resources.Memory != nil && conf.Linux.Resources.Memory.Limit != nil {
+ val := *conf.Linux.Resources.Memory.Limit
+ return uint64(val)
+ }
+ return 0
}
-// PidMode returns the PID mode given by the user ex: pod, private...
-func (p *Pod) PidMode() string {
+// NetworkMode returns the Network mode given by the user ex: pod, private...
+func (p *Pod) NetworkMode() string {
infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
if err != nil {
return ""
}
- ctrSpec := infra.config.Spec
- if ctrSpec != nil && ctrSpec.Linux != nil {
- for _, ns := range ctrSpec.Linux.Namespaces {
- if ns.Type == specs.PIDNamespace {
- if ns.Path != "" {
- return fmt.Sprintf("ns:%s", ns.Path)
- }
- return "private"
- }
- }
- return "host"
- }
- return ""
+ return infra.NetworkMode()
}
-// PidMode returns the PID mode given by the user ex: pod, private...
-func (p *Pod) UserNSMode() string {
- infra, err := p.infraContainer()
+// NamespaceMode returns the given namespace mode provided by the user ex: host, private...
+func (p *Pod) NamespaceMode(kind specs.LinuxNamespaceType) string {
+ infra, err := p.runtime.GetContainer(p.state.InfraContainerID)
if err != nil {
return ""
}
ctrSpec := infra.config.Spec
if ctrSpec != nil && ctrSpec.Linux != nil {
for _, ns := range ctrSpec.Linux.Namespaces {
- if ns.Type == specs.UserNamespace {
+ if ns.Type == kind {
if ns.Path != "" {
return fmt.Sprintf("ns:%s", ns.Path)
}
@@ -316,7 +312,7 @@ func (p *Pod) CgroupPath() (string, error) {
return "", err
}
if p.state.InfraContainerID == "" {
- return "", errors.Wrap(define.ErrNoSuchCtr, "pod has no infra container")
+ return "", fmt.Errorf("pod has no infra container: %w", define.ErrNoSuchCtr)
}
return p.state.CgroupPath, nil
}
@@ -390,7 +386,7 @@ func (p *Pod) infraContainer() (*Container, error) {
return nil, err
}
if id == "" {
- return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no infra container")
+ return nil, fmt.Errorf("pod has no infra container: %w", define.ErrNoSuchCtr)
}
return p.runtime.state.Container(id)
@@ -430,7 +426,7 @@ func (p *Pod) GetPodStats(previousContainerStats map[string]*define.ContainerSta
newStats, err := c.GetContainerStats(previousContainerStats[c.ID()])
// If the container wasn't running, don't include it
// but also suppress the error
- if err != nil && errors.Cause(err) != define.ErrCtrStateInvalid {
+ if err != nil && !errors.Is(err, define.ErrCtrStateInvalid) {
return nil, err
}
if err == nil {
@@ -471,3 +467,14 @@ func (p *Pod) initContainers() ([]*Container, error) {
}
return initCons, nil
}
+
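+// Config returns a copy of the pod's configuration, made via JSON deep copy.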
+func (p *Pod) Config() (*PodConfig, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ conf := &PodConfig{}
+
+ err := JSONDeepCopy(p.config, conf)
+
+ return conf, err
+}
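Taken together, the pod accessors now read as below (hedged sketch; specs is the runtime-spec package already imported here):

	pidNS := p.NamespaceMode(specs.PIDNamespace)   // replaces p.PidMode()
	userNS := p.NamespaceMode(specs.UserNamespace) // replaces p.UserNSMode()
	mem := p.MemoryLimit()                         // 0 when unset or without infra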
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index eede896a9..c1d54d55e 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"github.com/containers/common/pkg/cgroups"
@@ -9,7 +10,7 @@ import (
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/parallel"
"github.com/containers/podman/v4/pkg/rootless"
- "github.com/pkg/errors"
+ "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
@@ -31,7 +32,7 @@ func (p *Pod) startInitContainers(ctx context.Context) error {
return err
}
if rc != 0 {
- return errors.Errorf("init container %s exited with code %d", initCon.ID(), rc)
+ return fmt.Errorf("init container %s exited with code %d", initCon.ID(), rc)
}
// If the container is a once init container, we need to remove it
// after it runs
@@ -41,7 +42,7 @@ func (p *Pod) startInitContainers(ctx context.Context) error {
var time *uint
if err := p.runtime.removeContainer(ctx, initCon, false, false, true, time); err != nil {
icLock.Unlock()
- return errors.Wrapf(err, "failed to remove once init container %s", initCon.ID())
+ return fmt.Errorf("failed to remove once init container %s: %w", initCon.ID(), err)
}
// Removing a container this way requires an explicit call to clean up the db
if err := p.runtime.state.RemoveContainerFromPod(p, initCon); err != nil {
@@ -91,12 +92,12 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
// Build a dependency graph of containers in the pod
graph, err := BuildContainerGraph(allCtrs)
if err != nil {
- return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
+ return nil, fmt.Errorf("error generating dependency graph for pod %s: %w", p.ID(), err)
}
// If there are no containers without dependencies, we can't start
// Error out
if len(graph.noDepNodes) == 0 {
- return nil, errors.Wrapf(define.ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
+ return nil, fmt.Errorf("no containers in pod %s have no dependencies, cannot start pod: %w", p.ID(), define.ErrNoSuchCtr)
}
ctrErrors := make(map[string]error)
@@ -108,7 +109,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error starting some containers")
+ return ctrErrors, fmt.Errorf("error starting some containers: %w", define.ErrPodPartialFail)
}
defer p.newPodEvent(events.Start)
return nil, nil
@@ -152,8 +153,8 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
return nil, err
}
- // TODO: There may be cases where it makes sense to order stops based on
- // dependencies. Should we bother with this?
+ // Stopping pods is not ordered by dependency. We haven't seen any case
+ // where this would actually matter.
ctrErrChan := make(map[string]<-chan error)
@@ -162,8 +163,9 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
c := ctr
logrus.Debugf("Adding parallel job to stop container %s", c.ID())
retChan := parallel.Enqueue(ctx, func() error {
- // TODO: Might be better to batch stop and cleanup
- // together?
+ // Can't batch these without forcing Stop() to hold the
+ // lock for the full duration of the timeout.
+ // We probably don't want to do that.
if timeout > -1 {
if err := c.StopWithTimeout(uint(timeout)); err != nil {
return err
@@ -191,7 +193,7 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
// Get returned error for every container we worked on
for id, channel := range ctrErrChan {
if err := <-channel; err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
+ if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
continue
}
ctrErrors[id] = err
@@ -199,7 +201,7 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers")
+ return ctrErrors, fmt.Errorf("error stopping some containers: %w", define.ErrPodPartialFail)
}
if err := p.maybeStopServiceContainer(); err != nil {
@@ -295,7 +297,7 @@ func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
// Get returned error for every container we worked on
for id, channel := range ctrErrChan {
if err := <-channel; err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
+ if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
continue
}
ctrErrors[id] = err
@@ -303,7 +305,7 @@ func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error cleaning up some containers")
+ return ctrErrors, fmt.Errorf("error cleaning up some containers: %w", define.ErrPodPartialFail)
}
if err := p.maybeStopServiceContainer(); err != nil {
@@ -336,10 +338,10 @@ func (p *Pod) Pause(ctx context.Context) (map[string]error, error) {
if rootless.IsRootless() {
cgroupv2, err := cgroups.IsCgroup2UnifiedMode()
if err != nil {
- return nil, errors.Wrap(err, "failed to determine cgroupversion")
+			return nil, fmt.Errorf("failed to determine cgroup version: %w", err)
}
if !cgroupv2 {
- return nil, errors.Wrap(define.ErrNoCgroups, "can not pause pods containing rootless containers with cgroup V1")
+			return nil, fmt.Errorf("cannot pause pods containing rootless containers with cgroup V1: %w", define.ErrNoCgroups)
}
}
@@ -366,7 +368,7 @@ func (p *Pod) Pause(ctx context.Context) (map[string]error, error) {
// Get returned error for every container we worked on
for id, channel := range ctrErrChan {
if err := <-channel; err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
+ if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
continue
}
ctrErrors[id] = err
@@ -374,7 +376,7 @@ func (p *Pod) Pause(ctx context.Context) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error pausing some containers")
+ return ctrErrors, fmt.Errorf("error pausing some containers: %w", define.ErrPodPartialFail)
}
return nil, nil
}
@@ -422,7 +424,7 @@ func (p *Pod) Unpause(ctx context.Context) (map[string]error, error) {
// Get returned error for every container we worked on
for id, channel := range ctrErrChan {
if err := <-channel; err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
+ if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
continue
}
ctrErrors[id] = err
@@ -430,7 +432,7 @@ func (p *Pod) Unpause(ctx context.Context) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error unpausing some containers")
+ return ctrErrors, fmt.Errorf("error unpausing some containers: %w", define.ErrPodPartialFail)
}
return nil, nil
}
@@ -468,7 +470,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
// Build a dependency graph of containers in the pod
graph, err := BuildContainerGraph(allCtrs)
if err != nil {
- return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
+ return nil, fmt.Errorf("error generating dependency graph for pod %s: %w", p.ID(), err)
}
ctrErrors := make(map[string]error)
@@ -477,7 +479,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
// If there are no containers without dependencies, we can't start
// Error out
if len(graph.noDepNodes) == 0 {
- return nil, errors.Wrapf(define.ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
+ return nil, fmt.Errorf("no containers in pod %s have no dependencies, cannot start pod: %w", p.ID(), define.ErrNoSuchCtr)
}
// Traverse the graph beginning at nodes with no dependencies
@@ -486,7 +488,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers")
+ return ctrErrors, fmt.Errorf("error stopping some containers: %w", define.ErrPodPartialFail)
}
p.newPodEvent(events.Stop)
p.newPodEvent(events.Start)
@@ -537,7 +539,7 @@ func (p *Pod) Kill(ctx context.Context, signal uint) (map[string]error, error) {
// Get returned error for every container we worked on
for id, channel := range ctrErrChan {
if err := <-channel; err != nil {
- if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
+ if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
continue
}
ctrErrors[id] = err
@@ -545,7 +547,7 @@ func (p *Pod) Kill(ctx context.Context, signal uint) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error killing some containers")
+ return ctrErrors, fmt.Errorf("error killing some containers: %w", define.ErrPodPartialFail)
}
if err := p.maybeStopServiceContainer(); err != nil {
@@ -672,8 +674,9 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
infraConfig.CPUPeriod = p.CPUPeriod()
infraConfig.CPUQuota = p.CPUQuota()
infraConfig.CPUSetCPUs = p.ResourceLim().CPU.Cpus
- infraConfig.PidNS = p.PidMode()
- infraConfig.UserNS = p.UserNSMode()
+ infraConfig.PidNS = p.NamespaceMode(specs.PIDNamespace)
+ infraConfig.UserNS = p.NamespaceMode(specs.UserNamespace)
+ infraConfig.UtsNS = p.NamespaceMode(specs.UTSNamespace)
namedVolumes, mounts := infra.SortUserVolumes(infra.config.Spec)
inspectMounts, err = infra.GetMounts(namedVolumes, infra.config.ImageVolumes, mounts)
infraSecurity = infra.GetSecurityOptions()
@@ -749,6 +752,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
CPUSetCPUs: p.ResourceLim().CPU.Cpus,
CPUPeriod: p.CPUPeriod(),
CPUQuota: p.CPUQuota(),
+ MemoryLimit: p.MemoryLimit(),
Mounts: inspectMounts,
Devices: devices,
BlkioDeviceReadBps: deviceLimits,
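Because ErrPodPartialFail is now attached with %w, callers can branch on it with errors.Is and then inspect the per-container error map; a hedged sketch:

	ctrErrs, err := pod.Start(ctx)
	if errors.Is(err, define.ErrPodPartialFail) {
		for id, cerr := range ctrErrs {
			logrus.Errorf("Container %s in pod %s: %v", id, pod.ID(), cerr)
		}
	} else if err != nil {
		return err
	}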
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 41f745e6c..a86cd6d21 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -9,7 +9,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/storage/pkg/stringid"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -39,7 +38,7 @@ func (p *Pod) updatePod() error {
// Save pod state to database
func (p *Pod) save() error {
if err := p.runtime.state.SavePod(p); err != nil {
- return errors.Wrapf(err, "error saving pod %s state", p.ID())
+ return fmt.Errorf("error saving pod %s state: %w", p.ID(), err)
}
return nil
@@ -61,7 +60,7 @@ func (p *Pod) refresh() error {
// Retrieve the pod's lock
lock, err := p.runtime.lockManager.AllocateAndRetrieveLock(p.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error retrieving lock %d for pod %s", p.config.LockID, p.ID())
+ return fmt.Errorf("error retrieving lock %d for pod %s: %w", p.config.LockID, p.ID(), err)
}
p.lock = lock
@@ -69,7 +68,7 @@ func (p *Pod) refresh() error {
if p.config.UsePodCgroup {
switch p.runtime.config.Engine.CgroupManager {
case config.SystemdCgroupsManager:
- cgroupPath, err := systemdSliceFromPath(p.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", p.ID()))
+ cgroupPath, err := systemdSliceFromPath(p.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", p.ID()), p.ResourceLim())
if err != nil {
logrus.Errorf("Creating Cgroup for pod %s: %v", p.ID(), err)
}
@@ -81,7 +80,7 @@ func (p *Pod) refresh() error {
logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath)
}
default:
- return errors.Wrapf(define.ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.Engine.CgroupManager)
+ return fmt.Errorf("unknown cgroups manager %s specified: %w", p.runtime.config.Engine.CgroupManager, define.ErrInvalidArg)
}
}
diff --git a/libpod/pod_top_linux.go b/libpod/pod_top_linux.go
index 83a070807..544126dcd 100644
--- a/libpod/pod_top_linux.go
+++ b/libpod/pod_top_linux.go
@@ -53,7 +53,7 @@ func (p *Pod) GetPodPidInformation(descriptors []string) ([]string, error) {
}
}
- // TODO: psgo returns a [][]string to give users the ability to apply
+ // NOTE: psgo returns a [][]string to give users the ability to apply
// filters on the data. We need to change the API here to return
// a [][]string if we want to make use of filtering.
opts := psgo.JoinNamespaceOpts{FillMappings: rootless.IsRootless()}
diff --git a/libpod/reset.go b/libpod/reset.go
index 28d0ee3f6..b3ece03bf 100644
--- a/libpod/reset.go
+++ b/libpod/reset.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -13,12 +14,81 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/storage"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
+// removeAllDirs removes all Podman storage directories. It is intended to be
+// used as a backup for reset() when that function cannot be used due to
+// failures in initializing libpod.
+// It does not expect that all the directories match what is in use by Podman,
+// as this is a common failure point for `system reset`. As such, our ability to
+// interface with containers and pods is somewhat limited.
+// This function assumes that we do not have a working c/storage store.
+func (r *Runtime) removeAllDirs() error {
+ var lastErr error
+
+ // Grab the runtime alive lock.
+ // This ensures that no other Podman process can run while we are doing
+ // a reset, so no race conditions with containers/pods/etc being created
+ // while we are resetting storage.
+ // TODO: maybe want a helper for getting the path? This is duped from
+ // runtime.go
+ runtimeAliveLock := filepath.Join(r.config.Engine.TmpDir, "alive.lck")
+ aliveLock, err := storage.GetLockfile(runtimeAliveLock)
+ if err != nil {
+ logrus.Errorf("Lock runtime alive lock %s: %v", runtimeAliveLock, err)
+ } else {
+ aliveLock.Lock()
+ defer aliveLock.Unlock()
+ }
+
+ // We do not have a store - so we can't really try and remove containers
+ // or pods or volumes...
+ // Try and remove the directories, in hopes that they are unmounted.
+ // This is likely to fail but it's the best we can do.
+
+ // Volume path
+ if err := os.RemoveAll(r.config.Engine.VolumePath); err != nil {
+ lastErr = fmt.Errorf("removing volume path: %w", err)
+ }
+
+ // Tmpdir
+ if err := os.RemoveAll(r.config.Engine.TmpDir); err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Reset: %v", lastErr)
+ }
+ lastErr = fmt.Errorf("removing tmp dir: %w", err)
+ }
+
+ // Runroot
+ if err := os.RemoveAll(r.storageConfig.RunRoot); err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Reset: %v", lastErr)
+ }
+ lastErr = fmt.Errorf("removing run root: %w", err)
+ }
+
+ // Static dir
+ if err := os.RemoveAll(r.config.Engine.StaticDir); err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Reset: %v", lastErr)
+ }
+ lastErr = fmt.Errorf("removing static dir: %w", err)
+ }
+
+ // Graph root
+ if err := os.RemoveAll(r.storageConfig.GraphRoot); err != nil {
+ if lastErr != nil {
+ logrus.Errorf("Reset: %v", lastErr)
+ }
+ lastErr = fmt.Errorf("removing graph root: %w", err)
+ }
+
+ return lastErr
+}
+
// Reset removes all storage
-func (r *Runtime) Reset(ctx context.Context) error {
+func (r *Runtime) reset(ctx context.Context) error {
var timeout *uint
pods, err := r.GetAllPods()
if err != nil {
@@ -26,7 +96,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
}
for _, p := range pods {
if err := r.RemovePod(ctx, p, true, true, timeout); err != nil {
- if errors.Cause(err) == define.ErrNoSuchPod {
+ if errors.Is(err, define.ErrNoSuchPod) {
continue
}
logrus.Errorf("Removing Pod %s: %v", p.ID(), err)
@@ -41,7 +111,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
for _, c := range ctrs {
if err := r.RemoveContainer(ctx, c, true, true, timeout); err != nil {
if err := r.RemoveStorageContainer(c.ID(), true); err != nil {
- if errors.Cause(err) == define.ErrNoSuchCtr {
+ if errors.Is(err, define.ErrNoSuchCtr) {
continue
}
logrus.Errorf("Removing container %s: %v", c.ID(), err)
@@ -64,7 +134,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
}
for _, v := range volumes {
if err := r.RemoveVolume(ctx, v, true, timeout); err != nil {
- if errors.Cause(err) == define.ErrNoSuchVolume {
+ if errors.Is(err, define.ErrNoSuchVolume) {
continue
}
logrus.Errorf("Removing volume %s: %v", v.config.Name, err)
@@ -94,7 +164,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
if prevError != nil {
logrus.Error(prevError)
}
- prevError = errors.Errorf("failed to remove runtime graph root dir %s, since it is the same as XDG_RUNTIME_DIR", graphRoot)
+ prevError = fmt.Errorf("failed to remove runtime graph root dir %s, since it is the same as XDG_RUNTIME_DIR", graphRoot)
} else {
if err := os.RemoveAll(graphRoot); err != nil {
if prevError != nil {
@@ -108,7 +178,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
if prevError != nil {
logrus.Error(prevError)
}
- prevError = errors.Errorf("failed to remove runtime root dir %s, since it is the same as XDG_RUNTIME_DIR", runRoot)
+ prevError = fmt.Errorf("failed to remove runtime root dir %s, since it is the same as XDG_RUNTIME_DIR", runRoot)
} else {
if err := os.RemoveAll(runRoot); err != nil {
if prevError != nil {
@@ -129,7 +199,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
if prevError != nil {
logrus.Error(prevError)
}
- prevError = errors.Errorf("failed to remove runtime tmpdir %s, since it is the same as XDG_RUNTIME_DIR", tempDir)
+ prevError = fmt.Errorf("failed to remove runtime tmpdir %s, since it is the same as XDG_RUNTIME_DIR", tempDir)
} else {
if err := os.RemoveAll(tempDir); err != nil {
if prevError != nil {
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 4efa7b8e8..ea4b34954 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -4,6 +4,7 @@ import (
"bufio"
"bytes"
"context"
+ "errors"
"fmt"
"os"
"os/exec"
@@ -11,6 +12,7 @@ import (
"regexp"
"strconv"
"strings"
+ "sync"
"syscall"
"time"
@@ -39,7 +41,6 @@ import (
"github.com/containers/storage/pkg/unshare"
"github.com/docker/docker/pkg/namesgenerator"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -87,14 +88,18 @@ type Runtime struct {
lockManager lock.Manager
// Worker
- workerShutdown chan bool
- workerChannel chan func()
+ workerChannel chan func()
+ workerGroup sync.WaitGroup
// syslog describes whenever logrus should log to the syslog as well.
// Note that the syslog hook will be enabled early in cmd/podman/syslog_linux.go
// This bool is just needed so that we can set it for netavark interface.
syslog bool
+ // doReset indicates that the runtime should perform a system reset.
+ // All Podman files will be removed.
+ doReset bool
+
// doRenumber indicates that the runtime should perform a lock renumber
// during initialization.
// Once the runtime has been initialized and returned, this variable is
@@ -130,7 +135,7 @@ func SetXdgDirs() error {
return nil
}
- // Setup XDG_RUNTIME_DIR
+ // Set up XDG_RUNTIME_DIR
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir == "" {
@@ -141,7 +146,7 @@ func SetXdgDirs() error {
}
}
if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
- return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
+ return fmt.Errorf("cannot set XDG_RUNTIME_DIR: %w", err)
}
if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" {
@@ -151,14 +156,14 @@ func SetXdgDirs() error {
}
}
- // Setup XDG_CONFIG_HOME
+ // Set up XDG_CONFIG_HOME
if cfgHomeDir := os.Getenv("XDG_CONFIG_HOME"); cfgHomeDir == "" {
cfgHomeDir, err := util.GetRootlessConfigHomeDir()
if err != nil {
return err
}
if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil {
- return errors.Wrapf(err, "cannot set XDG_CONFIG_HOME")
+ return fmt.Errorf("cannot set XDG_CONFIG_HOME: %w", err)
}
}
return nil
@@ -209,7 +214,7 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
// Overwrite config with user-given configuration options
for _, opt := range options {
if err := opt(runtime); err != nil {
- return nil, errors.Wrapf(err, "error configuring runtime")
+ return nil, fmt.Errorf("error configuring runtime: %w", err)
}
}
@@ -220,12 +225,12 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
}
os.Exit(1)
return nil
- }); err != nil && errors.Cause(err) != shutdown.ErrHandlerExists {
+ }); err != nil && !errors.Is(err, shutdown.ErrHandlerExists) {
logrus.Errorf("Registering shutdown handler for libpod: %v", err)
}
if err := shutdown.Start(); err != nil {
- return nil, errors.Wrapf(err, "error starting shutdown signal handler")
+ return nil, fmt.Errorf("error starting shutdown signal handler: %w", err)
}
if err := makeRuntime(runtime); err != nil {
@@ -234,6 +239,11 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
runtime.config.CheckCgroupsAndAdjustConfig()
+ // If resetting storage, do *not* return a runtime.
+ if runtime.doReset {
+ return nil, nil
+ }
+
return runtime, nil
}
@@ -246,10 +256,10 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
lockPath := filepath.Join(runtime.config.Engine.TmpDir, "locks")
manager, err = lock.OpenFileLockManager(lockPath)
if err != nil {
- if os.IsNotExist(errors.Cause(err)) {
+ if errors.Is(err, os.ErrNotExist) {
manager, err = lock.NewFileLockManager(lockPath)
if err != nil {
- return nil, errors.Wrapf(err, "failed to get new file lock manager")
+ return nil, fmt.Errorf("failed to get new file lock manager: %w", err)
}
} else {
return nil, err
@@ -265,19 +275,19 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
switch {
- case os.IsNotExist(errors.Cause(err)):
+ case errors.Is(err, os.ErrNotExist):
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
- return nil, errors.Wrapf(err, "failed to get new shm lock manager")
+ return nil, fmt.Errorf("failed to get new shm lock manager: %w", err)
}
- case errors.Cause(err) == syscall.ERANGE && runtime.doRenumber:
+ case errors.Is(err, syscall.ERANGE) && runtime.doRenumber:
logrus.Debugf("Number of locks does not match - removing old locks")
// ERANGE indicates a lock numbering mismatch.
// Since we're renumbering, this is not fatal.
// Remove the earlier set of locks and recreate.
if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
- return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
+ return nil, fmt.Errorf("error removing libpod locks file %s: %w", lockPath, err)
}
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
@@ -289,7 +299,7 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
}
}
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.Engine.LockType)
+ return nil, fmt.Errorf("unknown lock type %s: %w", runtime.config.Engine.LockType, define.ErrInvalidArg)
}
return manager, nil
}
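
The bulk of these hunks apply one mechanical migration: errors.Wrapf(err, "msg") becomes fmt.Errorf("msg: %w", err), and errors.Cause(err) == target becomes errors.Is(err, target). A runnable sketch of why the stdlib form matches sentinel errors through arbitrarily deep wrapping:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func check(path string) error {
	if _, err := os.Stat(path); err != nil {
		// %w keeps err in the unwrap chain for later inspection.
		return fmt.Errorf("error reading runtime status file %s: %w", path, err)
	}
	return nil
}

func main() {
	err := check("/no/such/file")
	// errors.Is walks the whole chain, replacing the old pattern
	// os.IsNotExist(errors.Cause(err)).
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true
}
```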
@@ -304,11 +314,18 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
runtime.conmonPath = cPath
+ if runtime.noStore && runtime.doReset {
+ return fmt.Errorf("cannot perform system reset if runtime is not creating a store: %w", define.ErrInvalidArg)
+ }
+ if runtime.doReset && runtime.doRenumber {
+ return fmt.Errorf("cannot perform system reset while renumbering locks: %w", define.ErrInvalidArg)
+ }
+
// Make the static files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.StaticDir, 0700); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrap(err, "error creating runtime static files directory")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating runtime static files directory: %w", err)
}
}
@@ -320,9 +337,9 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// package.
switch runtime.config.Engine.StateType {
case config.InMemoryStateStore:
- return errors.Wrapf(define.ErrInvalidArg, "in-memory state is currently disabled")
+ return fmt.Errorf("in-memory state is currently disabled: %w", define.ErrInvalidArg)
case config.SQLiteStateStore:
- return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled")
+ return fmt.Errorf("SQLite state is currently disabled: %w", define.ErrInvalidArg)
case config.BoltDBStateStore:
dbPath := filepath.Join(runtime.config.Engine.StaticDir, "bolt_state.db")
@@ -332,13 +349,27 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
runtime.state = state
default:
- return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.Engine.StateType)
+ return fmt.Errorf("unrecognized state type passed (%v): %w", runtime.config.Engine.StateType, define.ErrInvalidArg)
}
// Grab config from the database so we can reset some defaults
dbConfig, err := runtime.state.GetDBConfig()
if err != nil {
- return errors.Wrapf(err, "error retrieving runtime configuration from database")
+ if runtime.doReset {
+ // We can at least delete the DB and the static files
+ // directory.
+ // Can't safely touch anything else because we aren't
+ // sure of other directories.
+ if err := runtime.state.Close(); err != nil {
+ logrus.Errorf("Closing database connection: %v", err)
+ } else {
+ if err := os.RemoveAll(runtime.config.Engine.StaticDir); err != nil {
+ logrus.Errorf("Removing static files directory %v: %v", runtime.config.Engine.StaticDir, err)
+ }
+ }
+ }
+
+ return fmt.Errorf("error retrieving runtime configuration from database: %w", err)
}
runtime.mergeDBConfig(dbConfig)
@@ -371,11 +402,17 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// Validate our config against the database, now that we've set our
// final storage configuration
if err := runtime.state.ValidateDBConfig(runtime); err != nil {
- return err
+ // If we are performing a storage reset: continue on with a
+ // warning. Otherwise we can't `system reset` after a change to
+ // the core paths.
+ if !runtime.doReset {
+ return err
+ }
+ logrus.Errorf("Runtime paths differ from those stored in database, storage reset may not remove all files")
}
if err := runtime.state.SetNamespace(runtime.config.Engine.Namespace); err != nil {
- return errors.Wrapf(err, "error setting libpod namespace in state")
+ return fmt.Errorf("error setting libpod namespace in state: %w", err)
}
logrus.Debugf("Set libpod namespace to %q", runtime.config.Engine.Namespace)
@@ -393,6 +430,14 @@ func makeRuntime(runtime *Runtime) (retErr error) {
} else if runtime.noStore {
logrus.Debug("No store required. Not opening container store.")
} else if err := runtime.configureStore(); err != nil {
+ // Make a best-effort attempt to clean up if performing a
+ // storage reset.
+ if runtime.doReset {
+ if err := runtime.removeAllDirs(); err != nil {
+ logrus.Errorf("Removing libpod directories: %v", err)
+ }
+ }
+
return err
}
defer func() {
@@ -405,13 +450,12 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
}()
- // Setup the eventer
+ // Set up the eventer
eventer, err := runtime.newEventer()
if err != nil {
return err
}
runtime.eventer = eventer
- // TODO: events for libimage
// Set up containers/image
if runtime.imageContext == nil {
@@ -424,16 +468,16 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// Create the tmpDir
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrap(err, "error creating tmpdir")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating tmpdir: %w", err)
}
}
// Create events log dir
if err := os.MkdirAll(filepath.Dir(runtime.config.Engine.EventsLogFilePath), 0700); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrap(err, "error creating events dirs")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating events dirs: %w", err)
}
}
@@ -470,7 +514,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
} else {
ociRuntime, ok := runtime.ociRuntimes[runtime.config.Engine.OCIRuntime]
if !ok {
- return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.Engine.OCIRuntime)
+ return fmt.Errorf("default OCI runtime %q not found: %w", runtime.config.Engine.OCIRuntime, define.ErrInvalidArg)
}
runtime.defaultOCIRuntime = ociRuntime
}
@@ -479,23 +523,23 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// Do we have at least one valid OCI runtime?
if len(runtime.ociRuntimes) == 0 {
- return errors.Wrapf(define.ErrInvalidArg, "no OCI runtime has been configured")
+ return fmt.Errorf("no OCI runtime has been configured: %w", define.ErrInvalidArg)
}
// Do we have a default runtime?
if runtime.defaultOCIRuntime == nil {
- return errors.Wrapf(define.ErrInvalidArg, "no default OCI runtime was configured")
+ return fmt.Errorf("no default OCI runtime was configured: %w", define.ErrInvalidArg)
}
// Make the per-boot files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0755); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrapf(err, "error creating runtime temporary files directory")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating runtime temporary files directory: %w", err)
}
}
- // the store is only setup when we are in the userns so we do the same for the network interface
+ // the store is only set up when we are in the userns so we do the same for the network interface
if !needsUserns {
netBackend, netInterface, err := network.NetworkBackend(runtime.store, runtime.config, runtime.syslog)
if err != nil {
@@ -512,12 +556,10 @@ func makeRuntime(runtime *Runtime) (retErr error) {
runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
aliveLock, err := storage.GetLockfile(runtimeAliveLock)
if err != nil {
- return errors.Wrapf(err, "error acquiring runtime init lock")
+ return fmt.Errorf("error acquiring runtime init lock: %w", err)
}
// Acquire the lock and hold it until we return
// This ensures that no two processes will be in runtime.refresh at once
- // TODO: we can't close the FD in this lock, so we should keep it around
- // and use it to lock important operations
aliveLock.Lock()
doRefresh := false
defer func() {
@@ -544,7 +586,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
aliveLock.Unlock() // Unlock to avoid deadlock as BecomeRootInUserNS will reexec.
pausePid, err := util.GetRootlessPauseProcessPidPathGivenDir(runtime.config.Engine.TmpDir)
if err != nil {
- return errors.Wrapf(err, "could not get pause process pid file path")
+ return fmt.Errorf("could not get pause process pid file path: %w", err)
}
became, ret, err := rootless.BecomeRootInUserNS(pausePid)
if err != nil {
@@ -565,10 +607,10 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// This will trigger on first use as well, but refreshing an
// empty state only creates a single file
// As such, it's not really a performance concern
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
doRefresh = true
} else {
- return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile)
+ return fmt.Errorf("error reading runtime status file %s: %w", runtimeAliveFile, err)
}
}
@@ -577,6 +619,18 @@ func makeRuntime(runtime *Runtime) (retErr error) {
return err
}
+ // If we're resetting storage, do it now.
+ // We will not return a valid runtime.
+ // TODO: Plumb this context out so it can be set.
+ if runtime.doReset {
+ // Mark the runtime as valid, so normal functionality "mostly"
+ // works and we can use regular functions to remove
+ // ctrs/pods/etc
+ runtime.valid = true
+
+ return runtime.reset(context.Background())
+ }
+
// If we're renumbering locks, do it now.
// It breaks out of normal runtime init, and will not return a valid
// runtime.
@@ -650,14 +704,14 @@ func findConmon(conmonPaths []string) (string, error) {
}
if foundOutdatedConmon {
- return "", errors.Wrapf(define.ErrConmonOutdated,
- "please update to v%d.%d.%d or later",
- conmonMinMajorVersion, conmonMinMinorVersion, conmonMinPatchVersion)
+ return "", fmt.Errorf(
+ "please update to v%d.%d.%d or later: %w",
+ conmonMinMajorVersion, conmonMinMinorVersion, conmonMinPatchVersion, define.ErrConmonOutdated)
}
- return "", errors.Wrapf(define.ErrInvalidArg,
- "could not find a working conmon binary (configured options: %v)",
- conmonPaths)
+ return "", fmt.Errorf(
+ "could not find a working conmon binary (configured options: %v): %w",
+ conmonPaths, define.ErrInvalidArg)
}
// probeConmon calls conmon --version and verifies it is a new enough version for
@@ -674,11 +728,11 @@ func probeConmon(conmonBinary string) error {
matches := r.FindStringSubmatch(out.String())
if len(matches) != 4 {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
major, err := strconv.Atoi(matches[1])
if err != nil {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
if major < conmonMinMajorVersion {
return define.ErrConmonOutdated
@@ -689,7 +743,7 @@ func probeConmon(conmonBinary string) error {
minor, err := strconv.Atoi(matches[2])
if err != nil {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
if minor < conmonMinMinorVersion {
return define.ErrConmonOutdated
@@ -700,7 +754,7 @@ func probeConmon(conmonBinary string) error {
patch, err := strconv.Atoi(matches[3])
if err != nil {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
if patch < conmonMinPatchVersion {
return define.ErrConmonOutdated
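
probeConmon parses `conmon --version` with a regex and compares major, minor, and patch piecewise, returning ErrConmonOutdated on the first component that falls short. A standalone sketch of that cascade (the regex and minimum version here are illustrative, not the values libpod ships):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var versionRE = regexp.MustCompile(`version (\d+)\.(\d+)\.(\d+)`)

// outdated reports whether the probed version is older than min.
func outdated(output string, min [3]int) (bool, error) {
	m := versionRE.FindStringSubmatch(output)
	if len(m) != 4 {
		return false, fmt.Errorf("cannot parse version from %q", output)
	}
	for i := 0; i < 3; i++ {
		v, err := strconv.Atoi(m[i+1])
		if err != nil {
			return false, err
		}
		if v != min[i] {
			// The first differing component decides, like the
			// major/minor/patch cascade in probeConmon.
			return v < min[i], nil
		}
	}
	return false, nil
}

func main() {
	old, err := outdated("conmon version 2.0.29", [3]int{2, 0, 30})
	fmt.Println(old, err) // true <nil>
}
```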
@@ -744,7 +798,7 @@ func (r *Runtime) GetConfig() (*config.Config, error) {
// Copy so the caller won't be able to modify the actual config
if err := JSONDeepCopy(rtConfig, config); err != nil {
- return nil, errors.Wrapf(err, "error copying config")
+ return nil, fmt.Errorf("error copying config: %w", err)
}
return config, nil
@@ -820,15 +874,12 @@ func (r *Runtime) DeferredShutdown(force bool) {
// still containers running or mounted
func (r *Runtime) Shutdown(force bool) error {
if !r.valid {
- return define.ErrRuntimeStopped
+ return nil
}
- if r.workerShutdown != nil {
- // Signal the worker routine to shutdown. The routine will
- // process all pending work items and then read from the
- // channel; we're blocked until all work items have been
- // processed.
- r.workerShutdown <- true
+ if r.workerChannel != nil {
+ r.workerGroup.Wait()
+ close(r.workerChannel)
}
r.valid = false
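
Shutdown no longer signals a dedicated shutdown channel; it waits for queued work to drain via a WaitGroup, then closes the work channel so the worker loop exits. A minimal sketch of that drain-then-close pattern (assumed shape, not libpod's actual worker types):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	work := make(chan string, 8)
	var wg sync.WaitGroup

	go func() { // worker goroutine
		for task := range work {
			fmt.Println("processed", task)
			wg.Done()
		}
	}()

	for _, t := range []string{"a", "b", "c"} {
		wg.Add(1) // one outstanding item per queued task
		work <- t
	}

	wg.Wait()   // block until the queue has drained
	close(work) // worker's range loop terminates
}
```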
@@ -858,7 +909,7 @@ func (r *Runtime) Shutdown(force bool) error {
// Note that the libimage runtime shuts down the store.
if err := r.libimageRuntime.Shutdown(force); err != nil {
- lastError = errors.Wrapf(err, "error shutting down container storage")
+ lastError = fmt.Errorf("error shutting down container storage: %w", err)
}
}
if err := r.state.Close(); err != nil {
@@ -890,15 +941,15 @@ func (r *Runtime) refresh(alivePath string) error {
// Containers, pods, and volumes must also reacquire their locks.
ctrs, err := r.state.AllContainers()
if err != nil {
- return errors.Wrapf(err, "error retrieving all containers from state")
+ return fmt.Errorf("error retrieving all containers from state: %w", err)
}
pods, err := r.state.AllPods()
if err != nil {
- return errors.Wrapf(err, "error retrieving all pods from state")
+ return fmt.Errorf("error retrieving all pods from state: %w", err)
}
vols, err := r.state.AllVolumes()
if err != nil {
- return errors.Wrapf(err, "error retrieving all volumes from state")
+ return fmt.Errorf("error retrieving all volumes from state: %w", err)
}
// No locks are taken during pod, volume, and container refresh.
// Furthermore, the pod/volume/container refresh() functions are not
@@ -926,7 +977,7 @@ func (r *Runtime) refresh(alivePath string) error {
// Create a file indicating the runtime is alive and ready
file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644)
if err != nil {
- return errors.Wrap(err, "error creating runtime status file")
+ return fmt.Errorf("error creating runtime status file: %w", err)
}
defer file.Close()
@@ -947,13 +998,13 @@ func (r *Runtime) generateName() (string, error) {
// Make sure container with this name does not exist
if _, err := r.state.LookupContainer(name); err == nil {
continue
- } else if errors.Cause(err) != define.ErrNoSuchCtr {
+ } else if !errors.Is(err, define.ErrNoSuchCtr) {
return "", err
}
// Make sure pod with this name does not exist
if _, err := r.state.LookupPod(name); err == nil {
continue
- } else if errors.Cause(err) != define.ErrNoSuchPod {
+ } else if !errors.Is(err, define.ErrNoSuchPod) {
return "", err
}
return name, nil
@@ -1072,7 +1123,7 @@ func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) {
if !r.storageSet.GraphDriverNameSet && dbConfig.GraphDriver != "" {
if r.storageConfig.GraphDriverName != dbConfig.GraphDriver &&
r.storageConfig.GraphDriverName != "" {
- logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve",
+ logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve. May prevent use of images created by other tools",
r.storageConfig.GraphDriverName, dbConfig.GraphDriver)
}
r.storageConfig.GraphDriverName = dbConfig.GraphDriver
@@ -1143,19 +1194,21 @@ func (r *Runtime) reloadStorageConf() error {
return nil
}
-// getVolumePlugin gets a specific volume plugin given its name.
-func (r *Runtime) getVolumePlugin(name string) (*plugin.VolumePlugin, error) {
+// getVolumePlugin gets a specific volume plugin.
+func (r *Runtime) getVolumePlugin(volConfig *VolumeConfig) (*plugin.VolumePlugin, error) {
// There is no plugin for local.
+ name := volConfig.Driver
+ timeout := volConfig.Timeout
if name == define.VolumeDriverLocal || name == "" {
return nil, nil
}
pluginPath, ok := r.config.Engine.VolumePlugins[name]
if !ok {
- return nil, errors.Wrapf(define.ErrMissingPlugin, "no volume plugin with name %s available", name)
+ return nil, fmt.Errorf("no volume plugin with name %s available: %w", name, define.ErrMissingPlugin)
}
- return plugin.GetVolumePlugin(name, pluginPath)
+ return plugin.GetVolumePlugin(name, pluginPath, timeout)
}
// GetSecretsStorageDir returns the directory that the secrets manager should take
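
getVolumePlugin now takes the whole VolumeConfig so the driver name and a per-volume timeout travel together into plugin.GetVolumePlugin. A hedged sketch of the lookup shape (types, fields, and the socket path are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

var errMissingPlugin = errors.New("required plugin missing") // stand-in sentinel

// volumeConfig mimics the relevant fields: the driver selects the plugin
// and the optional timeout rides along with the volume.
type volumeConfig struct {
	Driver  string
	Timeout *uint
}

func getPluginPath(plugins map[string]string, cfg volumeConfig) (string, error) {
	if cfg.Driver == "" || cfg.Driver == "local" {
		return "", nil // no plugin needed for the local driver
	}
	path, ok := plugins[cfg.Driver]
	if !ok {
		return "", fmt.Errorf("no volume plugin with name %s available: %w",
			cfg.Driver, errMissingPlugin)
	}
	return path, nil
}

func main() {
	plugins := map[string]string{"nfs": "/run/podman/plugins/nfs.sock"}
	t := uint(10)
	p, err := getPluginPath(plugins, volumeConfig{Driver: "nfs", Timeout: &t})
	fmt.Println(p, err)
}
```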
diff --git a/libpod/runtime_cstorage.go b/libpod/runtime_cstorage.go
index 1c528e1b8..047375628 100644
--- a/libpod/runtime_cstorage.go
+++ b/libpod/runtime_cstorage.go
@@ -1,11 +1,12 @@
package libpod
import (
+ "errors"
+ "fmt"
"time"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/storage"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -38,7 +39,7 @@ func (r *Runtime) ListStorageContainers() ([]*StorageContainer, error) {
// Look up if container is in state
hasCtr, err := r.state.HasContainer(ctr.ID)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up container %s in state", ctr.ID)
+ return nil, fmt.Errorf("error looking up container %s in state: %w", ctr.ID, err)
}
storageCtr.PresentInLibpod = hasCtr
@@ -60,20 +61,20 @@ func (r *Runtime) StorageContainer(idOrName string) (*storage.Container, error)
func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
targetID, err := r.store.Lookup(idOrName)
if err != nil {
- if errors.Cause(err) == storage.ErrLayerUnknown {
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID or name %q found", idOrName)
+ if errors.Is(err, storage.ErrLayerUnknown) {
+ return fmt.Errorf("no container with ID or name %q found: %w", idOrName, define.ErrNoSuchCtr)
}
- return errors.Wrapf(err, "error looking up container %q", idOrName)
+ return fmt.Errorf("error looking up container %q: %w", idOrName, err)
}
// Lookup returns an ID but it's not guaranteed to be a container ID.
// So we can still error here.
ctr, err := r.store.Container(targetID)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
- return errors.Wrapf(define.ErrNoSuchCtr, "%q does not refer to a container", idOrName)
+ if errors.Is(err, storage.ErrContainerUnknown) {
+ return fmt.Errorf("%q does not refer to a container: %w", idOrName, define.ErrNoSuchCtr)
}
- return errors.Wrapf(err, "error retrieving container %q", idOrName)
+ return fmt.Errorf("error retrieving container %q: %w", idOrName, err)
}
// Error out if the container exists in libpod
@@ -82,13 +83,13 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
return err
}
if exists {
- return errors.Wrapf(define.ErrCtrExists, "refusing to remove %q as it exists in libpod as container %s", idOrName, ctr.ID)
+ return fmt.Errorf("refusing to remove %q as it exists in libpod as container %s: %w", idOrName, ctr.ID, define.ErrCtrExists)
}
if !force {
timesMounted, err := r.store.Mounted(ctr.ID)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
// Container was removed from under us.
// It's gone, so don't bother erroring.
logrus.Infof("Storage for container %s already removed", ctr.ID)
@@ -97,7 +98,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
logrus.Warnf("Checking if container %q is mounted, attempting to delete: %v", idOrName, err)
}
if timesMounted > 0 {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %q is mounted and cannot be removed without using force", idOrName)
+ return fmt.Errorf("container %q is mounted and cannot be removed without using force: %w", idOrName, define.ErrCtrStateInvalid)
}
} else if _, err := r.store.Unmount(ctr.ID, true); err != nil {
if errors.Is(err, storage.ErrContainerUnknown) {
@@ -109,12 +110,12 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
}
if err := r.store.DeleteContainer(ctr.ID); err != nil {
- if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown) {
// Container again gone, no error
logrus.Infof("Storage for container %s already removed", ctr.ID)
return nil
}
- return errors.Wrapf(err, "error removing storage for container %q", idOrName)
+ return fmt.Errorf("error removing storage for container %q: %w", idOrName, err)
}
return nil
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 2eaa77572..ce0fd869d 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"os"
"path"
@@ -13,6 +14,7 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/cgroups"
"github.com/containers/common/pkg/config"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/libpod/shutdown"
@@ -25,7 +27,6 @@ import (
"github.com/docker/go-units"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -85,7 +86,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
ctr, err := r.initContainerVariables(rSpec, config)
if err != nil {
- return nil, errors.Wrapf(err, "error initializing container variables")
+ return nil, fmt.Errorf("error initializing container variables: %w", err)
}
// For an imported checkpoint no one has ever set the StartedTime. Set it now.
ctr.state.StartedTime = time.Now()
@@ -125,7 +126,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
// the config was re-written.
newConf, err := r.state.GetContainerConfig(ctr.ID())
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving container %s configuration from DB to remove", ctr.ID())
+ return nil, fmt.Errorf("error retrieving container %s configuration from DB to remove: %w", ctr.ID(), err)
}
ctr.config = newConf
@@ -142,7 +143,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
// Set config back to the old name so reflect what is actually
// present in the DB.
ctr.config.Name = oldName
- return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
+ return nil, fmt.Errorf("error renaming container %s: %w", ctr.ID(), err)
}
// Step 3: rename the container in c/storage.
@@ -161,7 +162,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
if rSpec == nil {
- return nil, errors.Wrapf(define.ErrInvalidArg, "must provide a valid runtime spec to create container")
+ return nil, fmt.Errorf("must provide a valid runtime spec to create container: %w", define.ErrInvalidArg)
}
ctr := new(Container)
ctr.config = new(ContainerConfig)
@@ -171,7 +172,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
ctr.config.ID = stringid.GenerateNonCryptoID()
size, err := units.FromHumanSize(r.config.Containers.ShmSize)
if err != nil {
- return nil, errors.Wrapf(err, "converting containers.conf ShmSize %s to an int", r.config.Containers.ShmSize)
+ return nil, fmt.Errorf("converting containers.conf ShmSize %s to an int: %w", r.config.Containers.ShmSize, err)
}
ctr.config.ShmSize = size
ctr.config.NoShm = false
@@ -183,7 +184,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
// This is a restore from an imported checkpoint
ctr.restoreFromCheckpoint = true
if err := JSONDeepCopy(config, ctr.config); err != nil {
- return nil, errors.Wrapf(err, "error copying container config for restore")
+ return nil, fmt.Errorf("error copying container config for restore: %w", err)
}
// If the ID is empty a new name for the restored container was requested
if ctr.config.ID == "" {
@@ -223,12 +224,12 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
ctr, err = r.initContainerVariables(rSpec, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error initializing container variables")
+ return nil, fmt.Errorf("error initializing container variables: %w", err)
}
for _, option := range options {
if err := option(ctr); err != nil {
- return nil, errors.Wrapf(err, "error running container create option")
+ return nil, fmt.Errorf("error running container create option: %w", err)
}
}
@@ -246,8 +247,8 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
for _, opts := range ctr.config.Networks {
if opts.InterfaceName != "" {
// check that no name is assigned to more than one network
- if util.StringInSlice(opts.InterfaceName, usedIfNames) {
- return nil, errors.Errorf("network interface name %q is already assigned to another network", opts.InterfaceName)
+ if cutil.StringInSlice(opts.InterfaceName, usedIfNames) {
+ return nil, fmt.Errorf("network interface name %q is already assigned to another network", opts.InterfaceName)
}
usedIfNames = append(usedIfNames, opts.InterfaceName)
}
@@ -262,7 +263,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
if opts.InterfaceName == "" {
for i < 100000 {
ifName := fmt.Sprintf("eth%d", i)
- if !util.StringInSlice(ifName, usedIfNames) {
+ if !cutil.StringInSlice(ifName, usedIfNames) {
opts.InterfaceName = ifName
usedIfNames = append(usedIfNames, ifName)
break
@@ -295,7 +296,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Allocate a lock for the container
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return nil, errors.Wrapf(err, "error allocating lock for new container")
+ return nil, fmt.Errorf("error allocating lock for new container: %w", err)
}
ctr.lock = lock
ctr.config.LockID = ctr.lock.ID()
@@ -318,7 +319,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
} else {
ociRuntime, ok := r.ociRuntimes[ctr.config.OCIRuntime]
if !ok {
- return nil, errors.Wrapf(define.ErrInvalidArg, "requested OCI runtime %s is not available", ctr.config.OCIRuntime)
+ return nil, fmt.Errorf("requested OCI runtime %s is not available: %w", ctr.config.OCIRuntime, define.ErrInvalidArg)
}
ctr.ociRuntime = ociRuntime
}
@@ -326,7 +327,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Check NoCgroups support
if ctr.config.NoCgroups {
if !ctr.ociRuntime.SupportsNoCgroups() {
- return nil, errors.Wrapf(define.ErrInvalidArg, "requested OCI runtime %s is not compatible with NoCgroups", ctr.ociRuntime.Name())
+ return nil, fmt.Errorf("requested OCI runtime %s is not compatible with NoCgroups: %w", ctr.ociRuntime.Name(), define.ErrInvalidArg)
}
}
@@ -335,7 +336,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Get the pod from state
pod, err = r.state.Pod(ctr.config.Pod)
if err != nil {
- return nil, errors.Wrapf(err, "cannot add container %s to pod %s", ctr.ID(), ctr.config.Pod)
+ return nil, fmt.Errorf("cannot add container %s to pod %s: %w", ctr.ID(), ctr.config.Pod, err)
}
}
@@ -349,14 +350,14 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
if pod != nil && pod.config.UsePodCgroup && !ctr.IsInfra() {
podCgroup, err := pod.CgroupPath()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID())
+ return nil, fmt.Errorf("error retrieving pod %s cgroup: %w", pod.ID(), err)
}
expectPodCgroup, err := ctr.expectPodCgroup()
if err != nil {
return nil, err
}
if expectPodCgroup && podCgroup == "" {
- return nil, errors.Wrapf(define.ErrInternal, "pod %s cgroup is not set", pod.ID())
+ return nil, fmt.Errorf("pod %s cgroup is not set: %w", pod.ID(), define.ErrInternal)
}
canUseCgroup := !rootless.IsRootless() || isRootlessCgroupSet(podCgroup)
if canUseCgroup {
@@ -366,7 +367,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctr.config.CgroupParent = CgroupfsDefaultCgroupParent
}
} else if strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+ return nil, fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg)
}
case config.SystemdCgroupsManager:
if ctr.config.CgroupParent == "" {
@@ -374,7 +375,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
case pod != nil && pod.config.UsePodCgroup && !ctr.IsInfra():
podCgroup, err := pod.CgroupPath()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID())
+ return nil, fmt.Errorf("error retrieving pod %s cgroup: %w", pod.ID(), err)
}
ctr.config.CgroupParent = podCgroup
case rootless.IsRootless() && ctr.config.CgroupsMode != cgroupSplit:
@@ -383,10 +384,10 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctr.config.CgroupParent = SystemdDefaultCgroupParent
}
} else if len(ctr.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
+ return nil, fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg)
}
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported Cgroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager)
+ return nil, fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg)
}
}
@@ -469,8 +470,8 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctrNamedVolumes = append(ctrNamedVolumes, dbVol)
// The volume exists, we're good
continue
- } else if errors.Cause(err) != define.ErrNoSuchVolume {
- return nil, errors.Wrapf(err, "error retrieving named volume %s for new container", vol.Name)
+ } else if !errors.Is(err, define.ErrNoSuchVolume) {
+ return nil, fmt.Errorf("error retrieving named volume %s for new container: %w", vol.Name, err)
}
}
@@ -501,9 +502,9 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
volOptions = append(volOptions, parsedOptions...)
}
}
- newVol, err := r.newVolume(volOptions...)
+ newVol, err := r.newVolume(false, volOptions...)
if err != nil {
- return nil, errors.Wrapf(err, "error creating named volume %q", vol.Name)
+ return nil, fmt.Errorf("error creating named volume %q: %w", vol.Name, err)
}
ctrNamedVolumes = append(ctrNamedVolumes, newVol)
@@ -526,7 +527,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctr.config.ShmDir = filepath.Join(ctr.bundlePath(), "shm")
if err := os.MkdirAll(ctr.config.ShmDir, 0700); err != nil {
if !os.IsExist(err) {
- return nil, errors.Wrap(err, "unable to create shm dir")
+ return nil, fmt.Errorf("unable to create shm dir: %w", err)
}
}
ctr.config.Mounts = append(ctr.config.Mounts, ctr.config.ShmDir)
@@ -595,7 +596,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// exist once we're done.
newConf, err := r.state.GetContainerConfig(c.ID())
if err != nil {
- return errors.Wrapf(err, "error retrieving container %s configuration from DB to remove", c.ID())
+ return fmt.Errorf("error retrieving container %s configuration from DB to remove: %w", c.ID(), err)
}
c.config = newConf
@@ -610,12 +611,12 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if c.config.Pod != "" && !removePod {
pod, err = r.state.Pod(c.config.Pod)
if err != nil {
- return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", c.ID(), pod.ID())
+ return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), pod.ID(), err)
}
// Lock the pod while we're removing container
if pod.config.LockID == c.config.LockID {
- return errors.Wrapf(define.ErrWillDeadlock, "container %s and pod %s share lock ID %d", c.ID(), pod.ID(), c.config.LockID)
+ return fmt.Errorf("container %s and pod %s share lock ID %d: %w", c.ID(), pod.ID(), c.config.LockID, define.ErrWillDeadlock)
}
pod.lock.Lock()
defer pod.lock.Unlock()
@@ -625,7 +626,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
infraID := pod.state.InfraContainerID
if c.ID() == infraID {
- return errors.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
+ return fmt.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
}
}
@@ -663,9 +664,6 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
}
if c.state.State == define.ContainerStatePaused {
- if err := c.ociRuntime.KillContainer(c, 9, false); err != nil {
- return err
- }
isV2, err := cgroups.IsCgroup2UnifiedMode()
if err != nil {
return err
@@ -676,6 +674,9 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
return err
}
}
+ if err := c.ociRuntime.KillContainer(c, 9, false); err != nil {
+ return err
+ }
// Need to update container state to make sure we know it's stopped
if err := c.waitForExitFileAndSync(); err != nil {
return err
@@ -692,7 +693,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
}
if len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
- return errors.Wrapf(define.ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
+ return fmt.Errorf("container %s has dependent containers which must be removed before it: %s: %w", c.ID(), depsStr, define.ErrCtrExists)
}
}
@@ -704,8 +705,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
}
// Ignore ErrConmonDead - we couldn't retrieve the container's
// exit code properly, but it's still stopped.
- if err := c.stop(time); err != nil && errors.Cause(err) != define.ErrConmonDead {
- return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID())
+ if err := c.stop(time); err != nil && !errors.Is(err, define.ErrConmonDead) {
+ return fmt.Errorf("cannot remove container %s as it could not be stopped: %w", c.ID(), err)
}
// We unlocked as part of stop() above - there's a chance someone
@@ -714,6 +715,10 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// Do a quick ping of the database to check if the container
// still exists.
if ok, _ := r.state.HasContainer(c.ID()); !ok {
+ // When the container has already been removed, the OCI runtime directory may still remain; clean it up.
+ if err := c.cleanupRuntime(ctx); err != nil {
+ return fmt.Errorf("error cleaning up container %s from OCI runtime: %w", c.ID(), err)
+ }
return nil
}
}
@@ -724,7 +729,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// Do this before we set ContainerStateRemoving, to ensure that we can
// actually remove from the OCI runtime.
if err := c.cleanup(ctx); err != nil {
- cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID())
+ cleanupErr = fmt.Errorf("error cleaning up container %s: %w", c.ID(), err)
}
// Set ContainerStateRemoving
@@ -734,7 +739,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if cleanupErr != nil {
logrus.Errorf(err.Error())
}
- return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID())
+ return fmt.Errorf("unable to set container %s removing state in database: %w", c.ID(), err)
}
// Remove all active exec sessions
@@ -754,7 +759,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if cleanupErr == nil {
cleanupErr = err
} else {
- logrus.Errorf("Cleanup storage: %v", err)
+ logrus.Errorf("Cleaning up storage: %v", err)
}
}
@@ -784,7 +789,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// Deallocate the container's lock
if err := c.lock.Free(); err != nil {
if cleanupErr == nil {
- cleanupErr = errors.Wrapf(err, "error freeing lock for container %s", c.ID())
+ cleanupErr = fmt.Errorf("error freeing lock for container %s: %w", c.ID(), err)
} else {
logrus.Errorf("Free container lock: %v", err)
}
@@ -804,16 +809,16 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if !volume.Anonymous() {
continue
}
- if err := runtime.removeVolume(ctx, volume, false, timeout); err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
- if errors.Cause(err) == define.ErrVolumeBeingUsed {
+ if err := runtime.removeVolume(ctx, volume, false, timeout, false); err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
+ if errors.Is(err, define.ErrVolumeBeingUsed) {
// Ignore error, since podman will report original error
volumesFrom, _ := c.volumesFrom()
if len(volumesFrom) > 0 {
- logrus.Debugf("Cleanup volume not possible since volume is in use (%s)", v)
+ logrus.Debugf("Cleaning up volume not possible since volume is in use (%s)", v)
continue
}
}
- logrus.Errorf("Cleanup volume (%s): %v", v, err)
+ logrus.Errorf("Cleaning up volume (%s): %v", v, err)
}
}
}
@@ -886,7 +891,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
c := new(Container)
c.config, err = r.state.GetContainerConfig(id)
if err != nil {
- return id, errors.Wrapf(err, "failed to retrieve config for ctr ID %q", id)
+ return id, fmt.Errorf("failed to retrieve config for ctr ID %q: %w", id, err)
}
c.state = new(ContainerState)
@@ -898,7 +903,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
if c.config.Pod != "" {
pod, err = r.state.Pod(c.config.Pod)
if err != nil {
- return id, errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", c.ID(), pod.ID())
+ return id, fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), pod.ID(), err)
}
// Lock the pod while we're removing container
@@ -913,7 +918,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
return "", err
}
if c.ID() == infraID {
- return id, errors.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
+ return id, fmt.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
}
}
@@ -962,8 +967,8 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
if !volume.Anonymous() {
continue
}
- if err := r.removeVolume(ctx, volume, false, timeout); err != nil && err != define.ErrNoSuchVolume && err != define.ErrVolumeBeingUsed {
- logrus.Errorf("Cleanup volume (%s): %v", v, err)
+ if err := r.removeVolume(ctx, volume, false, timeout, false); err != nil && err != define.ErrNoSuchVolume && err != define.ErrVolumeBeingUsed {
+ logrus.Errorf("Cleaning up volume (%s): %v", v, err)
}
}
}
@@ -1110,7 +1115,7 @@ func (r *Runtime) GetContainersByList(containers []string) ([]*Container, error)
for _, inputContainer := range containers {
ctr, err := r.LookupContainer(inputContainer)
if err != nil {
- return ctrs, errors.Wrapf(err, "unable to lookup container %s", inputContainer)
+ return ctrs, fmt.Errorf("unable to look up container %s: %w", inputContainer, err)
}
ctrs = append(ctrs, ctr)
}
@@ -1123,7 +1128,7 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
var lastCreatedTime time.Time
ctrs, err := r.GetAllContainers()
if err != nil {
- return nil, errors.Wrapf(err, "unable to find latest container")
+ return nil, fmt.Errorf("unable to find latest container: %w", err)
}
if len(ctrs) == 0 {
return nil, define.ErrNoSuchCtr
@@ -1204,7 +1209,7 @@ func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) ([]*reports.Pru
// MountStorageContainer mounts the storage container's root filesystem
func (r *Runtime) MountStorageContainer(id string) (string, error) {
if _, err := r.GetContainer(id); err == nil {
- return "", errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ return "", fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
}
container, err := r.store.Container(id)
if err != nil {
@@ -1212,7 +1217,7 @@ func (r *Runtime) MountStorageContainer(id string) (string, error) {
}
mountPoint, err := r.store.Mount(container.ID, "")
if err != nil {
- return "", errors.Wrapf(err, "error mounting storage for container %s", id)
+ return "", fmt.Errorf("error mounting storage for container %s: %w", id, err)
}
return mountPoint, nil
}
@@ -1220,7 +1225,7 @@ func (r *Runtime) MountStorageContainer(id string) (string, error) {
// UnmountStorageContainer unmounts the storage container's root filesystem
func (r *Runtime) UnmountStorageContainer(id string, force bool) (bool, error) {
if _, err := r.GetContainer(id); err == nil {
- return false, errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ return false, fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
}
container, err := r.store.Container(id)
if err != nil {
@@ -1234,7 +1239,7 @@ func (r *Runtime) UnmountStorageContainer(id string, force bool) (bool, error) {
func (r *Runtime) IsStorageContainerMounted(id string) (bool, string, error) {
var path string
if _, err := r.GetContainer(id); err == nil {
- return false, "", errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ return false, "", fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
}
mountCnt, err := r.storageService.MountedContainerImage(id)
@@ -1260,13 +1265,13 @@ func (r *Runtime) StorageContainers() ([]storage.Container, error) {
storeContainers, err := r.store.Containers()
if err != nil {
- return nil, errors.Wrapf(err, "error reading list of all storage containers")
+ return nil, fmt.Errorf("error reading list of all storage containers: %w", err)
}
retCtrs := []storage.Container{}
for _, container := range storeContainers {
exists, err := r.state.HasContainer(container.ID)
if err != nil && err != define.ErrNoSuchCtr {
- return nil, errors.Wrapf(err, "failed to check if %s container exists in database", container.ID)
+ return nil, fmt.Errorf("failed to check if %s container exists in database: %w", container.ID, err)
}
if exists {
continue
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index b13482722..d04607d2e 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -2,6 +2,8 @@ package libpod
import (
"context"
+ "errors"
+ "fmt"
"io"
"io/ioutil"
"os"
@@ -13,7 +15,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -40,14 +41,14 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage
if ctr.config.IsInfra {
pod, err := r.state.Pod(ctr.config.Pod)
if err != nil {
- return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), ctr.config.Pod)
+ return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", ctr.ID(), ctr.config.Pod, err)
}
if err := r.removePod(ctx, pod, true, true, timeout); err != nil {
- return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID())
+ return fmt.Errorf("removing image %s: container %s using image could not be removed: %w", imageID, ctr.ID(), err)
}
} else {
if err := r.removeContainer(ctx, ctr, true, false, false, timeout); err != nil {
- return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID())
+ return fmt.Errorf("removing image %s: container %s using image could not be removed: %w", imageID, ctr.ID(), err)
}
}
}
@@ -106,7 +107,7 @@ func (r *Runtime) Build(ctx context.Context, options buildahDefine.BuildOptions,
func DownloadFromFile(reader *os.File) (string, error) {
outFile, err := ioutil.TempFile(util.Tmpdir(), "import")
if err != nil {
- return "", errors.Wrap(err, "error creating file")
+ return "", fmt.Errorf("error creating file: %w", err)
}
defer outFile.Close()
@@ -114,7 +115,7 @@ func DownloadFromFile(reader *os.File) (string, error) {
_, err = io.Copy(outFile, reader)
if err != nil {
- return "", errors.Wrapf(err, "error saving %s to %s", reader.Name(), outFile.Name())
+ return "", fmt.Errorf("error saving %s to %s: %w", reader.Name(), outFile.Name(), err)
}
return outFile.Name(), nil
diff --git a/libpod/runtime_migrate.go b/libpod/runtime_migrate.go
index f56cb83a4..139638a6b 100644
--- a/libpod/runtime_migrate.go
+++ b/libpod/runtime_migrate.go
@@ -14,7 +14,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -22,21 +21,21 @@ func (r *Runtime) stopPauseProcess() error {
if rootless.IsRootless() {
pausePidPath, err := util.GetRootlessPauseProcessPidPathGivenDir(r.config.Engine.TmpDir)
if err != nil {
- return errors.Wrapf(err, "could not get pause process pid file path")
+ return fmt.Errorf("could not get pause process pid file path: %w", err)
}
data, err := ioutil.ReadFile(pausePidPath)
if err != nil {
if os.IsNotExist(err) {
return nil
}
- return errors.Wrap(err, "cannot read pause process pid file")
+ return fmt.Errorf("cannot read pause process pid file: %w", err)
}
pausePid, err := strconv.Atoi(string(data))
if err != nil {
- return errors.Wrapf(err, "cannot parse pause pid file %s", pausePidPath)
+ return fmt.Errorf("cannot parse pause pid file %s: %w", pausePidPath, err)
}
if err := os.Remove(pausePidPath); err != nil {
- return errors.Wrapf(err, "cannot delete pause pid file %s", pausePidPath)
+ return fmt.Errorf("cannot delete pause pid file %s: %w", pausePidPath, err)
}
if err := syscall.Kill(pausePid, syscall.SIGKILL); err != nil {
return err
@@ -60,7 +59,7 @@ func (r *Runtime) migrate() error {
for _, ctr := range runningContainers {
fmt.Printf("stopped %s\n", ctr.ID())
if err := ctr.Stop(); err != nil {
- return errors.Wrapf(err, "cannot stop container %s", ctr.ID())
+ return fmt.Errorf("cannot stop container %s: %w", ctr.ID(), err)
}
}
@@ -68,7 +67,7 @@ func (r *Runtime) migrate() error {
runtimeChangeRequested := r.migrateRuntime != ""
requestedRuntime, runtimeExists := r.ociRuntimes[r.migrateRuntime]
if !runtimeExists && runtimeChangeRequested {
- return errors.Wrapf(define.ErrInvalidArg, "change to runtime %q requested but no such runtime is defined", r.migrateRuntime)
+ return fmt.Errorf("change to runtime %q requested but no such runtime is defined: %w", r.migrateRuntime, define.ErrInvalidArg)
}
for _, ctr := range allCtrs {
@@ -93,7 +92,7 @@ func (r *Runtime) migrate() error {
if needsWrite {
if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
- return errors.Wrapf(err, "error rewriting config for container %s", ctr.ID())
+ return fmt.Errorf("error rewriting config for container %s: %w", ctr.ID(), err)
}
}
}
diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go
index dca0ffc8a..25e48de14 100644
--- a/libpod/runtime_pod.go
+++ b/libpod/runtime_pod.go
@@ -2,11 +2,12 @@ package libpod
import (
"context"
+ "errors"
+ "fmt"
"time"
+ "github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
- "github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
)
// Contains the public Runtime API for pods
@@ -112,7 +113,7 @@ func (r *Runtime) GetLatestPod() (*Pod, error) {
var lastCreatedTime time.Time
pods, err := r.GetAllPods()
if err != nil {
- return nil, errors.Wrapf(err, "unable to get all pods")
+ return nil, fmt.Errorf("unable to get all pods: %w", err)
}
if len(pods) == 0 {
return nil, define.ErrNoSuchPod
@@ -146,7 +147,7 @@ func (r *Runtime) GetRunningPods() ([]*Pod, error) {
pods = append(pods, c.PodID())
pod, err := r.GetPod(c.PodID())
if err != nil {
- if errors.Cause(err) == define.ErrPodRemoved || errors.Cause(err) == define.ErrNoSuchPod {
+ if errors.Is(err, define.ErrPodRemoved) || errors.Is(err, define.ErrNoSuchPod) {
continue
}
return nil, err
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index dcc3a044f..75ff24e41 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -5,6 +5,7 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"os"
"path"
@@ -17,8 +18,7 @@ import (
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/specgen"
- spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
+ runcconfig "github.com/opencontainers/runc/libcontainer/configs"
"github.com/sirupsen/logrus"
)
@@ -38,14 +38,14 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
for _, option := range options {
if err := option(pod); err != nil {
- return nil, errors.Wrapf(err, "error running pod create option")
+ return nil, fmt.Errorf("error running pod create option: %w", err)
}
}
// Allocate a lock for the pod
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return nil, errors.Wrapf(err, "error allocating lock for new pod")
+ return nil, fmt.Errorf("error allocating lock for new pod: %w", err)
}
pod.lock = lock
pod.config.LockID = pod.lock.ID()
@@ -66,19 +66,37 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
case config.CgroupfsCgroupsManager:
canUseCgroup := !rootless.IsRootless() || isRootlessCgroupSet(pod.config.CgroupParent)
if canUseCgroup {
+ // Need to actually create the cgroup parent here.
if pod.config.CgroupParent == "" {
pod.config.CgroupParent = CgroupfsDefaultCgroupParent
} else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+ return nil, fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg)
}
// If we are set to use pod cgroups, set the cgroup parent that
// all containers in the pod will share
- // No need to create it with cgroupfs - the first container to
- // launch should do it for us
if pod.config.UsePodCgroup {
pod.state.CgroupPath = filepath.Join(pod.config.CgroupParent, pod.ID())
if p.InfraContainerSpec != nil {
p.InfraContainerSpec.CgroupParent = pod.state.CgroupPath
+ // cgroupfs + rootless = permission denied when creating the cgroup.
+ if !rootless.IsRootless() {
+ res, err := GetLimits(p.InfraContainerSpec.ResourceLimits)
+ if err != nil {
+ return nil, err
+ }
+ // Need to both create and update the cgroup:
+ // rather than adding a new pod-cgroup creation path in c/common,
+ // create it as if it were a container, then let Update populate
+ // the resource limits at the pod level.
+ cgc, err := cgroups.New(pod.state.CgroupPath, &res)
+ if err != nil {
+ return nil, err
+ }
+ err = cgc.Update(&res)
+ if err != nil {
+ return nil, err
+ }
+ }
}
}
}
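
Under cgroupfs (rootful only, since rootless creation is denied), the pod cgroup is now created eagerly and then updated with the infra container's resource limits. A sketch of that create-then-update sequence using the same two calls the diff introduces (the path and pids limit are illustrative; real values come from pod.state.CgroupPath and GetLimits):

```go
package main

import (
	"fmt"

	"github.com/containers/common/pkg/cgroups"
	runcconfig "github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// Illustrative limits; libpod derives these from the infra
	// container's spec via GetLimits.
	res := runcconfig.Resources{PidsLimit: 512}

	// Create as if it were a container cgroup...
	cgc, err := cgroups.New("/libpod_parent/libpod_pod_example", &res)
	if err != nil {
		fmt.Println("create:", err)
		return
	}
	// ...then Update populates the resource limits at the pod level.
	if err := cgc.Update(&res); err != nil {
		fmt.Println("update:", err)
	}
}
```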
@@ -90,14 +108,14 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
pod.config.CgroupParent = SystemdDefaultCgroupParent
}
} else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
+ return nil, fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg)
}
// If we are set to use pod cgroups, set the cgroup parent that
// all containers in the pod will share
if pod.config.UsePodCgroup {
- cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()))
+ cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()), p.InfraContainerSpec.ResourceLimits)
if err != nil {
- return nil, errors.Wrapf(err, "unable to create pod cgroup for pod %s", pod.ID())
+ return nil, fmt.Errorf("unable to create pod cgroup for pod %s: %w", pod.ID(), err)
}
pod.state.CgroupPath = cgroupPath
if p.InfraContainerSpec != nil {
@@ -105,7 +123,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
}
}
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported Cgroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager)
+ return nil, fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg)
}
}
@@ -114,7 +132,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
}
if !pod.HasInfraContainer() && pod.SharesNamespaces() {
- return nil, errors.Errorf("Pods must have an infra container to share namespaces")
+ return nil, errors.New("Pods must have an infra container to share namespaces")
}
if pod.HasInfraContainer() && !pod.SharesNamespaces() {
logrus.Infof("Pod has an infra container, but shares no namespaces")
@@ -139,12 +157,12 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
if addPodErr = r.state.AddPod(pod); addPodErr == nil {
return pod, nil
}
- if !generateName || (errors.Cause(addPodErr) != define.ErrPodExists && errors.Cause(addPodErr) != define.ErrCtrExists) {
+ if !generateName || (!errors.Is(addPodErr, define.ErrPodExists) && !errors.Is(addPodErr, define.ErrCtrExists)) {
break
}
}
if addPodErr != nil {
- return nil, errors.Wrapf(addPodErr, "error adding pod to state")
+ return nil, fmt.Errorf("error adding pod to state: %w", addPodErr)
}
return pod, nil
@@ -193,7 +211,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
force = true
}
if !removeCtrs && numCtrs > 0 {
- return errors.Wrapf(define.ErrCtrExists, "pod %s contains containers and cannot be removed", p.ID())
+ return fmt.Errorf("pod %s contains containers and cannot be removed: %w", p.ID(), define.ErrCtrExists)
}
// Go through and lock all containers so we can operate on them all at
@@ -221,7 +239,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// Ensure state appropriate for removal
if err := ctr.checkReadyForRemoval(); err != nil {
- return errors.Wrapf(err, "pod %s has containers that are not ready to be removed", p.ID())
+ return fmt.Errorf("pod %s has containers that are not ready to be removed: %w", p.ID(), err)
}
}
@@ -239,9 +257,8 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
}
// New resource limits
- resLimits := new(spec.LinuxResources)
- resLimits.Pids = new(spec.LinuxPids)
- resLimits.Pids.Limit = 1 // Inhibit forks with very low pids limit
+ resLimits := new(runcconfig.Resources)
+ resLimits.PidsLimit = 1 // Inhibit forks with very low pids limit
// Don't try if we failed to retrieve the cgroup
if err == nil {
@@ -294,15 +311,15 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
for volName := range ctrNamedVolumes {
volume, err := r.state.Volume(volName)
- if err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
+ if err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
logrus.Errorf("Retrieving volume %s: %v", volName, err)
continue
}
if !volume.Anonymous() {
continue
}
- if err := r.removeVolume(ctx, volume, false, timeout); err != nil {
- if errors.Cause(err) == define.ErrNoSuchVolume || errors.Cause(err) == define.ErrVolumeRemoved {
+ if err := r.removeVolume(ctx, volume, false, timeout, false); err != nil {
+ if errors.Is(err, define.ErrNoSuchVolume) || errors.Is(err, define.ErrVolumeRemoved) {
continue
}
logrus.Errorf("Removing volume %s: %v", volName, err)
@@ -321,9 +338,9 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
switch p.runtime.config.Engine.CgroupManager {
case config.SystemdCgroupsManager:
- if err := deleteSystemdCgroup(p.state.CgroupPath); err != nil {
+ if err := deleteSystemdCgroup(p.state.CgroupPath, p.ResourceLim()); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID())
+ removalErr = fmt.Errorf("error removing pod %s cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
@@ -337,7 +354,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
conmonCgroup, err := cgroups.Load(conmonCgroupPath)
if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup", p.ID())
+ removalErr = fmt.Errorf("error retrieving pod %s conmon cgroup: %w", p.ID(), err)
} else {
logrus.Debugf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
}
@@ -345,7 +362,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
if err == nil {
if err = conmonCgroup.Delete(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error removing pod %s conmon cgroup", p.ID())
+ removalErr = fmt.Errorf("error removing pod %s conmon cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
}
@@ -354,7 +371,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
cgroup, err := cgroups.Load(p.state.CgroupPath)
if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error retrieving pod %s cgroup", p.ID())
+ removalErr = fmt.Errorf("error retrieving pod %s cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
@@ -362,7 +379,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
if err == nil {
if err := cgroup.Delete(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID())
+ removalErr = fmt.Errorf("error removing pod %s cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
@@ -373,7 +390,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// keep going so we make sure to evict the pod before
// ending up with an inconsistent state.
if removalErr == nil {
- removalErr = errors.Wrapf(define.ErrInternal, "unrecognized cgroup manager %s when removing pod %s cgroups", p.runtime.config.Engine.CgroupManager, p.ID())
+ removalErr = fmt.Errorf("unrecognized cgroup manager %s when removing pod %s cgroups: %w", p.runtime.config.Engine.CgroupManager, p.ID(), define.ErrInternal)
} else {
logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.Engine.CgroupManager, p.ID())
}
@@ -399,7 +416,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// Deallocate the pod lock
if err := p.lock.Free(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error freeing pod %s lock", p.ID())
+ removalErr = fmt.Errorf("error freeing pod %s lock: %w", p.ID(), err)
} else {
logrus.Errorf("Freeing pod %s lock: %v", p.ID(), err)
}
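
The recurring change in this commit swaps github.com/pkg/errors for the standard library: errors are wrapped with fmt.Errorf and the %w verb, and matched with errors.Is, which walks the entire wrap chain (errors.Cause only unwrapped pkg/errors wrappers). A minimal, self-contained sketch of the pattern; the sentinel here stands in for values like define.ErrNoSuchVolume:

package main

import (
	"errors"
	"fmt"
)

// errNoSuchVolume stands in for a sentinel such as define.ErrNoSuchVolume.
var errNoSuchVolume = errors.New("no such volume")

func lookup(name string) error {
	// %w records the wrapped error so errors.Is/errors.As can find it later.
	return fmt.Errorf("retrieving volume %s: %w", name, errNoSuchVolume)
}

func main() {
	err := lookup("myvol")
	fmt.Println(errors.Is(err, errNoSuchVolume)) // true, even through the wrap
}
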
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
index db055f40b..9149dd72f 100644
--- a/libpod/runtime_renumber.go
+++ b/libpod/runtime_renumber.go
@@ -1,8 +1,9 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/podman/v4/libpod/events"
- "github.com/pkg/errors"
)
// renumberLocks reassigns lock numbers for all containers and pods in the
@@ -26,7 +27,7 @@ func (r *Runtime) renumberLocks() error {
for _, ctr := range allCtrs {
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return errors.Wrapf(err, "error allocating lock for container %s", ctr.ID())
+ return fmt.Errorf("error allocating lock for container %s: %w", ctr.ID(), err)
}
ctr.config.LockID = lock.ID()
@@ -43,7 +44,7 @@ func (r *Runtime) renumberLocks() error {
for _, pod := range allPods {
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return errors.Wrapf(err, "error allocating lock for pod %s", pod.ID())
+ return fmt.Errorf("error allocating lock for pod %s: %w", pod.ID(), err)
}
pod.config.LockID = lock.ID()
@@ -60,7 +61,7 @@ func (r *Runtime) renumberLocks() error {
for _, vol := range allVols {
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return errors.Wrapf(err, "error allocating lock for volume %s", vol.Name())
+ return fmt.Errorf("error allocating lock for volume %s: %w", vol.Name(), err)
}
vol.config.LockID = lock.ID()
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index 21bf8aefc..9efb30e03 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -2,11 +2,11 @@ package libpod
import (
"context"
+ "errors"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/domain/entities/reports"
- "github.com/pkg/errors"
)
// Contains the public Runtime API for volumes
@@ -33,7 +33,7 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool, timeo
return nil
}
}
- return r.removeVolume(ctx, v, force, timeout)
+ return r.removeVolume(ctx, v, force, timeout, false)
}
// GetVolume retrieves a volume given its full name.
@@ -133,7 +133,7 @@ func (r *Runtime) PruneVolumes(ctx context.Context, filterFuncs []VolumeFilter)
report.Id = vol.Name()
var timeout *uint
if err := r.RemoveVolume(ctx, vol, false, timeout); err != nil {
- if errors.Cause(err) != define.ErrVolumeBeingUsed && errors.Cause(err) != define.ErrVolumeRemoved {
+ if !errors.Is(err, define.ErrVolumeBeingUsed) && !errors.Is(err, define.ErrVolumeRemoved) {
report.Err = err
} else {
// We didn't remove the volume for some reason
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index f8788e183..a751d75d2 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -5,6 +5,8 @@ package libpod
import (
"context"
+ "errors"
+ "fmt"
"os"
"path/filepath"
"strings"
@@ -16,7 +18,6 @@ import (
"github.com/containers/storage/drivers/quota"
"github.com/containers/storage/pkg/stringid"
pluginapi "github.com/docker/go-plugins-helpers/volume"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -25,15 +26,17 @@ func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption)
if !r.valid {
return nil, define.ErrRuntimeStopped
}
- return r.newVolume(options...)
+ return r.newVolume(false, options...)
}
-// newVolume creates a new empty volume
-func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredErr error) {
+// newVolume creates a new empty volume with the given options.
+// The noCreatePluginVolume parameter can be set to true to skip creating the volume
+// in the volume plugin; this is required for UpdateVolumePlugins(). If you are not sure, set it to false.
+func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOption) (_ *Volume, deferredErr error) {
volume := newVolume(r)
for _, option := range options {
if err := option(volume); err != nil {
- return nil, errors.Wrapf(err, "running volume create option")
+ return nil, fmt.Errorf("running volume create option: %w", err)
}
}
@@ -48,17 +51,17 @@ func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredE
// Check if volume with given name exists.
exists, err := r.state.HasVolume(volume.config.Name)
if err != nil {
- return nil, errors.Wrapf(err, "checking if volume with name %s exists", volume.config.Name)
+ return nil, fmt.Errorf("checking if volume with name %s exists: %w", volume.config.Name, err)
}
if exists {
- return nil, errors.Wrapf(define.ErrVolumeExists, "volume with name %s already exists", volume.config.Name)
+ return nil, fmt.Errorf("volume with name %s already exists: %w", volume.config.Name, define.ErrVolumeExists)
}
// Plugin can be nil if driver is local, but that's OK - superfluous
// assignment doesn't hurt much.
- plugin, err := r.getVolumePlugin(volume.config.Driver)
+ plugin, err := r.getVolumePlugin(volume.config)
if err != nil {
- return nil, errors.Wrapf(err, "volume %s uses volume plugin %s but it could not be retrieved", volume.config.Name, volume.config.Driver)
+ return nil, fmt.Errorf("volume %s uses volume plugin %s but it could not be retrieved: %w", volume.config.Name, volume.config.Driver, err)
}
volume.plugin = plugin
@@ -70,20 +73,20 @@ func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredE
case "device":
if strings.ToLower(volume.config.Options["type"]) == "bind" {
if _, err := os.Stat(val); err != nil {
- return nil, errors.Wrapf(err, "invalid volume option %s for driver 'local'", key)
+ return nil, fmt.Errorf("invalid volume option %s for driver 'local': %w", key, err)
}
}
- case "o", "type", "uid", "gid", "size", "inodes", "noquota":
+ case "o", "type", "uid", "gid", "size", "inodes", "noquota", "copy", "nocopy":
// Do nothing, valid keys
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "invalid mount option %s for driver 'local'", key)
+ return nil, fmt.Errorf("invalid mount option %s for driver 'local': %w", key, define.ErrInvalidArg)
}
}
}
// Now we get conditional: we either need to make the volume in the
// volume plugin, or on disk if not using a plugin.
- if volume.plugin != nil {
+ if volume.plugin != nil && !noCreatePluginVolume {
// We can't chown, or relabel, or similar the path the volume is
// using, because it's not managed by us.
// TODO: reevaluate this once we actually have volume plugins in
@@ -96,17 +99,17 @@ func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredE
// Create the mountpoint of this volume
volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name)
if err := os.MkdirAll(volPathRoot, 0700); err != nil {
- return nil, errors.Wrapf(err, "creating volume directory %q", volPathRoot)
+ return nil, fmt.Errorf("creating volume directory %q: %w", volPathRoot, err)
}
if err := os.Chown(volPathRoot, volume.config.UID, volume.config.GID); err != nil {
- return nil, errors.Wrapf(err, "chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID)
+ return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", volPathRoot, volume.config.UID, volume.config.GID, err)
}
fullVolPath := filepath.Join(volPathRoot, "_data")
if err := os.MkdirAll(fullVolPath, 0755); err != nil {
- return nil, errors.Wrapf(err, "creating volume directory %q", fullVolPath)
+ return nil, fmt.Errorf("creating volume directory %q: %w", fullVolPath, err)
}
if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
- return nil, errors.Wrapf(err, "chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID)
+ return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", fullVolPath, volume.config.UID, volume.config.GID, err)
}
if err := LabelVolumePath(fullVolPath); err != nil {
return nil, err
@@ -131,7 +134,7 @@ func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredE
}
if projectQuotaSupported {
if err := q.SetQuota(fullVolPath, quota); err != nil {
- return nil, errors.Wrapf(err, "failed to set size quota size=%d inodes=%d for volume directory %q", volume.config.Size, volume.config.Inodes, fullVolPath)
+ return nil, fmt.Errorf("failed to set size quota size=%d inodes=%d for volume directory %q: %w", volume.config.Size, volume.config.Inodes, fullVolPath, err)
}
}
}
@@ -141,7 +144,7 @@ func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredE
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return nil, errors.Wrapf(err, "allocating lock for new volume")
+ return nil, fmt.Errorf("allocating lock for new volume: %w", err)
}
volume.lock = lock
volume.config.LockID = volume.lock.ID()
@@ -158,12 +161,91 @@ func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredE
// Add the volume to state
if err := r.state.AddVolume(volume); err != nil {
- return nil, errors.Wrapf(err, "adding volume to state")
+ return nil, fmt.Errorf("adding volume to state: %w", err)
}
defer volume.newVolumeEvent(events.Create)
return volume, nil
}
+// UpdateVolumePlugins reads all volumes from all configured volume plugins and
+// imports them into the libpod db. It also checks whether existing libpod volumes
+// were removed from the plugin; in that case we try to remove them from libpod as well.
+// On errors we continue and try to do as much as possible; all errors are
+// returned as a slice in the returned struct.
+// This function is inherently racy: it is best effort and cannot guarantee
+// a consistent state, since plugins can be modified from the outside at any time.
+func (r *Runtime) UpdateVolumePlugins(ctx context.Context) *define.VolumeReload {
+ var (
+ added []string
+ removed []string
+ errs []error
+ allPluginVolumes = map[string]struct{}{}
+ )
+
+ for driverName, socket := range r.config.Engine.VolumePlugins {
+ driver, err := volplugin.GetVolumePlugin(driverName, socket, 0)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ vols, err := driver.ListVolumes()
+ if err != nil {
+ errs = append(errs, fmt.Errorf("failed to read volumes from plugin %q: %w", driverName, err))
+ continue
+ }
+ for _, vol := range vols {
+ allPluginVolumes[vol.Name] = struct{}{}
+ if _, err := r.newVolume(true, WithVolumeName(vol.Name), WithVolumeDriver(driverName)); err != nil {
+			// If the volume already exists this is not an error; just log it and continue.
+			// It is very likely that the volume from the plugin was already in our db.
+ if !errors.Is(err, define.ErrVolumeExists) {
+ errs = append(errs, err)
+ continue
+ }
+ logrus.Infof("Volume %q already exists: %v", vol.Name, err)
+ continue
+ }
+ added = append(added, vol.Name)
+ }
+ }
+
+ libpodVolumes, err := r.state.AllVolumes()
+ if err != nil {
+ errs = append(errs, fmt.Errorf("cannot delete dangling plugin volumes: failed to read libpod volumes: %w", err))
+ }
+ for _, vol := range libpodVolumes {
+ if vol.UsesVolumeDriver() {
+ if _, ok := allPluginVolumes[vol.Name()]; !ok {
+				// The volume is no longer in the plugin; remove it from the libpod db.
+ if err := r.removeVolume(ctx, vol, false, nil, true); err != nil {
+ if errors.Is(err, define.ErrVolumeBeingUsed) {
+					// Volume is still used by at least one container. This is very bad:
+					// the plugin no longer has the volume, but we still need it.
+ errs = append(errs, fmt.Errorf("volume was removed from the plugin %q but containers still require it: %w", vol.config.Driver, err))
+ continue
+ }
+ if errors.Is(err, define.ErrNoSuchVolume) || errors.Is(err, define.ErrVolumeRemoved) || errors.Is(err, define.ErrMissingPlugin) {
+					// Volume was already removed; just ignore it and continue.
+ continue
+ }
+
+ // some other error
+ errs = append(errs, err)
+ continue
+ }
+ // Volume was successfully removed
+ removed = append(removed, vol.Name())
+ }
+ }
+ }
+
+ return &define.VolumeReload{
+ Added: added,
+ Removed: removed,
+ Errors: errs,
+ }
+}
+
// makeVolumeInPluginIfNotExist makes a volume in the given volume plugin if it
// does not already exist.
func makeVolumeInPluginIfNotExist(name string, options map[string]string, plugin *volplugin.VolumePlugin) error {
@@ -190,15 +272,17 @@ func makeVolumeInPluginIfNotExist(name string, options map[string]string, plugin
createReq.Name = name
createReq.Options = options
if err := plugin.CreateVolume(createReq); err != nil {
- return errors.Wrapf(err, "creating volume %q in plugin %s", name, plugin.Name)
+ return fmt.Errorf("creating volume %q in plugin %s: %w", name, plugin.Name, err)
}
}
return nil
}
-// removeVolume removes the specified volume from state as well tears down its mountpoint and storage
-func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeout *uint) error {
+// removeVolume removes the specified volume from state and tears down its mountpoint and storage.
+// ignoreVolumePlugin is used to remove the volume only from the db and not from the plugin;
+// this is required when the volume was already removed from the plugin, e.g. in UpdateVolumePlugins().
+func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeout *uint, ignoreVolumePlugin bool) error {
if !v.valid {
if ok, _ := r.state.HasVolume(v.Name()); !ok {
return nil
@@ -221,7 +305,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
if len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
if !force {
- return errors.Wrapf(define.ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
+ return fmt.Errorf("volume %s is being used by the following container(s): %s: %w", v.Name(), depsStr, define.ErrVolumeBeingUsed)
}
// We need to remove all containers using the volume
@@ -230,17 +314,17 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
if err != nil {
// If the container's removed, no point in
// erroring.
- if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrRemoved {
+ if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
continue
}
- return errors.Wrapf(err, "removing container %s that depends on volume %s", dep, v.Name())
+ return fmt.Errorf("removing container %s that depends on volume %s: %w", dep, v.Name(), err)
}
logrus.Debugf("Removing container %s (depends on volume %q)", ctr.ID(), v.Name())
if err := r.removeContainer(ctx, ctr, force, false, false, timeout); err != nil {
- return errors.Wrapf(err, "removing container %s that depends on volume %s", ctr.ID(), v.Name())
+ return fmt.Errorf("removing container %s that depends on volume %s: %w", ctr.ID(), v.Name(), err)
}
}
}
@@ -253,7 +337,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
// them.
logrus.Errorf("Unmounting volume %s: %v", v.Name(), err)
} else {
- return errors.Wrapf(err, "unmounting volume %s", v.Name())
+ return fmt.Errorf("unmounting volume %s: %w", v.Name(), err)
}
}
@@ -263,13 +347,13 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
var removalErr error
// If we use a volume plugin, we need to remove from the plugin.
- if v.UsesVolumeDriver() {
+ if v.UsesVolumeDriver() && !ignoreVolumePlugin {
canRemove := true
// Do we have a volume driver?
if v.plugin == nil {
canRemove = false
- removalErr = errors.Wrapf(define.ErrMissingPlugin, "cannot remove volume %s from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
+ removalErr = fmt.Errorf("cannot remove volume %s from plugin %s, but it has been removed from Podman: %w", v.Name(), v.Driver(), define.ErrMissingPlugin)
} else {
// Ping the plugin first to verify the volume still
// exists.
@@ -280,14 +364,14 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
getReq.Name = v.Name()
if _, err := v.plugin.GetVolume(getReq); err != nil {
canRemove = false
- removalErr = errors.Wrapf(err, "volume %s could not be retrieved from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
+ removalErr = fmt.Errorf("volume %s could not be retrieved from plugin %s, but it has been removed from Podman: %w", v.Name(), v.Driver(), err)
}
}
if canRemove {
req := new(pluginapi.RemoveRequest)
req.Name = v.Name()
if err := v.plugin.RemoveVolume(req); err != nil {
- return errors.Wrapf(err, "volume %s could not be removed from plugin %s", v.Name(), v.Driver())
+ return fmt.Errorf("volume %s could not be removed from plugin %s: %w", v.Name(), v.Driver(), err)
}
}
}
@@ -297,13 +381,13 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
if removalErr != nil {
logrus.Errorf("Removing volume %s from plugin %s: %v", v.Name(), v.Driver(), removalErr)
}
- return errors.Wrapf(err, "removing volume %s", v.Name())
+ return fmt.Errorf("removing volume %s: %w", v.Name(), err)
}
// Free the volume's lock
if err := v.lock.Free(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "freeing lock for volume %s", v.Name())
+ removalErr = fmt.Errorf("freeing lock for volume %s: %w", v.Name(), err)
} else {
logrus.Errorf("Freeing lock for volume %q: %v", v.Name(), err)
}
@@ -313,7 +397,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
// from /var/lib/containers/storage/volumes
if err := v.teardownStorage(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "cleaning up volume storage for %q", v.Name())
+ removalErr = fmt.Errorf("cleaning up volume storage for %q: %w", v.Name(), err)
} else {
logrus.Errorf("Cleaning up volume storage for volume %q: %v", v.Name(), err)
}
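
Taken together, newVolume's noCreatePluginVolume flag and removeVolume's ignoreVolumePlugin flag let UpdateVolumePlugins reconcile the libpod db without creating or deleting anything in the plugins themselves. A hedged sketch of how a caller might consume the returned define.VolumeReload; the reporting helper is illustrative and assumes an already-constructed runtime:

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/podman/v4/libpod"
)

// reportVolumeReload prints what the reload changed and surfaces all errors,
// which are collected rather than aborting the reload midway.
func reportVolumeReload(ctx context.Context, r *libpod.Runtime) error {
	reload := r.UpdateVolumePlugins(ctx)
	for _, name := range reload.Added {
		fmt.Printf("added volume %s\n", name)
	}
	for _, name := range reload.Removed {
		fmt.Printf("removed volume %s\n", name)
	}
	for _, err := range reload.Errors {
		fmt.Fprintf(os.Stderr, "volume reload: %v\n", err)
	}
	if len(reload.Errors) > 0 {
		return fmt.Errorf("volume plugin reload finished with %d error(s)", len(reload.Errors))
	}
	return nil
}
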
diff --git a/libpod/runtime_worker.go b/libpod/runtime_worker.go
index ca44a27f7..9d41321b2 100644
--- a/libpod/runtime_worker.go
+++ b/libpod/runtime_worker.go
@@ -1,40 +1,17 @@
package libpod
-import (
- "time"
-)
-
func (r *Runtime) startWorker() {
- if r.workerChannel == nil {
- r.workerChannel = make(chan func(), 1)
- r.workerShutdown = make(chan bool)
- }
+ r.workerChannel = make(chan func(), 10)
go func() {
- for {
- // Make sure to read all workers before
- // checking if we're about to shutdown.
- for len(r.workerChannel) > 0 {
- w := <-r.workerChannel
- w()
- }
-
- select {
- // We'll read from the shutdown channel only when all
- // items above have been processed.
- //
- // (*Runtime).Shutdown() will block until until the
- // item is read.
- case <-r.workerShutdown:
- return
-
- default:
- time.Sleep(100 * time.Millisecond)
- }
+ for w := range r.workerChannel {
+ w()
+ r.workerGroup.Done()
}
}()
}
func (r *Runtime) queueWork(f func()) {
+ r.workerGroup.Add(1)
go func() {
r.workerChannel <- f
}()
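
The rewritten worker drops the poll-and-sleep loop and the shutdown channel: a single goroutine ranges over a buffered channel, and a sync.WaitGroup (the r.workerGroup referenced above) tracks in-flight items so shutdown can simply wait. A self-contained sketch of the same pattern outside libpod:

package main

import (
	"fmt"
	"sync"
)

type worker struct {
	ch chan func()
	wg sync.WaitGroup
}

func (w *worker) start() {
	w.ch = make(chan func(), 10)
	go func() {
		// range exits cleanly once the channel is closed and drained.
		for f := range w.ch {
			f()
			w.wg.Done()
		}
	}()
}

func (w *worker) queue(f func()) {
	w.wg.Add(1)
	// Send from a goroutine so callers never block on a full channel.
	go func() { w.ch <- f }()
}

func main() {
	w := &worker{}
	w.start()
	w.queue(func() { fmt.Println("work item") })
	w.wg.Wait() // all queued work has completed
	close(w.ch)
}
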
diff --git a/libpod/service.go b/libpod/service.go
index c14f5e51d..a8928277f 100644
--- a/libpod/service.go
+++ b/libpod/service.go
@@ -2,10 +2,10 @@ package libpod
import (
"context"
+ "errors"
"fmt"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -29,7 +29,7 @@ func (p *Pod) hasServiceContainer() bool {
func (p *Pod) serviceContainer() (*Container, error) {
id := p.config.ServiceContainerID
if id == "" {
- return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no service container")
+ return nil, fmt.Errorf("pod has no service container: %w", define.ErrNoSuchCtr)
}
return p.runtime.state.Container(id)
}
diff --git a/libpod/shutdown/handler.go b/libpod/shutdown/handler.go
index 9add05c9c..75e9b4e8a 100644
--- a/libpod/shutdown/handler.go
+++ b/libpod/shutdown/handler.go
@@ -1,18 +1,18 @@
package shutdown
import (
+ "errors"
"os"
"os/signal"
"sync"
"syscall"
"time"
- "github.com/pkg/errors"
logrusImport "github.com/sirupsen/logrus"
)
var (
- ErrHandlerExists error = errors.New("handler with given name already exists")
+ ErrHandlerExists = errors.New("handler with given name already exists")
)
var (
diff --git a/libpod/state.go b/libpod/state.go
index 471023769..4fbd3c302 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -111,6 +111,15 @@ type State interface {
// Return a container config from the database by full ID
GetContainerConfig(id string) (*ContainerConfig, error)
+ // Add the exit code for the specified container to the database.
+ AddContainerExitCode(id string, exitCode int32) error
+
+ // Return the exit code for the specified container.
+ GetContainerExitCode(id string) (int32, error)
+
+ // Remove exit codes older than 5 minutes.
+ PruneContainerExitCodes() error
+
// Add creates a reference to an exec session in the database.
// The container the exec session is attached to will be recorded.
// The container state will not be modified.
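
The three new State methods give exit codes a home in the database so they survive past container cleanup. A sketch of the intended round trip against any State implementation; the helper and container ID are illustrative:

// recordAndReadExitCode sketches the new State methods: write the code when
// the container exits, read it back later (e.g. for podman wait), and prune
// stale entries periodically.
func recordAndReadExitCode(s libpod.State, ctrID string, code int32) (int32, error) {
	if err := s.AddContainerExitCode(ctrID, code); err != nil {
		return 0, err
	}
	got, err := s.GetContainerExitCode(ctrID)
	if err != nil {
		return 0, err
	}
	// Entries older than 5 minutes are dropped.
	if err := s.PruneContainerExitCodes(); err != nil {
		return 0, err
	}
	return got, nil
}
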
diff --git a/libpod/stats.go b/libpod/stats.go
index 25baa378d..c7e9e5128 100644
--- a/libpod/stats.go
+++ b/libpod/stats.go
@@ -4,14 +4,16 @@
package libpod
import (
+ "fmt"
"math"
"strings"
"syscall"
"time"
+ runccgroup "github.com/opencontainers/runc/libcontainer/cgroups"
+
"github.com/containers/common/pkg/cgroups"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
)
// GetContainerStats gets the running stats for a given container.
@@ -23,7 +25,7 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
stats.Name = c.Name()
if c.config.NoCgroups {
- return nil, errors.Wrapf(define.ErrNoCgroups, "cannot run top on container %s as it did not create a cgroup", c.ID())
+ return nil, fmt.Errorf("cannot run top on container %s as it did not create a cgroup: %w", c.ID(), define.ErrNoCgroups)
}
if !c.batched {
@@ -34,8 +36,9 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
}
}
+	// If the container is not running or paused, return stats with all fields at their zero values.
if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
- return stats, define.ErrCtrStateInvalid
+ return stats, nil
}
if previousStats == nil {
@@ -52,13 +55,13 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
}
cgroup, err := cgroups.Load(cgroupPath)
if err != nil {
- return stats, errors.Wrapf(err, "unable to load cgroup at %s", cgroupPath)
+ return stats, fmt.Errorf("unable to load cgroup at %s: %w", cgroupPath, err)
}
// Ubuntu does not have swap memory in cgroups because swap is often not enabled.
cgroupStats, err := cgroup.Stat()
if err != nil {
- return stats, errors.Wrapf(err, "unable to obtain cgroup stats")
+ return stats, fmt.Errorf("unable to obtain cgroup stats: %w", err)
}
conState := c.state.State
netStats, err := getContainerNetIO(c)
@@ -68,29 +71,29 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
// If the current total usage in the cgroup is less than what was previously
// recorded then it means the container was restarted and runs in a new cgroup
- if previousStats.Duration > cgroupStats.CPU.Usage.Total {
+ if previousStats.Duration > cgroupStats.CpuStats.CpuUsage.TotalUsage {
previousStats = &define.ContainerStats{}
}
previousCPU := previousStats.CPUNano
now := uint64(time.Now().UnixNano())
- stats.Duration = cgroupStats.CPU.Usage.Total
+ stats.Duration = cgroupStats.CpuStats.CpuUsage.TotalUsage
stats.UpTime = time.Duration(stats.Duration)
stats.CPU = calculateCPUPercent(cgroupStats, previousCPU, now, previousStats.SystemNano)
// calc the average cpu usage for the time the container is running
stats.AvgCPU = calculateCPUPercent(cgroupStats, 0, now, uint64(c.state.StartedTime.UnixNano()))
- stats.MemUsage = cgroupStats.Memory.Usage.Usage
+ stats.MemUsage = cgroupStats.MemoryStats.Usage.Usage
stats.MemLimit = c.getMemLimit()
stats.MemPerc = (float64(stats.MemUsage) / float64(stats.MemLimit)) * 100
stats.PIDs = 0
if conState == define.ContainerStateRunning || conState == define.ContainerStatePaused {
- stats.PIDs = cgroupStats.Pids.Current
+ stats.PIDs = cgroupStats.PidsStats.Current
}
stats.BlockInput, stats.BlockOutput = calculateBlockIO(cgroupStats)
- stats.CPUNano = cgroupStats.CPU.Usage.Total
- stats.CPUSystemNano = cgroupStats.CPU.Usage.Kernel
+ stats.CPUNano = cgroupStats.CpuStats.CpuUsage.TotalUsage
+ stats.CPUSystemNano = cgroupStats.CpuStats.CpuUsage.UsageInKernelmode
stats.SystemNano = now
- stats.PerCPU = cgroupStats.CPU.Usage.PerCPU
+ stats.PerCPU = cgroupStats.CpuStats.CpuUsage.PercpuUsage
// Handle case where the container is not in a network namespace
if netStats != nil {
stats.NetInput = netStats.TxBytes
@@ -132,10 +135,10 @@ func (c *Container) getMemLimit() uint64 {
// previousCPU is the last value of stats.CPU.Usage.Total measured at the time previousSystem.
// (now - previousSystem) is the time delta in nanoseconds, between the measurement in previousCPU
// and the updated value in stats.
-func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, now, previousSystem uint64) float64 {
+func calculateCPUPercent(stats *runccgroup.Stats, previousCPU, now, previousSystem uint64) float64 {
var (
cpuPercent = 0.0
- cpuDelta = float64(stats.CPU.Usage.Total - previousCPU)
+ cpuDelta = float64(stats.CpuStats.CpuUsage.TotalUsage - previousCPU)
systemDelta = float64(now - previousSystem)
)
if systemDelta > 0.0 && cpuDelta > 0.0 {
@@ -145,8 +148,8 @@ func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, now, previousSyste
return cpuPercent
}
-func calculateBlockIO(stats *cgroups.Metrics) (read uint64, write uint64) {
- for _, blkIOEntry := range stats.Blkio.IoServiceBytesRecursive {
+func calculateBlockIO(stats *runccgroup.Stats) (read uint64, write uint64) {
+ for _, blkIOEntry := range stats.BlkioStats.IoServiceBytesRecursive {
switch strings.ToLower(blkIOEntry.Op) {
case "read":
read += blkIOEntry.Value
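
calculateCPUPercent is a plain delta ratio: CPU time the container consumed between two samples, divided by the wall-clock time between them, times 100. A standalone illustration with made-up nanosecond counters:

package main

import "fmt"

// cpuPercent mirrors the delta computation in calculateCPUPercent; all four
// inputs are nanosecond counters sampled at two points in time.
func cpuPercent(curCPU, prevCPU, now, prevSystem uint64) float64 {
	cpuDelta := float64(curCPU - prevCPU)
	systemDelta := float64(now - prevSystem)
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		return (cpuDelta / systemDelta) * 100
	}
	return 0.0
}

func main() {
	// 250ms of CPU time over a 1s sampling window => 25%.
	fmt.Println(cpuPercent(1_250_000_000, 1_000_000_000, 2_000_000_000, 1_000_000_000))
}
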
diff --git a/libpod/storage.go b/libpod/storage.go
index a85348729..dc19a5d4f 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "errors"
"time"
istorage "github.com/containers/image/v5/storage"
@@ -10,7 +11,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -184,7 +184,7 @@ func (r *storageService) DeleteContainer(idOrName string) error {
}
err = r.store.DeleteContainer(container.ID)
if err != nil {
- if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown) {
logrus.Infof("Storage for container %s already removed", container.ID)
} else {
logrus.Debugf("Failed to delete container %q: %v", container.ID, err)
@@ -218,7 +218,7 @@ func (r *storageService) GetContainerMetadata(idOrName string) (RuntimeContainer
func (r *storageService) MountContainerImage(idOrName string) (string, error) {
container, err := r.store.Container(idOrName)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
return "", define.ErrNoSuchCtr
}
return "", err
@@ -281,7 +281,7 @@ func (r *storageService) MountedContainerImage(idOrName string) (int, error) {
func (r *storageService) GetMountpoint(id string) (string, error) {
container, err := r.store.Container(id)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
return "", define.ErrNoSuchCtr
}
return "", err
@@ -297,7 +297,7 @@ func (r *storageService) GetMountpoint(id string) (string, error) {
func (r *storageService) GetWorkDir(id string) (string, error) {
container, err := r.store.Container(id)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
return "", define.ErrNoSuchCtr
}
return "", err
@@ -308,7 +308,7 @@ func (r *storageService) GetWorkDir(id string) (string, error) {
func (r *storageService) GetRunDir(id string) (string, error) {
container, err := r.store.Container(id)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
return "", define.ErrNoSuchCtr
}
return "", err
diff --git a/libpod/util.go b/libpod/util.go
index 1753b4f34..a6e6a4f3e 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -20,7 +20,6 @@ import (
"github.com/fsnotify/fsnotify"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -93,7 +92,7 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e
return false, err
}
case <-timeoutChan:
- return false, errors.Wrapf(define.ErrInternal, "timed out waiting for file %s", path)
+ return false, fmt.Errorf("timed out waiting for file %s: %w", path, define.ErrInternal)
}
}
}
@@ -123,15 +122,15 @@ func sortMounts(m []spec.Mount) []spec.Mount {
func validPodNSOption(p *Pod, ctrPod string) error {
if p == nil {
- return errors.Wrapf(define.ErrInvalidArg, "pod passed in was nil. Container may not be associated with a pod")
+ return fmt.Errorf("pod passed in was nil. Container may not be associated with a pod: %w", define.ErrInvalidArg)
}
if ctrPod == "" {
- return errors.Wrapf(define.ErrInvalidArg, "container is not a member of any pod")
+ return fmt.Errorf("container is not a member of any pod: %w", define.ErrInvalidArg)
}
if ctrPod != p.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "pod passed in is not the pod the container is associated with")
+ return fmt.Errorf("pod passed in is not the pod the container is associated with: %w", define.ErrInvalidArg)
}
return nil
}
@@ -232,18 +231,18 @@ func DefaultSeccompPath() (string, error) {
func checkDependencyContainer(depCtr, ctr *Container) error {
state, err := depCtr.State()
if err != nil {
- return errors.Wrapf(err, "error accessing dependency container %s state", depCtr.ID())
+ return fmt.Errorf("error accessing dependency container %s state: %w", depCtr.ID(), err)
}
if state == define.ContainerStateRemoving {
- return errors.Wrapf(define.ErrCtrStateInvalid, "cannot use container %s as a dependency as it is being removed", depCtr.ID())
+ return fmt.Errorf("cannot use container %s as a dependency as it is being removed: %w", depCtr.ID(), define.ErrCtrStateInvalid)
}
if depCtr.ID() == ctr.ID() {
- return errors.Wrapf(define.ErrInvalidArg, "must specify another container")
+ return fmt.Errorf("must specify another container: %w", define.ErrInvalidArg)
}
if ctr.config.Pod != "" && depCtr.PodID() != ctr.config.Pod {
- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, depCtr.ID())
+ return fmt.Errorf("container has joined pod %s and dependency container %s is not a member of the pod: %w", ctr.config.Pod, depCtr.ID(), define.ErrInvalidArg)
}
return nil
@@ -347,7 +346,7 @@ func makeInspectPortBindings(bindings []types.PortMapping, expose map[uint16][]s
func writeStringToPath(path, contents, mountLabel string, uid, gid int) error {
f, err := os.Create(path)
if err != nil {
- return errors.Wrapf(err, "unable to create %s", path)
+ return fmt.Errorf("unable to create %s: %w", path, err)
}
defer f.Close()
if err := f.Chown(uid, gid); err != nil {
@@ -355,7 +354,7 @@ func writeStringToPath(path, contents, mountLabel string, uid, gid int) error {
}
if _, err := f.WriteString(contents); err != nil {
- return errors.Wrapf(err, "unable to write %s", path)
+ return fmt.Errorf("unable to write %s: %w", path, err)
}
// Relabel runDirResolv for the container
if err := label.Relabel(path, mountLabel, false); err != nil {
diff --git a/libpod/util_linux.go b/libpod/util_linux.go
index fe98056dc..7c79e6ce4 100644
--- a/libpod/util_linux.go
+++ b/libpod/util_linux.go
@@ -11,8 +11,8 @@ import (
"github.com/containers/common/pkg/cgroups"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/rootless"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -20,7 +20,7 @@ import (
// systemdSliceFromPath makes a new systemd slice under the given parent with
// the given name.
// The parent must be a slice. The name must NOT include ".slice"
-func systemdSliceFromPath(parent, name string) (string, error) {
+func systemdSliceFromPath(parent, name string, resources *spec.LinuxResources) (string, error) {
cgroupPath, err := assembleSystemdCgroupName(parent, name)
if err != nil {
return "", err
@@ -28,8 +28,8 @@ func systemdSliceFromPath(parent, name string) (string, error) {
logrus.Debugf("Created cgroup path %s for parent %s and name %s", cgroupPath, parent, name)
- if err := makeSystemdCgroup(cgroupPath); err != nil {
- return "", errors.Wrapf(err, "error creating cgroup %s", cgroupPath)
+ if err := makeSystemdCgroup(cgroupPath, resources); err != nil {
+ return "", fmt.Errorf("error creating cgroup %s: %w", cgroupPath, err)
}
logrus.Debugf("Created cgroup %s", cgroupPath)
@@ -45,8 +45,12 @@ func getDefaultSystemdCgroup() string {
}
// makeSystemdCgroup creates a systemd Cgroup at the given location.
-func makeSystemdCgroup(path string) error {
- controller, err := cgroups.NewSystemd(getDefaultSystemdCgroup())
+func makeSystemdCgroup(path string, resources *spec.LinuxResources) error {
+ res, err := GetLimits(resources)
+ if err != nil {
+ return err
+ }
+ controller, err := cgroups.NewSystemd(getDefaultSystemdCgroup(), &res)
if err != nil {
return err
}
@@ -54,12 +58,20 @@ func makeSystemdCgroup(path string) error {
if rootless.IsRootless() {
return controller.CreateSystemdUserUnit(path, rootless.GetRootlessUID())
}
- return controller.CreateSystemdUnit(path)
+ err = controller.CreateSystemdUnit(path)
+ if err != nil {
+ return err
+ }
+ return nil
}
// deleteSystemdCgroup deletes the systemd cgroup at the given location
-func deleteSystemdCgroup(path string) error {
- controller, err := cgroups.NewSystemd(getDefaultSystemdCgroup())
+func deleteSystemdCgroup(path string, resources *spec.LinuxResources) error {
+ res, err := GetLimits(resources)
+ if err != nil {
+ return err
+ }
+ controller, err := cgroups.NewSystemd(getDefaultSystemdCgroup(), &res)
if err != nil {
return err
}
@@ -82,7 +94,7 @@ func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) {
const sliceSuffix = ".slice"
if !strings.HasSuffix(baseSlice, sliceSuffix) {
- return "", errors.Wrapf(define.ErrInvalidArg, "cannot assemble cgroup path with base %q - must end in .slice", baseSlice)
+ return "", fmt.Errorf("cannot assemble cgroup path with base %q - must end in .slice: %w", baseSlice, define.ErrInvalidArg)
}
noSlice := strings.TrimSuffix(baseSlice, sliceSuffix)
@@ -100,17 +112,17 @@ var lvpReleaseLabel = label.ReleaseLabel
func LabelVolumePath(path string) error {
_, mountLabel, err := lvpInitLabels([]string{})
if err != nil {
- return errors.Wrapf(err, "error getting default mountlabels")
+ return fmt.Errorf("error getting default mountlabels: %w", err)
}
if err := lvpReleaseLabel(mountLabel); err != nil {
- return errors.Wrapf(err, "error releasing label %q", mountLabel)
+ return fmt.Errorf("error releasing label %q: %w", mountLabel, err)
}
if err := lvpRelabel(path, mountLabel, true); err != nil {
if err == syscall.ENOTSUP {
logrus.Debugf("Labeling not supported on %q", path)
} else {
- return errors.Wrapf(err, "error setting selinux label for %s to %q as shared", path, mountLabel)
+ return fmt.Errorf("error setting selinux label for %s to %q as shared: %w", path, mountLabel, err)
}
}
return nil
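
Both systemd cgroup helpers now take a resource spec so the transient slice carries the same properties on create and delete. A sketch of the new call shape, written as if inside package libpod and mirroring the removePod hunk earlier in this diff; the helper itself is illustrative:

// removePodCgroup shows the two-argument form: the pod's resource spec
// travels with the path so the unit's properties can be reconstructed.
func removePodCgroup(p *Pod) error {
	if err := deleteSystemdCgroup(p.state.CgroupPath, p.ResourceLim()); err != nil {
		return fmt.Errorf("error removing pod %s cgroup: %w", p.ID(), err)
	}
	return nil
}
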
diff --git a/libpod/volume.go b/libpod/volume.go
index ab461a37f..2e8cd77a5 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -55,6 +55,8 @@ type VolumeConfig struct {
// DisableQuota indicates that the volume should completely disable using any
// quota tracking.
DisableQuota bool `json:"disableQuota,omitempty"`
+ // Timeout allows users to override the default driver timeout of 5 seconds
+ Timeout int
}
// VolumeState holds the volume's mutable state.
diff --git a/libpod/volume_inspect.go b/libpod/volume_inspect.go
index 3d721410b..dd2f3fd01 100644
--- a/libpod/volume_inspect.go
+++ b/libpod/volume_inspect.go
@@ -1,9 +1,10 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/podman/v4/libpod/define"
pluginapi "github.com/docker/go-plugins-helpers/volume"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -29,7 +30,7 @@ func (v *Volume) Inspect() (*define.InspectVolumeData, error) {
data.Mountpoint = v.state.MountPoint
if v.plugin == nil {
- return nil, errors.Wrapf(define.ErrMissingPlugin, "volume %s uses volume plugin %s but it is not available, cannot inspect", v.Name(), v.config.Driver)
+ return nil, fmt.Errorf("volume %s uses volume plugin %s but it is not available, cannot inspect: %w", v.Name(), v.config.Driver, define.ErrMissingPlugin)
}
// Retrieve status for the volume.
@@ -38,7 +39,7 @@ func (v *Volume) Inspect() (*define.InspectVolumeData, error) {
req.Name = v.Name()
resp, err := v.plugin.GetVolume(req)
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving volume %s information from plugin %s", v.Name(), v.Driver())
+ return nil, fmt.Errorf("error retrieving volume %s information from plugin %s: %w", v.Name(), v.Driver(), err)
}
if resp != nil {
data.Status = resp.Status
@@ -63,6 +64,7 @@ func (v *Volume) Inspect() (*define.InspectVolumeData, error) {
data.MountCount = v.state.MountCount
data.NeedsCopyUp = v.state.NeedsCopyUp
data.NeedsChown = v.state.NeedsChown
+ data.Timeout = v.config.Timeout
return data, nil
}
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index e0ebb729d..43c3f9b0b 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -1,11 +1,11 @@
package libpod
import (
+ "fmt"
"os"
"path/filepath"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
)
// Creates a new volume
@@ -55,6 +55,12 @@ func (v *Volume) needsMount() bool {
if _, ok := v.config.Options["NOQUOTA"]; ok {
index++
}
+ if _, ok := v.config.Options["nocopy"]; ok {
+ index++
+ }
+ if _, ok := v.config.Options["copy"]; ok {
+ index++
+ }
// when uid or gid is set there is also the "o" option
// set so we have to ignore this one as well
if index > 0 {
@@ -84,7 +90,7 @@ func (v *Volume) save() error {
func (v *Volume) refresh() error {
lock, err := v.runtime.lockManager.AllocateAndRetrieveLock(v.config.LockID)
if err != nil {
- return errors.Wrapf(err, "acquiring lock %d for volume %s", v.config.LockID, v.Name())
+ return fmt.Errorf("acquiring lock %d for volume %s: %w", v.config.LockID, v.Name(), err)
}
v.lock = lock
diff --git a/libpod/volume_internal_linux.go b/libpod/volume_internal_linux.go
index 7d7dea9d0..cfd60554d 100644
--- a/libpod/volume_internal_linux.go
+++ b/libpod/volume_internal_linux.go
@@ -4,12 +4,13 @@
package libpod
import (
+ "errors"
+ "fmt"
"os/exec"
"strings"
"github.com/containers/podman/v4/libpod/define"
pluginapi "github.com/docker/go-plugins-helpers/volume"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -51,7 +52,7 @@ func (v *Volume) mount() error {
// the same one for everything.
if v.UsesVolumeDriver() {
if v.plugin == nil {
- return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name())
+ return fmt.Errorf("volume plugin %s (needed by volume %s) missing: %w", v.Driver(), v.Name(), define.ErrMissingPlugin)
}
req := new(pluginapi.MountRequest)
@@ -83,7 +84,7 @@ func (v *Volume) mount() error {
// TODO: might want to cache this path in the runtime?
mountPath, err := exec.LookPath("mount")
if err != nil {
- return errors.Wrapf(err, "locating 'mount' binary")
+ return fmt.Errorf("locating 'mount' binary: %w", err)
}
mountArgs := []string{}
if volOptions != "" {
@@ -103,7 +104,7 @@ func (v *Volume) mount() error {
logrus.Debugf("Running mount command: %s %s", mountPath, strings.Join(mountArgs, " "))
if output, err := mountCmd.CombinedOutput(); err != nil {
logrus.Debugf("Mount %v failed with %v", mountCmd, err)
- return errors.Errorf(string(output))
+ return errors.New(string(output))
}
logrus.Debugf("Mounted volume %s", v.Name())
@@ -148,7 +149,7 @@ func (v *Volume) unmount(force bool) error {
if v.state.MountCount == 0 {
if v.UsesVolumeDriver() {
if v.plugin == nil {
- return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name())
+ return fmt.Errorf("volume plugin %s (needed by volume %s) missing: %w", v.Driver(), v.Name(), define.ErrMissingPlugin)
}
req := new(pluginapi.UnmountRequest)
@@ -168,7 +169,7 @@ func (v *Volume) unmount(force bool) error {
// Ignore EINVAL - the mount no longer exists.
return nil
}
- return errors.Wrapf(err, "unmounting volume %s", v.Name())
+ return fmt.Errorf("unmounting volume %s: %w", v.Name(), err)
}
logrus.Debugf("Unmounted volume %s", v.Name())
}
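
The unmount path above treats EINVAL from the detaching unmount as "already unmounted" and only then reports failure. A self-contained sketch of that tolerate-EINVAL pattern; the function name is illustrative:

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// lazyUnmount detaches the mount immediately (MNT_DETACH) and treats EINVAL
// as success, since EINVAL means the mount no longer exists.
func lazyUnmount(mountPoint, volName string) error {
	if err := unix.Unmount(mountPoint, unix.MNT_DETACH); err != nil {
		if err == unix.EINVAL {
			return nil
		}
		return fmt.Errorf("unmounting volume %s: %w", volName, err)
	}
	return nil
}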