Diffstat (limited to 'libpod')
-rw-r--r--  libpod/boltdb_state.go  401
-rw-r--r--  libpod/boltdb_state_internal.go  301
-rw-r--r--  libpod/common_test.go  19
-rw-r--r--  libpod/container.go  159
-rw-r--r--  libpod/container.log.go  73
-rw-r--r--  libpod/container_api.go  279
-rw-r--r--  libpod/container_attach_linux.go  171
-rw-r--r--  libpod/container_attach_unsupported.go  13
-rw-r--r--  libpod/container_commit.go  10
-rw-r--r--  libpod/container_graph.go  19
-rw-r--r--  libpod/container_inspect.go  1352
-rw-r--r--  libpod/container_internal.go  394
-rw-r--r--  libpod/container_internal_linux.go  277
-rw-r--r--  libpod/container_internal_unsupported.go  15
-rw-r--r--  libpod/container_log_linux.go  15
-rw-r--r--  libpod/container_log_unsupported.go  6
-rw-r--r--  libpod/container_top_linux.go  9
-rw-r--r--  libpod/container_top_unsupported.go  10
-rw-r--r--  libpod/define/config.go  20
-rw-r--r--  libpod/define/containerstate.go  73
-rw-r--r--  libpod/define/errors.go (renamed from libpod/errors.go)  15
-rw-r--r--  libpod/define/exec_codes.go  30
-rw-r--r--  libpod/define/version.go (renamed from libpod/version.go)  2
-rw-r--r--  libpod/diff.go  56
-rw-r--r--  libpod/driver/driver.go  11
-rw-r--r--  libpod/events.go  55
-rw-r--r--  libpod/events/config.go  11
-rw-r--r--  libpod/events/events.go  2
-rw-r--r--  libpod/events/filters.go  5
-rw-r--r--  libpod/events/journal_linux.go  4
-rw-r--r--  libpod/events/nullout.go  3
-rw-r--r--  libpod/healthcheck.go  73
-rw-r--r--  libpod/healthcheck_linux.go  49
-rw-r--r--  libpod/healthcheck_unsupported.go  8
-rw-r--r--  libpod/image/image.go  200
-rw-r--r--  libpod/image/prune.go  2
-rw-r--r--  libpod/image/pull.go  34
-rw-r--r--  libpod/image/search.go  7
-rw-r--r--  libpod/in_memory_state.go  165
-rw-r--r--  libpod/info.go  16
-rw-r--r--  libpod/kube.go  172
-rw-r--r--  libpod/lock/file/file_lock.go  175
-rw-r--r--  libpod/lock/file/file_lock_test.go  74
-rw-r--r--  libpod/lock/file_lock_manager.go  110
-rw-r--r--  libpod/lock/shm/shm_lock.c  6
-rw-r--r--  libpod/lock/shm/shm_lock.go  5
-rw-r--r--  libpod/lock/shm/shm_lock.h  3
-rw-r--r--  libpod/lock/shm/shm_lock_nocgo.go  102
-rw-r--r--  libpod/logs/log.go (renamed from libpod/container_log.go)  99
-rw-r--r--  libpod/networking_linux.go  60
-rw-r--r--  libpod/networking_unsupported.go  14
-rw-r--r--  libpod/oci.go  200
-rw-r--r--  libpod/oci_attach_linux.go  260
-rw-r--r--  libpod/oci_attach_linux_cgo.go  11
-rw-r--r--  libpod/oci_attach_linux_nocgo.go  7
-rw-r--r--  libpod/oci_attach_unsupported.go  18
-rw-r--r--  libpod/oci_internal_linux.go  496
-rw-r--r--  libpod/oci_linux.go  387
-rw-r--r--  libpod/oci_unsupported.go  21
-rw-r--r--  libpod/options.go  320
-rw-r--r--  libpod/pod.go  15
-rw-r--r--  libpod/pod_api.go  45
-rw-r--r--  libpod/pod_internal.go  7
-rw-r--r--  libpod/pod_top_linux.go  3
-rw-r--r--  libpod/pod_top_unsupported.go  4
-rw-r--r--  libpod/runtime.go  643
-rw-r--r--  libpod/runtime_cstorage.go  119
-rw-r--r--  libpod/runtime_ctr.go  182
-rw-r--r--  libpod/runtime_img.go  3
-rw-r--r--  libpod/runtime_migrate.go  6
-rw-r--r--  libpod/runtime_pod.go  19
-rw-r--r--  libpod/runtime_pod_infra_linux.go  3
-rw-r--r--  libpod/runtime_pod_linux.go  58
-rw-r--r--  libpod/runtime_pod_unsupported.go  6
-rw-r--r--  libpod/runtime_volume.go  29
-rw-r--r--  libpod/runtime_volume_linux.go  9
-rw-r--r--  libpod/runtime_volume_unsupported.go  8
-rw-r--r--  libpod/state_test.go  5
-rw-r--r--  libpod/stats.go  27
-rw-r--r--  libpod/stats_unsupported.go  4
-rw-r--r--  libpod/storage.go  19
-rw-r--r--  libpod/util.go  40
-rw-r--r--  libpod/util_linux.go  46
-rw-r--r--  libpod/util_unsupported.go  11
-rw-r--r--  libpod/volume.go  1
85 files changed, 5747 insertions(+), 2469 deletions(-)
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 63e40a98f..176781f07 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -5,6 +5,7 @@ import (
"strings"
"sync"
+ "github.com/containers/libpod/libpod/define"
bolt "github.com/etcd-io/bbolt"
jsoniter "github.com/json-iterator/go"
"github.com/pkg/errors"
@@ -65,7 +66,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
if err != nil {
return nil, errors.Wrapf(err, "error opening database %s", path)
}
- // Everywhere else, we use s.closeDBCon(db) to ensure the state's DB
+ // Everywhere else, we use s.deferredCloseDBCon(db) to ensure the state's DB
// mutex is also unlocked.
// However, here, the mutex has not been locked, since we just created
// the DB connection, and it hasn't left this function yet - no risk of
@@ -73,42 +74,50 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
// As such, just a db.Close() is fine here.
defer db.Close()
- // Perform initial database setup
- err = db.Update(func(tx *bolt.Tx) error {
- if _, err := tx.CreateBucketIfNotExists(idRegistryBkt); err != nil {
- return errors.Wrapf(err, "error creating id-registry bucket")
- }
- if _, err := tx.CreateBucketIfNotExists(nameRegistryBkt); err != nil {
- return errors.Wrapf(err, "error creating name-registry bucket")
- }
- if _, err := tx.CreateBucketIfNotExists(nsRegistryBkt); err != nil {
- return errors.Wrapf(err, "error creating ns-registry bucket")
- }
- if _, err := tx.CreateBucketIfNotExists(ctrBkt); err != nil {
- return errors.Wrapf(err, "error creating containers bucket")
- }
- if _, err := tx.CreateBucketIfNotExists(allCtrsBkt); err != nil {
- return errors.Wrapf(err, "error creating all containers bucket")
- }
- if _, err := tx.CreateBucketIfNotExists(podBkt); err != nil {
- return errors.Wrapf(err, "error creating pods bucket")
- }
- if _, err := tx.CreateBucketIfNotExists(allPodsBkt); err != nil {
- return errors.Wrapf(err, "error creating all pods bucket")
- }
- if _, err := tx.CreateBucketIfNotExists(volBkt); err != nil {
- return errors.Wrapf(err, "error creating volume bucket")
- }
- if _, err := tx.CreateBucketIfNotExists(allVolsBkt); err != nil {
- return errors.Wrapf(err, "error creating all volumes bucket")
+ createBuckets := [][]byte{
+ idRegistryBkt,
+ nameRegistryBkt,
+ nsRegistryBkt,
+ ctrBkt,
+ allCtrsBkt,
+ podBkt,
+ allPodsBkt,
+ volBkt,
+ allVolsBkt,
+ runtimeConfigBkt,
+ }
+
+ // Does the DB need an update?
+ needsUpdate := false
+ err = db.View(func(tx *bolt.Tx) error {
+ for _, bkt := range createBuckets {
+ if test := tx.Bucket(bkt); test == nil {
+ needsUpdate = true
+ break
+ }
}
- if _, err := tx.CreateBucketIfNotExists(runtimeConfigBkt); err != nil {
- return errors.Wrapf(err, "error creating runtime-config bucket")
+ return nil
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "error checking DB schema")
+ }
+
+ if !needsUpdate {
+ state.valid = true
+ return state, nil
+ }
+
+ // Ensure schema is properly created in DB
+ err = db.Update(func(tx *bolt.Tx) error {
+ for _, bkt := range createBuckets {
+ if _, err := tx.CreateBucketIfNotExists(bkt); err != nil {
+ return errors.Wrapf(err, "error creating bucket %s", string(bkt))
+ }
}
return nil
})
if err != nil {
- return nil, errors.Wrapf(err, "error creating initial database layout")
+ return nil, errors.Wrapf(err, "error creating buckets for DB")
}
state.valid = true
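
The hunk above replaces nine separate CreateBucketIfNotExists calls with a table of bucket names and, before taking any write transaction at all, a read-only pass that checks whether every bucket already exists. A minimal, hypothetical sketch of that check-then-create pattern with bbolt (illustrative, not the libpod code itself) looks like this:

	import (
		bolt "github.com/etcd-io/bbolt"
	)

	// ensureBuckets runs a cheap read-only transaction first and only
	// upgrades to a write transaction when at least one bucket is missing.
	func ensureBuckets(db *bolt.DB, buckets [][]byte) error {
		missing := false
		if err := db.View(func(tx *bolt.Tx) error {
			for _, name := range buckets {
				if tx.Bucket(name) == nil {
					missing = true
					break
				}
			}
			return nil
		}); err != nil {
			return err
		}
		if !missing {
			// Common case on every startup after the first: no write lock needed.
			return nil
		}
		return db.Update(func(tx *bolt.Tx) error {
			for _, name := range buckets {
				if _, err := tx.CreateBucketIfNotExists(name); err != nil {
					return err
				}
			}
			return nil
		})
	}

The practical effect is that once the schema exists, opening the state never needs bolt's exclusive write transaction just to verify it.
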
@@ -125,14 +134,14 @@ func (s *BoltState) Close() error {
// Refresh clears container and pod states after a reboot
func (s *BoltState) Refresh() error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
db, err := s.getDBCon()
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
idBucket, err := getIDBucket(tx)
@@ -163,13 +172,13 @@ func (s *BoltState) Refresh() error {
if podBkt == nil {
// This is neither a pod nor a container
// Error out on the dangling ID
- return errors.Wrapf(ErrInternal, "id %s is not a pod or a container", string(id))
+ return errors.Wrapf(define.ErrInternal, "id %s is not a pod or a container", string(id))
}
// Get the state
stateBytes := podBkt.Get(stateKey)
if stateBytes == nil {
- return errors.Wrapf(ErrInternal, "pod %s missing state key", string(id))
+ return errors.Wrapf(define.ErrInternal, "pod %s missing state key", string(id))
}
state := new(podState)
@@ -202,7 +211,7 @@ func (s *BoltState) Refresh() error {
stateBytes := ctrBkt.Get(stateKey)
if stateBytes == nil {
// Badly formatted container bucket
- return errors.Wrapf(ErrInternal, "container %s missing state in DB", string(id))
+ return errors.Wrapf(define.ErrInternal, "container %s missing state in DB", string(id))
}
state := new(ContainerState)
@@ -235,7 +244,7 @@ func (s *BoltState) Refresh() error {
// the database was first initialized
func (s *BoltState) GetDBConfig() (*DBConfig, error) {
if !s.valid {
- return nil, ErrDBClosed
+ return nil, define.ErrDBClosed
}
cfg := new(DBConfig)
@@ -244,7 +253,7 @@ func (s *BoltState) GetDBConfig() (*DBConfig, error) {
if err != nil {
return nil, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
configBucket, err := getRuntimeConfigBucket(tx)
@@ -282,14 +291,14 @@ func (s *BoltState) GetDBConfig() (*DBConfig, error) {
// ValidateDBConfig validates paths in the given runtime against the database
func (s *BoltState) ValidateDBConfig(runtime *Runtime) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
db, err := s.getDBCon()
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
// Check runtime configuration
if err := checkRuntimeConfig(db, runtime); err != nil {
@@ -316,11 +325,11 @@ func (s *BoltState) SetNamespace(ns string) error {
// Container retrieves a single container from the state by its full ID
func (s *BoltState) Container(id string) (*Container, error) {
if id == "" {
- return nil, ErrEmptyID
+ return nil, define.ErrEmptyID
}
if !s.valid {
- return nil, ErrDBClosed
+ return nil, define.ErrDBClosed
}
ctrID := []byte(id)
@@ -333,7 +342,7 @@ func (s *BoltState) Container(id string) (*Container, error) {
if err != nil {
return nil, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
ctrBucket, err := getCtrBucket(tx)
@@ -354,11 +363,11 @@ func (s *BoltState) Container(id string) (*Container, error) {
// partial ID or name
func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
if idOrName == "" {
- return nil, ErrEmptyID
+ return nil, define.ErrEmptyID
}
if !s.valid {
- return nil, ErrDBClosed
+ return nil, define.ErrDBClosed
}
ctr := new(Container)
@@ -369,14 +378,9 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
if err != nil {
return nil, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
- idBucket, err := getIDBucket(tx)
- if err != nil {
- return err
- }
-
ctrBucket, err := getCtrBucket(tx)
if err != nil {
return err
@@ -427,7 +431,7 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
// We were not given a full container ID or name.
// Search for partial ID matches.
exists := false
- err = idBucket.ForEach(func(checkID, checkName []byte) error {
+ err = ctrBucket.ForEach(func(checkID, checkName []byte) error {
// If the container isn't in our namespace, we
// can't match it
if s.namespaceBytes != nil {
@@ -438,7 +442,7 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
}
if strings.HasPrefix(string(checkID), idOrName) {
if exists {
- return errors.Wrapf(ErrCtrExists, "more than one result for container ID %s", idOrName)
+ return errors.Wrapf(define.ErrCtrExists, "more than one result for container ID %s", idOrName)
}
id = checkID
exists = true
@@ -450,9 +454,9 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
return err
} else if !exists {
if isPod {
- return errors.Wrapf(ErrNoSuchCtr, "%s is a pod, not a container", idOrName)
+ return errors.Wrapf(define.ErrNoSuchCtr, "%s is a pod, not a container", idOrName)
}
- return errors.Wrapf(ErrNoSuchCtr, "no container with name or ID %s found", idOrName)
+ return errors.Wrapf(define.ErrNoSuchCtr, "no container with name or ID %s found", idOrName)
}
return s.getContainerFromDB(id, ctr, ctrBucket)
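
The surrounding hunks also change where partial-ID matches are searched: LookupContainer now iterates the containers bucket directly instead of the removed id-registry bucket. As a rough sketch of that prefix resolution over a bolt bucket (the helper name and error text are illustrative, not taken from this diff; it assumes the strings and github.com/pkg/errors imports already used in the file):

	// resolvePrefix returns the single key in bkt that begins with prefix,
	// or an error if the prefix is ambiguous or matches nothing.
	func resolvePrefix(bkt *bolt.Bucket, prefix string) ([]byte, error) {
		var match []byte
		err := bkt.ForEach(func(id, _ []byte) error {
			if strings.HasPrefix(string(id), prefix) {
				if match != nil {
					return errors.Errorf("more than one result for ID %s", prefix)
				}
				// Copy the key: bolt key slices are only valid inside the transaction.
				match = append([]byte{}, id...)
			}
			return nil
		})
		if err != nil {
			return nil, err
		}
		if match == nil {
			return nil, errors.Errorf("no entry found with prefix %s", prefix)
		}
		return match, nil
	}
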
@@ -467,11 +471,11 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
// HasContainer checks if a container is present in the state
func (s *BoltState) HasContainer(id string) (bool, error) {
if id == "" {
- return false, ErrEmptyID
+ return false, define.ErrEmptyID
}
if !s.valid {
- return false, ErrDBClosed
+ return false, define.ErrDBClosed
}
ctrID := []byte(id)
@@ -480,7 +484,7 @@ func (s *BoltState) HasContainer(id string) (bool, error) {
if err != nil {
return false, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
exists := false
@@ -515,15 +519,15 @@ func (s *BoltState) HasContainer(id string) (bool, error) {
// The container being added cannot belong to a pod
func (s *BoltState) AddContainer(ctr *Container) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if !ctr.valid {
- return ErrCtrRemoved
+ return define.ErrCtrRemoved
}
if ctr.config.Pod != "" {
- return errors.Wrapf(ErrInvalidArg, "cannot add a container that belongs to a pod with AddContainer - use AddContainerToPod")
+ return errors.Wrapf(define.ErrInvalidArg, "cannot add a container that belongs to a pod with AddContainer - use AddContainerToPod")
}
return s.addContainer(ctr, nil)
@@ -534,18 +538,18 @@ func (s *BoltState) AddContainer(ctr *Container) error {
// pod, use RemoveContainerFromPod
func (s *BoltState) RemoveContainer(ctr *Container) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if ctr.config.Pod != "" {
- return errors.Wrapf(ErrPodExists, "container %s is part of a pod, use RemoveContainerFromPod instead", ctr.ID())
+ return errors.Wrapf(define.ErrPodExists, "container %s is part of a pod, use RemoveContainerFromPod instead", ctr.ID())
}
db, err := s.getDBCon()
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
return s.removeContainer(ctr, nil, tx)
@@ -556,15 +560,15 @@ func (s *BoltState) RemoveContainer(ctr *Container) error {
// UpdateContainer updates a container's state from the database
func (s *BoltState) UpdateContainer(ctr *Container) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if !ctr.valid {
- return ErrCtrRemoved
+ return define.ErrCtrRemoved
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
}
newState := new(ContainerState)
@@ -576,7 +580,7 @@ func (s *BoltState) UpdateContainer(ctr *Container) error {
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
ctrBucket, err := getCtrBucket(tx)
@@ -587,12 +591,12 @@ func (s *BoltState) UpdateContainer(ctr *Container) error {
ctrToUpdate := ctrBucket.Bucket(ctrID)
if ctrToUpdate == nil {
ctr.valid = false
- return errors.Wrapf(ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
}
newStateBytes := ctrToUpdate.Get(stateKey)
if newStateBytes == nil {
- return errors.Wrapf(ErrInternal, "container %s does not have a state key in DB", ctr.ID())
+ return errors.Wrapf(define.ErrInternal, "container %s does not have a state key in DB", ctr.ID())
}
if err := json.Unmarshal(newStateBytes, newState); err != nil {
@@ -624,15 +628,15 @@ func (s *BoltState) UpdateContainer(ctr *Container) error {
// SaveContainer saves a container's current state in the database
func (s *BoltState) SaveContainer(ctr *Container) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if !ctr.valid {
- return ErrCtrRemoved
+ return define.ErrCtrRemoved
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
}
stateJSON, err := json.Marshal(ctr.state)
@@ -647,7 +651,7 @@ func (s *BoltState) SaveContainer(ctr *Container) error {
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
ctrBucket, err := getCtrBucket(tx)
@@ -658,7 +662,7 @@ func (s *BoltState) SaveContainer(ctr *Container) error {
ctrToSave := ctrBucket.Bucket(ctrID)
if ctrToSave == nil {
ctr.valid = false
- return errors.Wrapf(ErrNoSuchCtr, "container %s does not exist in DB", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in DB", ctr.ID())
}
// Update the state
@@ -687,15 +691,15 @@ func (s *BoltState) SaveContainer(ctr *Container) error {
// container. If the slice is empty, no containers depend on the given container
func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) {
if !s.valid {
- return nil, ErrDBClosed
+ return nil, define.ErrDBClosed
}
if !ctr.valid {
- return nil, ErrCtrRemoved
+ return nil, define.ErrCtrRemoved
}
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return nil, errors.Wrapf(ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
}
depCtrs := []string{}
@@ -704,7 +708,7 @@ func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) {
if err != nil {
return nil, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
ctrBucket, err := getCtrBucket(tx)
@@ -715,12 +719,12 @@ func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) {
ctrDB := ctrBucket.Bucket([]byte(ctr.ID()))
if ctrDB == nil {
ctr.valid = false
- return errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
}
dependsBkt := ctrDB.Bucket(dependenciesBkt)
if dependsBkt == nil {
- return errors.Wrapf(ErrInternal, "container %s has no dependencies bucket", ctr.ID())
+ return errors.Wrapf(define.ErrInternal, "container %s has no dependencies bucket", ctr.ID())
}
// Iterate through and add dependencies
@@ -746,7 +750,7 @@ func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) {
// AllContainers retrieves all the containers in the database
func (s *BoltState) AllContainers() ([]*Container, error) {
if !s.valid {
- return nil, ErrDBClosed
+ return nil, define.ErrDBClosed
}
ctrs := []*Container{}
@@ -755,7 +759,7 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
if err != nil {
return nil, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
allCtrsBucket, err := getAllCtrsBucket(tx)
@@ -774,7 +778,7 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
// be much less helpful.
ctrExists := ctrBucket.Bucket(id)
if ctrExists == nil {
- return errors.Wrapf(ErrInternal, "state is inconsistent - container ID %s in all containers, but container not found", string(id))
+ return errors.Wrapf(define.ErrInternal, "state is inconsistent - container ID %s in all containers, but container not found", string(id))
}
ctr := new(Container)
@@ -786,7 +790,7 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
// ignore it safely.
// We just won't include the container in the
// results.
- if errors.Cause(err) != ErrNSMismatch {
+ if errors.Cause(err) != define.ErrNSMismatch {
// Even if it's not an NS mismatch, it's
// not worth erroring over.
// If we do, a single bad container JSON
@@ -813,11 +817,11 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
// comment on this function in state.go.
func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if !ctr.valid {
- return ErrCtrRemoved
+ return define.ErrCtrRemoved
}
newCfgJSON, err := json.Marshal(newCfg)
@@ -829,7 +833,7 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
ctrBkt, err := getCtrBucket(tx)
@@ -840,7 +844,7 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf
ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
if ctrDB == nil {
ctr.valid = false
- return errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
}
if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
@@ -857,11 +861,11 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf
// comment on this function in state.go.
func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if !pod.valid {
- return ErrPodRemoved
+ return define.ErrPodRemoved
}
newCfgJSON, err := json.Marshal(newCfg)
@@ -873,7 +877,7 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@@ -884,7 +888,7 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
podDB := podBkt.Bucket([]byte(pod.ID()))
if podDB == nil {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
}
if err := podDB.Put(configKey, newCfgJSON); err != nil {
@@ -899,11 +903,11 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
// Pod retrieves a pod given its full ID
func (s *BoltState) Pod(id string) (*Pod, error) {
if id == "" {
- return nil, ErrEmptyID
+ return nil, define.ErrEmptyID
}
if !s.valid {
- return nil, ErrDBClosed
+ return nil, define.ErrDBClosed
}
podID := []byte(id)
@@ -916,7 +920,7 @@ func (s *BoltState) Pod(id string) (*Pod, error) {
if err != nil {
return nil, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@@ -936,11 +940,11 @@ func (s *BoltState) Pod(id string) (*Pod, error) {
// LookupPod retrieves a pod from full or unique partial ID or name
func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
if idOrName == "" {
- return nil, ErrEmptyID
+ return nil, define.ErrEmptyID
}
if !s.valid {
- return nil, ErrDBClosed
+ return nil, define.ErrDBClosed
}
pod := new(Pod)
@@ -951,14 +955,9 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
if err != nil {
return nil, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
- idBucket, err := getIDBucket(tx)
- if err != nil {
- return err
- }
-
podBkt, err := getPodBucket(tx)
if err != nil {
return err
@@ -1006,7 +1005,7 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
// They did not give us a full pod name or ID.
// Search for partial ID matches.
exists := false
- err = idBucket.ForEach(func(checkID, checkName []byte) error {
+ err = podBkt.ForEach(func(checkID, checkName []byte) error {
// If the pod isn't in our namespace, we
// can't match it
if s.namespaceBytes != nil {
@@ -1017,7 +1016,7 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
}
if strings.HasPrefix(string(checkID), idOrName) {
if exists {
- return errors.Wrapf(ErrPodExists, "more than one result for ID or name %s", idOrName)
+ return errors.Wrapf(define.ErrPodExists, "more than one result for ID or name %s", idOrName)
}
id = checkID
exists = true
@@ -1029,9 +1028,9 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
return err
} else if !exists {
if isCtr {
- return errors.Wrapf(ErrNoSuchPod, "%s is a container, not a pod", idOrName)
+ return errors.Wrapf(define.ErrNoSuchPod, "%s is a container, not a pod", idOrName)
}
- return errors.Wrapf(ErrNoSuchPod, "no pod with name or ID %s found", idOrName)
+ return errors.Wrapf(define.ErrNoSuchPod, "no pod with name or ID %s found", idOrName)
}
// We might have found a container ID, but it's OK
@@ -1048,11 +1047,11 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
// HasPod checks if a pod with the given ID exists in the state
func (s *BoltState) HasPod(id string) (bool, error) {
if id == "" {
- return false, ErrEmptyID
+ return false, define.ErrEmptyID
}
if !s.valid {
- return false, ErrDBClosed
+ return false, define.ErrDBClosed
}
podID := []byte(id)
@@ -1063,7 +1062,7 @@ func (s *BoltState) HasPod(id string) (bool, error) {
if err != nil {
return false, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@@ -1095,19 +1094,19 @@ func (s *BoltState) HasPod(id string) (bool, error) {
// PodHasContainer checks if the given pod has a container with the given ID
func (s *BoltState) PodHasContainer(pod *Pod, id string) (bool, error) {
if id == "" {
- return false, ErrEmptyID
+ return false, define.ErrEmptyID
}
if !s.valid {
- return false, ErrDBClosed
+ return false, define.ErrDBClosed
}
if !pod.valid {
- return false, ErrPodRemoved
+ return false, define.ErrPodRemoved
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return false, errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return false, errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
}
ctrID := []byte(id)
@@ -1119,7 +1118,7 @@ func (s *BoltState) PodHasContainer(pod *Pod, id string) (bool, error) {
if err != nil {
return false, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@@ -1131,13 +1130,13 @@ func (s *BoltState) PodHasContainer(pod *Pod, id string) (bool, error) {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "pod %s not found in database", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "pod %s not found in database", pod.ID())
}
// Get pod containers bucket
podCtrs := podDB.Bucket(containersBkt)
if podCtrs == nil {
- return errors.Wrapf(ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
+ return errors.Wrapf(define.ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
}
// Don't bother with a namespace check on the container -
@@ -1162,15 +1161,15 @@ func (s *BoltState) PodHasContainer(pod *Pod, id string) (bool, error) {
// PodContainersByID returns the IDs of all containers present in the given pod
func (s *BoltState) PodContainersByID(pod *Pod) ([]string, error) {
if !s.valid {
- return nil, ErrDBClosed
+ return nil, define.ErrDBClosed
}
if !pod.valid {
- return nil, ErrPodRemoved
+ return nil, define.ErrPodRemoved
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return nil, errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return nil, errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
}
podID := []byte(pod.ID())
@@ -1181,7 +1180,7 @@ func (s *BoltState) PodContainersByID(pod *Pod) ([]string, error) {
if err != nil {
return nil, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@@ -1193,13 +1192,13 @@ func (s *BoltState) PodContainersByID(pod *Pod) ([]string, error) {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "pod %s not found in database", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "pod %s not found in database", pod.ID())
}
// Get pod containers bucket
podCtrs := podDB.Bucket(containersBkt)
if podCtrs == nil {
- return errors.Wrapf(ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
+ return errors.Wrapf(define.ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
}
// Iterate through all containers in the pod
@@ -1224,15 +1223,15 @@ func (s *BoltState) PodContainersByID(pod *Pod) ([]string, error) {
// PodContainers returns all the containers present in the given pod
func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) {
if !s.valid {
- return nil, ErrDBClosed
+ return nil, define.ErrDBClosed
}
if !pod.valid {
- return nil, ErrPodRemoved
+ return nil, define.ErrPodRemoved
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return nil, errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return nil, errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
}
podID := []byte(pod.ID())
@@ -1243,7 +1242,7 @@ func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) {
if err != nil {
return nil, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@@ -1260,13 +1259,13 @@ func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "pod %s not found in database", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "pod %s not found in database", pod.ID())
}
// Get pod containers bucket
podCtrs := podDB.Bucket(containersBkt)
if podCtrs == nil {
- return errors.Wrapf(ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
+ return errors.Wrapf(define.ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
}
// Iterate through all containers in the pod
@@ -1295,11 +1294,11 @@ func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) {
// the sub bucket holding the container dependencies that this volume has
func (s *BoltState) AddVolume(volume *Volume) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if !volume.valid {
- return ErrVolumeRemoved
+ return define.ErrVolumeRemoved
}
volName := []byte(volume.Name())
@@ -1313,7 +1312,7 @@ func (s *BoltState) AddVolume(volume *Volume) error {
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
volBkt, err := getVolBucket(tx)
@@ -1329,7 +1328,7 @@ func (s *BoltState) AddVolume(volume *Volume) error {
// Check if we already have a volume with the given name
volExists := allVolsBkt.Get(volName)
if volExists != nil {
- return errors.Wrapf(ErrVolumeExists, "name %s is in use", volume.Name())
+ return errors.Wrapf(define.ErrVolumeExists, "name %s is in use", volume.Name())
}
// We are good to add the volume
@@ -1361,7 +1360,7 @@ func (s *BoltState) AddVolume(volume *Volume) error {
// RemoveVolume removes the given volume from the state
func (s *BoltState) RemoveVolume(volume *Volume) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
volName := []byte(volume.Name())
@@ -1370,7 +1369,7 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
volBkt, err := getVolBucket(tx)
@@ -1392,7 +1391,7 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
volDB := volBkt.Bucket(volName)
if volDB == nil {
volume.valid = false
- return errors.Wrapf(ErrNoSuchVolume, "volume %s does not exist in DB", volume.Name())
+ return errors.Wrapf(define.ErrNoSuchVolume, "volume %s does not exist in DB", volume.Name())
}
// Check if volume is not being used by any container
@@ -1422,7 +1421,7 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
return errors.Wrapf(err, "error getting list of dependencies from dependencies bucket for volumes %q", volume.Name())
}
if len(deps) > 0 {
- return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by container(s) %s", volume.Name(), strings.Join(deps, ","))
+ return errors.Wrapf(define.ErrVolumeBeingUsed, "volume %s is being used by container(s) %s", volume.Name(), strings.Join(deps, ","))
}
}
@@ -1443,7 +1442,7 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
// AllVolumes returns all volumes present in the state
func (s *BoltState) AllVolumes() ([]*Volume, error) {
if !s.valid {
- return nil, ErrDBClosed
+ return nil, define.ErrDBClosed
}
volumes := []*Volume{}
@@ -1452,7 +1451,7 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) {
if err != nil {
return nil, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
allVolsBucket, err := getAllVolsBucket(tx)
@@ -1469,14 +1468,14 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) {
// This check can be removed if performance becomes an
// issue, but much less helpful errors will be produced
if volExists == nil {
- return errors.Wrapf(ErrInternal, "inconsistency in state - volume %s is in all volumes bucket but volume not found", string(id))
+ return errors.Wrapf(define.ErrInternal, "inconsistency in state - volume %s is in all volumes bucket but volume not found", string(id))
}
volume := new(Volume)
volume.config = new(VolumeConfig)
if err := s.getVolumeFromDB(id, volume, volBucket); err != nil {
- if errors.Cause(err) != ErrNSMismatch {
+ if errors.Cause(err) != define.ErrNSMismatch {
logrus.Errorf("Error retrieving volume %s from the database: %v", string(id), err)
}
} else {
@@ -1497,11 +1496,11 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) {
// Volume retrieves a volume from full name
func (s *BoltState) Volume(name string) (*Volume, error) {
if name == "" {
- return nil, ErrEmptyID
+ return nil, define.ErrEmptyID
}
if !s.valid {
- return nil, ErrDBClosed
+ return nil, define.ErrDBClosed
}
volName := []byte(name)
@@ -1513,7 +1512,7 @@ func (s *BoltState) Volume(name string) (*Volume, error) {
if err != nil {
return nil, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
volBkt, err := getVolBucket(tx)
@@ -1533,11 +1532,11 @@ func (s *BoltState) Volume(name string) (*Volume, error) {
// HasVolume returns true if the given volume exists in the state, otherwise it returns false
func (s *BoltState) HasVolume(name string) (bool, error) {
if name == "" {
- return false, ErrEmptyID
+ return false, define.ErrEmptyID
}
if !s.valid {
- return false, ErrDBClosed
+ return false, define.ErrDBClosed
}
volName := []byte(name)
@@ -1548,7 +1547,7 @@ func (s *BoltState) HasVolume(name string) (bool, error) {
if err != nil {
return false, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
volBkt, err := getVolBucket(tx)
@@ -1575,11 +1574,11 @@ func (s *BoltState) HasVolume(name string) (bool, error) {
// volume. If the slice is empty, no containers use the given volume
func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
if !s.valid {
- return nil, ErrDBClosed
+ return nil, define.ErrDBClosed
}
if !volume.valid {
- return nil, ErrVolumeRemoved
+ return nil, define.ErrVolumeRemoved
}
depCtrs := []string{}
@@ -1588,7 +1587,7 @@ func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
if err != nil {
return nil, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
volBucket, err := getVolBucket(tx)
@@ -1604,12 +1603,12 @@ func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
volDB := volBucket.Bucket([]byte(volume.Name()))
if volDB == nil {
volume.valid = false
- return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in DB", volume.Name())
+ return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in DB", volume.Name())
}
dependsBkt := volDB.Bucket(volDependenciesBkt)
if dependsBkt == nil {
- return errors.Wrapf(ErrInternal, "volume %s has no dependencies bucket", volume.Name())
+ return errors.Wrapf(define.ErrInternal, "volume %s has no dependencies bucket", volume.Name())
}
// Iterate through and add dependencies
@@ -1641,15 +1640,15 @@ func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
// AddPod adds the given pod to the state.
func (s *BoltState) AddPod(pod *Pod) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if !pod.valid {
- return ErrPodRemoved
+ return define.ErrPodRemoved
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
}
podID := []byte(pod.ID())
@@ -1674,7 +1673,7 @@ func (s *BoltState) AddPod(pod *Pod) error {
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@@ -1705,11 +1704,11 @@ func (s *BoltState) AddPod(pod *Pod) error {
// Check if we already have something with the given ID and name
idExist := idsBkt.Get(podID)
if idExist != nil {
- return errors.Wrapf(ErrPodExists, "ID %s is in use", pod.ID())
+ return errors.Wrapf(define.ErrPodExists, "ID %s is in use", pod.ID())
}
nameExist := namesBkt.Get(podName)
if nameExist != nil {
- return errors.Wrapf(ErrPodExists, "name %s is in use", pod.Name())
+ return errors.Wrapf(define.ErrPodExists, "name %s is in use", pod.Name())
}
// We are good to add the pod
@@ -1765,15 +1764,15 @@ func (s *BoltState) AddPod(pod *Pod) error {
// Only empty pods can be removed
func (s *BoltState) RemovePod(pod *Pod) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if !pod.valid {
- return ErrPodRemoved
+ return define.ErrPodRemoved
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
}
podID := []byte(pod.ID())
@@ -1783,7 +1782,7 @@ func (s *BoltState) RemovePod(pod *Pod) error {
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@@ -1815,7 +1814,7 @@ func (s *BoltState) RemovePod(pod *Pod) error {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "pod %s does not exist in DB", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "pod %s does not exist in DB", pod.ID())
}
// Check if pod is empty
@@ -1827,7 +1826,7 @@ func (s *BoltState) RemovePod(pod *Pod) error {
if podCtrsBkt != nil {
cursor := podCtrsBkt.Cursor()
if id, _ := cursor.First(); id != nil {
- return errors.Wrapf(ErrCtrExists, "pod %s is not empty", pod.ID())
+ return errors.Wrapf(define.ErrCtrExists, "pod %s is not empty", pod.ID())
}
}
@@ -1861,15 +1860,15 @@ func (s *BoltState) RemovePod(pod *Pod) error {
// RemovePodContainers removes all containers in a pod
func (s *BoltState) RemovePodContainers(pod *Pod) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if !pod.valid {
- return ErrPodRemoved
+ return define.ErrPodRemoved
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
}
podID := []byte(pod.ID())
@@ -1878,7 +1877,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@@ -1910,12 +1909,12 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "pod %s does not exist in DB", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "pod %s does not exist in DB", pod.ID())
}
podCtrsBkt := podDB.Bucket(containersBkt)
if podCtrsBkt == nil {
- return errors.Wrapf(ErrInternal, "pod %s does not have a containers bucket", pod.ID())
+ return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", pod.ID())
}
// Traverse all containers in the pod with a cursor
@@ -1926,7 +1925,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
if ctr == nil {
// This should never happen
// State is inconsistent
- return errors.Wrapf(ErrNoSuchCtr, "pod %s referenced nonexistant container %s", pod.ID(), string(id))
+ return errors.Wrapf(define.ErrNoSuchCtr, "pod %s referenced nonexistant container %s", pod.ID(), string(id))
}
ctrDeps := ctr.Bucket(dependenciesBkt)
// This should never be nil, but if it is, we're
@@ -1935,7 +1934,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
err = ctrDeps.ForEach(func(depID, name []byte) error {
exists := podCtrsBkt.Get(depID)
if exists == nil {
- return errors.Wrapf(ErrCtrExists, "container %s has dependency %s outside of pod %s", string(id), string(depID), pod.ID())
+ return errors.Wrapf(define.ErrCtrExists, "container %s has dependency %s outside of pod %s", string(id), string(depID), pod.ID())
}
return nil
})
@@ -1947,7 +1946,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
// Dependencies are set, we're clear to remove
if err := ctrBkt.DeleteBucket(id); err != nil {
- return errors.Wrapf(ErrInternal, "error deleting container %s from DB", string(id))
+ return errors.Wrapf(define.ErrInternal, "error deleting container %s from DB", string(id))
}
if err := idsBkt.Delete(id); err != nil {
@@ -1989,19 +1988,19 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
// The container will be added to the state and the pod
func (s *BoltState) AddContainerToPod(pod *Pod, ctr *Container) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if !pod.valid {
- return ErrPodRemoved
+ return define.ErrPodRemoved
}
if !ctr.valid {
- return ErrCtrRemoved
+ return define.ErrCtrRemoved
}
if ctr.config.Pod != pod.ID() {
- return errors.Wrapf(ErrNoSuchCtr, "container %s is not part of pod %s", ctr.ID(), pod.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not part of pod %s", ctr.ID(), pod.ID())
}
return s.addContainer(ctr, pod)
@@ -2011,35 +2010,35 @@ func (s *BoltState) AddContainerToPod(pod *Pod, ctr *Container) error {
// The container will also be removed from the state
func (s *BoltState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if !pod.valid {
- return ErrPodRemoved
+ return define.ErrPodRemoved
}
if s.namespace != "" {
if s.namespace != pod.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
}
if s.namespace != ctr.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "container %s in in namespace %q but we are in namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "container %s in in namespace %q but we are in namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
}
}
if ctr.config.Pod == "" {
- return errors.Wrapf(ErrNoSuchPod, "container %s is not part of a pod, use RemoveContainer instead", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "container %s is not part of a pod, use RemoveContainer instead", ctr.ID())
}
if ctr.config.Pod != pod.ID() {
- return errors.Wrapf(ErrInvalidArg, "container %s is not part of pod %s", ctr.ID(), pod.ID())
+ return errors.Wrapf(define.ErrInvalidArg, "container %s is not part of pod %s", ctr.ID(), pod.ID())
}
db, err := s.getDBCon()
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
return s.removeContainer(ctr, pod, tx)
@@ -2050,15 +2049,15 @@ func (s *BoltState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
// UpdatePod updates a pod's state from the database
func (s *BoltState) UpdatePod(pod *Pod) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if !pod.valid {
- return ErrPodRemoved
+ return define.ErrPodRemoved
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
}
newState := new(podState)
@@ -2067,7 +2066,7 @@ func (s *BoltState) UpdatePod(pod *Pod) error {
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
podID := []byte(pod.ID())
@@ -2080,13 +2079,13 @@ func (s *BoltState) UpdatePod(pod *Pod) error {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in database", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in database", pod.ID())
}
// Get the pod state JSON
podStateBytes := podDB.Get(stateKey)
if podStateBytes == nil {
- return errors.Wrapf(ErrInternal, "pod %s is missing state key in DB", pod.ID())
+ return errors.Wrapf(define.ErrInternal, "pod %s is missing state key in DB", pod.ID())
}
if err := json.Unmarshal(podStateBytes, newState); err != nil {
@@ -2107,15 +2106,15 @@ func (s *BoltState) UpdatePod(pod *Pod) error {
// SavePod saves a pod's state to the database
func (s *BoltState) SavePod(pod *Pod) error {
if !s.valid {
- return ErrDBClosed
+ return define.ErrDBClosed
}
if !pod.valid {
- return ErrPodRemoved
+ return define.ErrPodRemoved
}
if s.namespace != "" && s.namespace != pod.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
}
stateJSON, err := json.Marshal(pod.state)
@@ -2127,7 +2126,7 @@ func (s *BoltState) SavePod(pod *Pod) error {
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
podID := []byte(pod.ID())
@@ -2140,7 +2139,7 @@ func (s *BoltState) SavePod(pod *Pod) error {
podDB := podBkt.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in database", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in database", pod.ID())
}
// Set the pod state JSON
@@ -2160,7 +2159,7 @@ func (s *BoltState) SavePod(pod *Pod) error {
// AllPods returns all pods present in the state
func (s *BoltState) AllPods() ([]*Pod, error) {
if !s.valid {
- return nil, ErrDBClosed
+ return nil, define.ErrDBClosed
}
pods := []*Pod{}
@@ -2169,7 +2168,7 @@ func (s *BoltState) AllPods() ([]*Pod, error) {
if err != nil {
return nil, err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
allPodsBucket, err := getAllPodsBucket(tx)
@@ -2187,7 +2186,7 @@ func (s *BoltState) AllPods() ([]*Pod, error) {
// This check can be removed if performance becomes an
// issue, but much less helpful errors will be produced
if podExists == nil {
- return errors.Wrapf(ErrInternal, "inconsistency in state - pod %s is in all pods bucket but pod not found", string(id))
+ return errors.Wrapf(define.ErrInternal, "inconsistency in state - pod %s is in all pods bucket but pod not found", string(id))
}
pod := new(Pod)
@@ -2195,7 +2194,7 @@ func (s *BoltState) AllPods() ([]*Pod, error) {
pod.state = new(podState)
if err := s.getPodFromDB(id, pod, podBucket); err != nil {
- if errors.Cause(err) != ErrNSMismatch {
+ if errors.Cause(err) != define.ErrNSMismatch {
logrus.Errorf("Error retrieving pod %s from the database: %v", string(id), err)
}
} else {
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 313e5f4d7..408ef7224 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -2,9 +2,11 @@ package libpod
import (
"bytes"
+ "path/filepath"
"runtime"
"strings"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
bolt "github.com/etcd-io/bbolt"
@@ -72,98 +74,160 @@ var (
volPathKey = []byte(volPathName)
)
+// This represents a field in the runtime configuration that will be validated
+// against the DB to ensure no configuration mismatches occur.
+type dbConfigValidation struct {
+ name string // Only used for error messages
+ runtimeValue string
+ key []byte
+ defaultValue string
+}
+
// Check if the configuration of the database is compatible with the
// configuration of the runtime opening it
// If there is no runtime configuration loaded, load our own
func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error {
- err := db.Update(func(tx *bolt.Tx) error {
+ storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
+ if err != nil {
+ return err
+ }
+
+ // We need to validate the following things
+ checks := []dbConfigValidation{
+ {
+ "OS",
+ runtime.GOOS,
+ osKey,
+ runtime.GOOS,
+ },
+ {
+ "libpod root directory (staticdir)",
+ rt.config.StaticDir,
+ staticDirKey,
+ "",
+ },
+ {
+ "libpod temporary files directory (tmpdir)",
+ rt.config.TmpDir,
+ tmpDirKey,
+ "",
+ },
+ {
+ "storage temporary directory (runroot)",
+ rt.config.StorageConfig.RunRoot,
+ runRootKey,
+ storeOpts.RunRoot,
+ },
+ {
+ "storage graph root directory (graphroot)",
+ rt.config.StorageConfig.GraphRoot,
+ graphRootKey,
+ storeOpts.GraphRoot,
+ },
+ {
+ "storage graph driver",
+ rt.config.StorageConfig.GraphDriverName,
+ graphDriverKey,
+ storeOpts.GraphDriverName,
+ },
+ {
+ "volume path",
+ rt.config.VolumePath,
+ volPathKey,
+ "",
+ },
+ }
+
+ // These fields were missing and will have to be recreated.
+ missingFields := []dbConfigValidation{}
+
+ // Let's try and validate read-only first
+ err = db.View(func(tx *bolt.Tx) error {
configBkt, err := getRuntimeConfigBucket(tx)
if err != nil {
return err
}
- if err := validateDBAgainstConfig(configBkt, "OS", runtime.GOOS, osKey, runtime.GOOS); err != nil {
- return err
+ for _, check := range checks {
+ exists, err := readOnlyValidateConfig(configBkt, check)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ missingFields = append(missingFields, check)
+ }
}
- if err := validateDBAgainstConfig(configBkt, "libpod root directory (staticdir)",
- rt.config.StaticDir, staticDirKey, ""); err != nil {
- return err
- }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
- if err := validateDBAgainstConfig(configBkt, "libpod temporary files directory (tmpdir)",
- rt.config.TmpDir, tmpDirKey, ""); err != nil {
- return err
- }
+ if len(missingFields) == 0 {
+ return nil
+ }
- storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
+ // Populate missing fields
+ return db.Update(func(tx *bolt.Tx) error {
+ configBkt, err := getRuntimeConfigBucket(tx)
if err != nil {
return err
}
- if err := validateDBAgainstConfig(configBkt, "storage temporary directory (runroot)",
- rt.config.StorageConfig.RunRoot, runRootKey,
- storeOpts.RunRoot); err != nil {
- return err
- }
- if err := validateDBAgainstConfig(configBkt, "storage graph root directory (graphroot)",
- rt.config.StorageConfig.GraphRoot, graphRootKey,
- storeOpts.GraphRoot); err != nil {
- return err
- }
+ for _, missing := range missingFields {
+ dbValue := []byte(missing.runtimeValue)
+ if missing.runtimeValue == "" && missing.defaultValue != "" {
+ dbValue = []byte(missing.defaultValue)
+ }
- if err := validateDBAgainstConfig(configBkt, "storage graph driver",
- rt.config.StorageConfig.GraphDriverName,
- graphDriverKey,
- storeOpts.GraphDriverName); err != nil {
- return err
+ if err := configBkt.Put(missing.key, dbValue); err != nil {
+ return errors.Wrapf(err, "error updating %s in DB runtime config", missing.name)
+ }
}
- return validateDBAgainstConfig(configBkt, "volume path",
- rt.config.VolumePath, volPathKey, "")
+ return nil
})
-
- return err
}
-// Validate a configuration entry in the DB against current runtime config
-// If the given configuration key does not exist it will be created
-// If the given runtimeValue or value retrieved from the database are the empty
-// string and defaultValue is not, defaultValue will be checked instead. This
-// ensures that we will not fail on configuration changes in configured c/storage.
-func validateDBAgainstConfig(bucket *bolt.Bucket, fieldName, runtimeValue string, keyName []byte, defaultValue string) error {
- keyBytes := bucket.Get(keyName)
+// Attempt a read-only validation of a configuration entry in the DB against an
+// element of the current runtime configuration.
+// If the configuration key in question does not exist, (false, nil) will be
+// returned.
+// If the configuration key does exist, and matches the runtime configuration
+// successfully, (true, nil) is returned.
+// An error is only returned when validation fails.
+// if the given runtimeValue or value retrieved from the database are empty,
+// and defaultValue is not, defaultValue will be checked instead. This ensures
+// that we will not fail on configuration changes in c/storage (where we may
+// pass the empty string to use defaults).
+func readOnlyValidateConfig(bucket *bolt.Bucket, toCheck dbConfigValidation) (bool, error) {
+ keyBytes := bucket.Get(toCheck.key)
if keyBytes == nil {
- dbValue := []byte(runtimeValue)
- if runtimeValue == "" && defaultValue != "" {
- dbValue = []byte(defaultValue)
- }
+ // False return indicates missing key
+ return false, nil
+ }
- if err := bucket.Put(keyName, dbValue); err != nil {
- return errors.Wrapf(err, "error updating %s in DB runtime config", fieldName)
- }
- } else {
- if runtimeValue != string(keyBytes) {
- // If runtimeValue is the empty string, check against
- // the default
- if runtimeValue == "" && defaultValue != "" &&
- string(keyBytes) == defaultValue {
- return nil
- }
+ dbValue := string(keyBytes)
- // If DB value is the empty string, check that the
- // runtime value is the default
- if string(keyBytes) == "" && defaultValue != "" &&
- runtimeValue == defaultValue {
- return nil
- }
+ if toCheck.runtimeValue != dbValue {
+ // If the runtime value is the empty string and default is not,
+ // check against default.
+ if toCheck.runtimeValue == "" && toCheck.defaultValue != "" && dbValue == toCheck.defaultValue {
+ return true, nil
+ }
- return errors.Wrapf(ErrDBBadConfig, "database %s %s does not match our %s %s",
- fieldName, string(keyBytes), fieldName, runtimeValue)
+ // If the DB value is the empty string, check that the runtime
+ // value is the default.
+ if dbValue == "" && toCheck.defaultValue != "" && toCheck.runtimeValue == toCheck.defaultValue {
+ return true, nil
}
+
+ return true, errors.Wrapf(define.ErrDBBadConfig, "database %s %q does not match our %s %q",
+ toCheck.name, dbValue, toCheck.name, toCheck.runtimeValue)
}
- return nil
+ return true, nil
}
// Open a connection to the database.
@@ -183,6 +247,15 @@ func (s *BoltState) getDBCon() (*bolt.DB, error) {
return db, nil
}
+// deferredCloseDBCon closes the bolt db but instead of returning an
+// error it logs the error. it is meant to be used within the confines
+// of a defer statement only
+func (s *BoltState) deferredCloseDBCon(db *bolt.DB) {
+ if err := s.closeDBCon(db); err != nil {
+ logrus.Errorf("failed to close libpod db: %q", err)
+ }
+}
+
// Close a connection to the database.
// MUST be used in place of `db.Close()` to ensure proper unlocking of the
// state.
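
The new deferredCloseDBCon wrapper exists because a bare defer s.closeDBCon(db) silently drops the error that closeDBCon returns. The same pattern in standalone form (a sketch assuming logrus, matching the logging already used in this file):

	// closeAndLog closes the database and logs, rather than returns, any
	// failure, which makes it safe to call directly in a defer statement.
	func closeAndLog(db *bolt.DB) {
		if err := db.Close(); err != nil {
			logrus.Errorf("failed to close bolt DB: %v", err)
		}
	}

	// typical use:
	//     db, err := bolt.Open(path, 0600, nil)
	//     if err != nil {
	//         return err
	//     }
	//     defer closeAndLog(db)
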
@@ -197,7 +270,7 @@ func (s *BoltState) closeDBCon(db *bolt.DB) error {
func getIDBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(idRegistryBkt)
if bkt == nil {
- return nil, errors.Wrapf(ErrDBBadConfig, "id registry bucket not found in DB")
+ return nil, errors.Wrapf(define.ErrDBBadConfig, "id registry bucket not found in DB")
}
return bkt, nil
}
@@ -205,7 +278,7 @@ func getIDBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getNamesBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(nameRegistryBkt)
if bkt == nil {
- return nil, errors.Wrapf(ErrDBBadConfig, "name registry bucket not found in DB")
+ return nil, errors.Wrapf(define.ErrDBBadConfig, "name registry bucket not found in DB")
}
return bkt, nil
}
@@ -213,7 +286,7 @@ func getNamesBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getNSBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(nsRegistryBkt)
if bkt == nil {
- return nil, errors.Wrapf(ErrDBBadConfig, "namespace registry bucket not found in DB")
+ return nil, errors.Wrapf(define.ErrDBBadConfig, "namespace registry bucket not found in DB")
}
return bkt, nil
}
@@ -221,7 +294,7 @@ func getNSBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getCtrBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(ctrBkt)
if bkt == nil {
- return nil, errors.Wrapf(ErrDBBadConfig, "containers bucket not found in DB")
+ return nil, errors.Wrapf(define.ErrDBBadConfig, "containers bucket not found in DB")
}
return bkt, nil
}
@@ -229,7 +302,7 @@ func getCtrBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getAllCtrsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(allCtrsBkt)
if bkt == nil {
- return nil, errors.Wrapf(ErrDBBadConfig, "all containers bucket not found in DB")
+ return nil, errors.Wrapf(define.ErrDBBadConfig, "all containers bucket not found in DB")
}
return bkt, nil
}
@@ -237,7 +310,7 @@ func getAllCtrsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getPodBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(podBkt)
if bkt == nil {
- return nil, errors.Wrapf(ErrDBBadConfig, "pods bucket not found in DB")
+ return nil, errors.Wrapf(define.ErrDBBadConfig, "pods bucket not found in DB")
}
return bkt, nil
}
@@ -245,7 +318,7 @@ func getPodBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getAllPodsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(allPodsBkt)
if bkt == nil {
- return nil, errors.Wrapf(ErrDBBadConfig, "all pods bucket not found in DB")
+ return nil, errors.Wrapf(define.ErrDBBadConfig, "all pods bucket not found in DB")
}
return bkt, nil
}
@@ -253,7 +326,7 @@ func getAllPodsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getVolBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(volBkt)
if bkt == nil {
- return nil, errors.Wrapf(ErrDBBadConfig, "volumes bucket not found in DB")
+ return nil, errors.Wrapf(define.ErrDBBadConfig, "volumes bucket not found in DB")
}
return bkt, nil
}
@@ -261,7 +334,7 @@ func getVolBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(allVolsBkt)
if bkt == nil {
- return nil, errors.Wrapf(ErrDBBadConfig, "all volumes bucket not found in DB")
+ return nil, errors.Wrapf(define.ErrDBBadConfig, "all volumes bucket not found in DB")
}
return bkt, nil
}
@@ -269,28 +342,27 @@ func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(runtimeConfigBkt)
if bkt == nil {
- return nil, errors.Wrapf(ErrDBBadConfig, "runtime configuration bucket not found in DB")
+ return nil, errors.Wrapf(define.ErrDBBadConfig, "runtime configuration bucket not found in DB")
}
return bkt, nil
}
func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.Bucket) error {
- valid := true
ctrBkt := ctrsBkt.Bucket(id)
if ctrBkt == nil {
- return errors.Wrapf(ErrNoSuchCtr, "container %s not found in DB", string(id))
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s not found in DB", string(id))
}
if s.namespaceBytes != nil {
ctrNamespaceBytes := ctrBkt.Get(namespaceKey)
if !bytes.Equal(s.namespaceBytes, ctrNamespaceBytes) {
- return errors.Wrapf(ErrNSMismatch, "cannot retrieve container %s as it is part of namespace %q and we are in namespace %q", string(id), string(ctrNamespaceBytes), s.namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "cannot retrieve container %s as it is part of namespace %q and we are in namespace %q", string(id), string(ctrNamespaceBytes), s.namespace)
}
}
configBytes := ctrBkt.Get(configKey)
if configBytes == nil {
- return errors.Wrapf(ErrInternal, "container %s missing config key in DB", string(id))
+ return errors.Wrapf(define.ErrInternal, "container %s missing config key in DB", string(id))
}
if err := json.Unmarshal(configBytes, ctr.config); err != nil {
@@ -304,8 +376,25 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
}
ctr.lock = lock
+ if ctr.config.OCIRuntime == "" {
+ ctr.ociRuntime = s.runtime.defaultOCIRuntime
+ } else {
+ // Handle legacy containers which might use a literal path for
+ // their OCI runtime name.
+ runtimeName := ctr.config.OCIRuntime
+ if strings.HasPrefix(runtimeName, "/") {
+ runtimeName = filepath.Base(runtimeName)
+ }
+
+ ociRuntime, ok := s.runtime.ociRuntimes[runtimeName]
+ if !ok {
+ return errors.Wrapf(define.ErrInternal, "container %s was created with OCI runtime %s, but that runtime is not available in the current configuration", ctr.ID(), ctr.config.OCIRuntime)
+ }
+ ctr.ociRuntime = ociRuntime
+ }
+
ctr.runtime = s.runtime
- ctr.valid = valid
+ ctr.valid = true
return nil
}
@@ -313,19 +402,19 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error {
podDB := podBkt.Bucket(id)
if podDB == nil {
- return errors.Wrapf(ErrNoSuchPod, "pod with ID %s not found", string(id))
+ return errors.Wrapf(define.ErrNoSuchPod, "pod with ID %s not found", string(id))
}
if s.namespaceBytes != nil {
podNamespaceBytes := podDB.Get(namespaceKey)
if !bytes.Equal(s.namespaceBytes, podNamespaceBytes) {
- return errors.Wrapf(ErrNSMismatch, "cannot retrieve pod %s as it is part of namespace %q and we are in namespace %q", string(id), string(podNamespaceBytes), s.namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "cannot retrieve pod %s as it is part of namespace %q and we are in namespace %q", string(id), string(podNamespaceBytes), s.namespace)
}
}
podConfigBytes := podDB.Get(configKey)
if podConfigBytes == nil {
- return errors.Wrapf(ErrInternal, "pod %s is missing configuration key in DB", string(id))
+ return errors.Wrapf(define.ErrInternal, "pod %s is missing configuration key in DB", string(id))
}
if err := json.Unmarshal(podConfigBytes, pod.config); err != nil {
@@ -348,12 +437,12 @@ func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error
func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bucket) error {
volDB := volBkt.Bucket(name)
if volDB == nil {
- return errors.Wrapf(ErrNoSuchVolume, "volume with name %s not found", string(name))
+ return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %s not found", string(name))
}
volConfigBytes := volDB.Get(configKey)
if volConfigBytes == nil {
- return errors.Wrapf(ErrInternal, "volume %s is missing configuration key in DB", string(name))
+ return errors.Wrapf(define.ErrInternal, "volume %s is missing configuration key in DB", string(name))
}
if err := json.Unmarshal(volConfigBytes, volume.config); err != nil {
@@ -370,7 +459,7 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
// If pod is not nil, the container is added to the pod as well
func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
if s.namespace != "" && s.namespace != ctr.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "cannot add container %s as it is in namespace %q and we are in namespace %q",
+ return errors.Wrapf(define.ErrNSMismatch, "cannot add container %s as it is in namespace %q and we are in namespace %q",
ctr.ID(), s.namespace, ctr.config.Namespace)
}
@@ -399,7 +488,7 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
if err != nil {
return err
}
- defer s.closeDBCon(db)
+ defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
idsBucket, err := getIDBucket(tx)
@@ -446,16 +535,16 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
podDB = podBucket.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "pod %s does not exist in database", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "pod %s does not exist in database", pod.ID())
}
podCtrs = podDB.Bucket(containersBkt)
if podCtrs == nil {
- return errors.Wrapf(ErrInternal, "pod %s does not have a containers bucket", pod.ID())
+ return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", pod.ID())
}
podNS := podDB.Get(namespaceKey)
if !bytes.Equal(podNS, ctrNamespace) {
- return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %s and pod %s is in namespace %s",
+ return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %s and pod %s is in namespace %s",
ctr.ID(), ctr.config.Namespace, pod.ID(), pod.config.Namespace)
}
}
@@ -463,11 +552,11 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
// Check if we already have a container with the given ID and name
idExist := idsBucket.Get(ctrID)
if idExist != nil {
- return errors.Wrapf(ErrCtrExists, "ID %s is in use", ctr.ID())
+ return errors.Wrapf(define.ErrCtrExists, "ID %s is in use", ctr.ID())
}
nameExist := namesBucket.Get(ctrName)
if nameExist != nil {
- return errors.Wrapf(ErrCtrExists, "name %s is in use", ctr.Name())
+ return errors.Wrapf(define.ErrCtrExists, "name %s is in use", ctr.Name())
}
// No overlapping containers
@@ -523,34 +612,34 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
depCtrBkt := ctrBucket.Bucket(depCtrID)
if depCtrBkt == nil {
- return errors.Wrapf(ErrNoSuchCtr, "container %s depends on container %s, but it does not exist in the DB", ctr.ID(), dependsCtr)
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s depends on container %s, but it does not exist in the DB", ctr.ID(), dependsCtr)
}
depCtrPod := depCtrBkt.Get(podIDKey)
if pod != nil {
// If we're part of a pod, make sure the dependency is part of the same pod
if depCtrPod == nil {
- return errors.Wrapf(ErrInvalidArg, "container %s depends on container %s which is not in pod %s", ctr.ID(), dependsCtr, pod.ID())
+ return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is not in pod %s", ctr.ID(), dependsCtr, pod.ID())
}
if string(depCtrPod) != pod.ID() {
- return errors.Wrapf(ErrInvalidArg, "container %s depends on container %s which is in a different pod (%s)", ctr.ID(), dependsCtr, string(depCtrPod))
+ return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is in a different pod (%s)", ctr.ID(), dependsCtr, string(depCtrPod))
}
} else {
// If we're not part of a pod, we cannot depend on containers in a pod
if depCtrPod != nil {
- return errors.Wrapf(ErrInvalidArg, "container %s depends on container %s which is in a pod - containers not in pods cannot depend on containers in pods", ctr.ID(), dependsCtr)
+ return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is in a pod - containers not in pods cannot depend on containers in pods", ctr.ID(), dependsCtr)
}
}
depNamespace := depCtrBkt.Get(namespaceKey)
if !bytes.Equal(ctrNamespace, depNamespace) {
- return errors.Wrapf(ErrNSMismatch, "container %s in namespace %q depends on container %s in namespace %q - namespaces must match", ctr.ID(), ctr.config.Namespace, dependsCtr, string(depNamespace))
+ return errors.Wrapf(define.ErrNSMismatch, "container %s in namespace %q depends on container %s in namespace %q - namespaces must match", ctr.ID(), ctr.config.Namespace, dependsCtr, string(depNamespace))
}
depCtrDependsBkt := depCtrBkt.Bucket(dependenciesBkt)
if depCtrDependsBkt == nil {
- return errors.Wrapf(ErrInternal, "container %s does not have a dependencies bucket", dependsCtr)
+ return errors.Wrapf(define.ErrInternal, "container %s does not have a dependencies bucket", dependsCtr)
}
if err := depCtrDependsBkt.Put(ctrID, ctrName); err != nil {
return errors.Wrapf(err, "error adding ctr %s as dependency of container %s", ctr.ID(), dependsCtr)
@@ -558,7 +647,7 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
}
// Add ctr to pod
- if pod != nil {
+ if pod != nil && podCtrs != nil {
if err := podCtrs.Put(ctrID, ctrName); err != nil {
return errors.Wrapf(err, "error adding container %s to pod %s", ctr.ID(), pod.ID())
}
@@ -568,7 +657,7 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
for _, vol := range ctr.config.NamedVolumes {
volDB := volBkt.Bucket([]byte(vol.Name))
if volDB == nil {
- return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in database when adding container %s", vol.Name, ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database when adding container %s", vol.Name, ctr.ID())
}
ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
@@ -634,7 +723,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
podDB = podBucket.Bucket(podID)
if podDB == nil {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
}
}
@@ -642,21 +731,21 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
ctrExists := ctrBucket.Bucket(ctrID)
if ctrExists == nil {
ctr.valid = false
- return errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
}
// Compare namespace
// We can't remove containers not in our namespace
if s.namespace != "" {
if s.namespace != ctr.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
}
if pod != nil && s.namespace != pod.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q, does not match out namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q, does not match out namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
}
}
- if podDB != nil {
+ if podDB != nil && pod != nil {
// Check if the container is in the pod, remove it if it is
podCtrs := podDB.Bucket(containersBkt)
if podCtrs == nil {
@@ -665,7 +754,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
} else {
ctrInPod := podCtrs.Get(ctrID)
if ctrInPod == nil {
- return errors.Wrapf(ErrNoSuchCtr, "container %s is not in pod %s", ctr.ID(), pod.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not in pod %s", ctr.ID(), pod.ID())
}
if err := podCtrs.Delete(ctrID); err != nil {
return errors.Wrapf(err, "error removing container %s from pod %s", ctr.ID(), pod.ID())
@@ -676,7 +765,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
// Does the container have dependencies?
ctrDepsBkt := ctrExists.Bucket(dependenciesBkt)
if ctrDepsBkt == nil {
- return errors.Wrapf(ErrInternal, "container %s does not have a dependencies bucket", ctr.ID())
+ return errors.Wrapf(define.ErrInternal, "container %s does not have a dependencies bucket", ctr.ID())
}
deps := []string{}
err = ctrDepsBkt.ForEach(func(id, value []byte) error {
@@ -688,11 +777,11 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
return err
}
if len(deps) != 0 {
- return errors.Wrapf(ErrCtrExists, "container %s is a dependency of the following containers: %s", ctr.ID(), strings.Join(deps, ", "))
+ return errors.Wrapf(define.ErrCtrExists, "container %s is a dependency of the following containers: %s", ctr.ID(), strings.Join(deps, ", "))
}
if err := ctrBucket.DeleteBucket(ctrID); err != nil {
- return errors.Wrapf(ErrInternal, "error deleting container %s from DB", ctr.ID())
+ return errors.Wrapf(define.ErrInternal, "error deleting container %s from DB", ctr.ID())
}
if err := idsBucket.Delete(ctrID); err != nil {
diff --git a/libpod/common_test.go b/libpod/common_test.go
index df730098e..93ca7bc71 100644
--- a/libpod/common_test.go
+++ b/libpod/common_test.go
@@ -7,6 +7,7 @@ import (
"testing"
"time"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/lock"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/opencontainers/runtime-tools/generate"
@@ -49,7 +50,7 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error)
},
},
state: &ContainerState{
- State: ContainerStateRunning,
+ State: define.ContainerStateRunning,
ConfigPath: "/does/not/exist/specs/" + id,
RunDir: "/does/not/exist/tmp/",
Mounted: true,
@@ -88,13 +89,13 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error)
ctr.config.Labels["test"] = "testing"
- // Allocate a lock for the container
- lock, err := manager.AllocateLock()
+ // Allocate a containerLock for the container
+ containerLock, err := manager.AllocateLock()
if err != nil {
return nil, err
}
- ctr.lock = lock
- ctr.config.LockID = lock.ID()
+ ctr.lock = containerLock
+ ctr.config.LockID = containerLock.ID()
return ctr, nil
}
@@ -113,13 +114,13 @@ func getTestPod(id, name string, manager lock.Manager) (*Pod, error) {
valid: true,
}
- // Allocate a lock for the pod
- lock, err := manager.AllocateLock()
+ // Allocate a podLock for the pod
+ podLock, err := manager.AllocateLock()
if err != nil {
return nil, err
}
- pod.lock = lock
- pod.config.LockID = lock.ID()
+ pod.lock = podLock
+ pod.config.LockID = podLock.ID()
return pod, nil
}
diff --git a/libpod/container.go b/libpod/container.go
index c8ab42fc3..2d96b1120 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -11,39 +11,16 @@ import (
"github.com/containernetworking/cni/pkg/types"
cnitypes "github.com/containernetworking/cni/pkg/types/current"
"github.com/containers/image/manifest"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/lock"
"github.com/containers/libpod/pkg/namespaces"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
-// ContainerStatus represents the current state of a container
-type ContainerStatus int
-
-const (
- // ContainerStateUnknown indicates that the container is in an error
- // state where information about it cannot be retrieved
- ContainerStateUnknown ContainerStatus = iota
- // ContainerStateConfigured indicates that the container has had its
- // storage configured but it has not been created in the OCI runtime
- ContainerStateConfigured ContainerStatus = iota
- // ContainerStateCreated indicates the container has been created in
- // the OCI runtime but not started
- ContainerStateCreated ContainerStatus = iota
- // ContainerStateRunning indicates the container is currently executing
- ContainerStateRunning ContainerStatus = iota
- // ContainerStateStopped indicates that the container was running but has
- // exited
- ContainerStateStopped ContainerStatus = iota
- // ContainerStatePaused indicates that the container has been paused
- ContainerStatePaused ContainerStatus = iota
- // ContainerStateExited indicates the the container has stopped and been
- // cleaned up
- ContainerStateExited ContainerStatus = iota
-)
-
// CgroupfsDefaultCgroupParent is the cgroup parent for CGroupFS in libpod
const CgroupfsDefaultCgroupParent = "/libpod_parent"
@@ -51,6 +28,10 @@ const CgroupfsDefaultCgroupParent = "/libpod_parent"
// manager in libpod
const SystemdDefaultCgroupParent = "machine.slice"
+// SystemdDefaultRootlessCgroupParent is the cgroup parent for the systemd cgroup
+// manager in libpod when running as rootless
+const SystemdDefaultRootlessCgroupParent = "user.slice"
+
// JournaldLogging is the string conmon expects to specify journald logging
const JournaldLogging = "journald"
@@ -135,7 +116,6 @@ const (
// assume that their callers handled this requirement. Generally speaking, if a
// function takes the container lock and accesses any part of state, it should
// syncContainer() immediately after locking.
-// ffjson: skip
type Container struct {
config *ContainerConfig
@@ -146,9 +126,10 @@ type Container struct {
// Functions called on a batched container will not lock or sync
batched bool
- valid bool
- lock lock.Locker
- runtime *Runtime
+ valid bool
+ lock lock.Locker
+ runtime *Runtime
+ ociRuntime *OCIRuntime
rootlessSlirpSyncR *os.File
rootlessSlirpSyncW *os.File
@@ -157,14 +138,16 @@ type Container struct {
// being checkpointed. If requestedIP is set it will be used instead
// of config.StaticIP.
requestedIP net.IP
+
+ // This is true if a container is restored from a checkpoint.
+ restoreFromCheckpoint bool
}
// ContainerState contains the current state of the container
// It is stored on disk in a tmpfs and recreated on reboot
-// easyjson:json
type ContainerState struct {
// The current state of the running container
- State ContainerStatus `json:"state"`
+ State define.ContainerStatus `json:"state"`
// The path to the JSON OCI runtime spec for this container
ConfigPath string `json:"configPath,omitempty"`
// RunDir is a per-boot directory for container content
@@ -188,6 +171,8 @@ type ContainerState struct {
OOMKilled bool `json:"oomKilled,omitempty"`
// PID is the PID of a running container
PID int `json:"pid,omitempty"`
+ // ConmonPID is the PID of the container's conmon
+ ConmonPID int `json:"conmonPid,omitempty"`
// ExecSessions contains active exec sessions for container
// Exec session ID is mapped to PID of exec process
ExecSessions map[string]*ExecSession `json:"execSessions,omitempty"`
@@ -222,7 +207,6 @@ type ContainerState struct {
}
// ExecSession contains information on an active exec session
-// easyjson:json
type ExecSession struct {
ID string `json:"id"`
Command []string `json:"command"`
@@ -232,7 +216,6 @@ type ExecSession struct {
// ContainerConfig contains all information that was used to create the
// container. It may not be changed once created.
// It is stored, read-only, on disk
-// easyjson:json
type ContainerConfig struct {
Spec *spec.Spec `json:"spec"`
ID string `json:"id"`
@@ -425,51 +408,6 @@ type ContainerNamedVolume struct {
Options []string `json:"options,omitempty"`
}
-// ContainerStatus returns a string representation for users
-// of a container state
-func (t ContainerStatus) String() string {
- switch t {
- case ContainerStateUnknown:
- return "unknown"
- case ContainerStateConfigured:
- return "configured"
- case ContainerStateCreated:
- return "created"
- case ContainerStateRunning:
- return "running"
- case ContainerStateStopped:
- return "stopped"
- case ContainerStatePaused:
- return "paused"
- case ContainerStateExited:
- return "exited"
- }
- return "bad state"
-}
-
-// StringToContainerStatus converts a string representation of a containers
-// status into an actual container status type
-func StringToContainerStatus(status string) (ContainerStatus, error) {
- switch status {
- case ContainerStateUnknown.String():
- return ContainerStateUnknown, nil
- case ContainerStateConfigured.String():
- return ContainerStateConfigured, nil
- case ContainerStateCreated.String():
- return ContainerStateCreated, nil
- case ContainerStateRunning.String():
- return ContainerStateRunning, nil
- case ContainerStateStopped.String():
- return ContainerStateStopped, nil
- case ContainerStatePaused.String():
- return ContainerStatePaused, nil
- case ContainerStateExited.String():
- return ContainerStateExited, nil
- default:
- return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status)
- }
-}
-
// Config accessors
// Unlocked
@@ -507,7 +445,7 @@ func (c *Container) specFromState() (*spec.Spec, error) {
if err != nil {
return nil, errors.Wrapf(err, "error reading container config")
}
- if err := json.Unmarshal([]byte(content), &returnSpec); err != nil {
+ if err := json.Unmarshal(content, &returnSpec); err != nil {
return nil, errors.Wrapf(err, "error unmarshalling container config")
}
} else {
@@ -701,10 +639,7 @@ func (c *Container) HostsAdd() []string {
// trigger some OCI hooks.
func (c *Container) UserVolumes() []string {
volumes := make([]string, 0, len(c.config.UserVolumes))
- for _, vol := range c.config.UserVolumes {
- volumes = append(volumes, vol)
- }
-
+ volumes = append(volumes, c.config.UserVolumes...)
return volumes
}
@@ -712,10 +647,7 @@ func (c *Container) UserVolumes() []string {
// This is not added to the spec, but is instead used during image commit.
func (c *Container) Entrypoint() []string {
entrypoint := make([]string, 0, len(c.config.Entrypoint))
- for _, str := range c.config.Entrypoint {
- entrypoint = append(entrypoint, str)
- }
-
+ entrypoint = append(entrypoint, c.config.Entrypoint...)
return entrypoint
}
@@ -723,10 +655,7 @@ func (c *Container) Entrypoint() []string {
// This is not added to the spec, but is instead used during image commit
func (c *Container) Command() []string {
command := make([]string, 0, len(c.config.Command))
- for _, str := range c.config.Command {
- command = append(command, str)
- }
-
+ command = append(command, c.config.Command...)
return command
}
@@ -793,7 +722,7 @@ func (c *Container) LogDriver() string {
// RuntimeName returns the name of the runtime
func (c *Container) RuntimeName() string {
- return c.runtime.ociRuntime.name
+ return c.config.OCIRuntime
}
// Runtime spec accessors
@@ -820,13 +749,13 @@ func (c *Container) WorkingDir() string {
// Require locking
// State returns the current state of the container
-func (c *Container) State() (ContainerStatus, error) {
+func (c *Container) State() (define.ContainerStatus, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return ContainerStateUnknown, err
+ return define.ContainerStateUnknown, err
}
}
return c.state.State, nil
@@ -916,7 +845,7 @@ func (c *Container) OOMKilled() (bool, error) {
return c.state.OOMKilled, nil
}
-// PID returns the PID of the container
+// PID returns the PID of the container.
// If the container is not running, a pid of 0 will be returned. No error will
// occur.
func (c *Container) PID() (int, error) {
@@ -932,6 +861,22 @@ func (c *Container) PID() (int, error) {
return c.state.PID, nil
}
+// ConmonPID returns the PID of the container's conmon process.
+// If the container is not running, a PID of 0 will be returned. No error will
+// occur.
+func (c *Container) ConmonPID() (int, error) {
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return -1, err
+ }
+ }
+
+ return c.state.ConmonPID, nil
+}
+
// ExecSessions retrieves active exec sessions running in the container
func (c *Container) ExecSessions() ([]string, error) {
if !c.batched {
@@ -965,7 +910,7 @@ func (c *Container) ExecSession(id string) (*ExecSession, error) {
session, ok := c.state.ExecSessions[id]
if !ok {
- return nil, errors.Wrapf(ErrNoSuchCtr, "no exec session with ID %s found in container %s", id, c.ID())
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "no exec session with ID %s found in container %s", id, c.ID())
}
returnSession := new(ExecSession)
@@ -990,7 +935,7 @@ func (c *Container) IPs() ([]net.IPNet, error) {
}
if !c.config.CreateNetNS {
- return nil, errors.Wrapf(ErrInvalidArg, "container %s network namespace is not managed by libpod", c.ID())
+ return nil, errors.Wrapf(define.ErrInvalidArg, "container %s network namespace is not managed by libpod", c.ID())
}
ips := make([]net.IPNet, 0)
@@ -1018,7 +963,7 @@ func (c *Container) Routes() ([]types.Route, error) {
}
if !c.config.CreateNetNS {
- return nil, errors.Wrapf(ErrInvalidArg, "container %s network namespace is not managed by libpod", c.ID())
+ return nil, errors.Wrapf(define.ErrInvalidArg, "container %s network namespace is not managed by libpod", c.ID())
}
routes := make([]types.Route, 0)
@@ -1085,7 +1030,7 @@ func (c *Container) StoppedByUser() (bool, error) {
// NamespacePath returns the path of one of the container's namespaces
// If the container is not running, an error will be returned
-func (c *Container) NamespacePath(ns LinuxNS) (string, error) {
+func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:interfacer
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -1094,15 +1039,15 @@ func (c *Container) NamespacePath(ns LinuxNS) (string, error) {
}
}
- if c.state.State != ContainerStateRunning && c.state.State != ContainerStatePaused {
- return "", errors.Wrapf(ErrCtrStopped, "cannot get namespace path unless container %s is running", c.ID())
+ if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
+ return "", errors.Wrapf(define.ErrCtrStopped, "cannot get namespace path unless container %s is running", c.ID())
}
- if ns == InvalidNS {
- return "", errors.Wrapf(ErrInvalidArg, "invalid namespace requested from container %s", c.ID())
+ if linuxNS == InvalidNS {
+ return "", errors.Wrapf(define.ErrInvalidArg, "invalid namespace requested from container %s", c.ID())
}
- return fmt.Sprintf("/proc/%d/ns/%s", c.state.PID, ns.String()), nil
+ return fmt.Sprintf("/proc/%d/ns/%s", c.state.PID, linuxNS.String()), nil
}
// CGroupPath returns a cgroups "path" for a given container.
@@ -1111,9 +1056,13 @@ func (c *Container) CGroupPath() (string, error) {
case CgroupfsCgroupsManager:
return filepath.Join(c.config.CgroupParent, fmt.Sprintf("libpod-%s", c.ID())), nil
case SystemdCgroupsManager:
+ if rootless.IsRootless() {
+ uid := rootless.GetRootlessUID()
+ return filepath.Join(c.config.CgroupParent, fmt.Sprintf("user-%d.slice/user@%d.service/user.slice", uid, uid), createUnitName("libpod", c.ID())), nil
+ }
return filepath.Join(c.config.CgroupParent, createUnitName("libpod", c.ID())), nil
default:
- return "", errors.Wrapf(ErrInvalidArg, "unsupported CGroup manager %s in use", c.runtime.config.CgroupManager)
+ return "", errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager %s in use", c.runtime.config.CgroupManager)
}
}
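
As a worked example of the rootless systemd path above (illustrative only: the scope name produced by createUnitName is assumed to be "libpod-<id>.scope", which is not shown in this diff), a rootless container owned by UID 1000 with the default user.slice parent would resolve along these lines:

	uid := 1000 // what rootless.GetRootlessUID() would return for this user
	path := filepath.Join("user.slice",
		fmt.Sprintf("user-%d.slice/user@%d.service/user.slice", uid, uid),
		"libpod-<id>.scope") // unit name format assumed
	// path == "user.slice/user-1000.slice/user@1000.service/user.slice/libpod-<id>.scope"
	_ = path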
diff --git a/libpod/container.log.go b/libpod/container.log.go
new file mode 100644
index 000000000..7d0cd5bfb
--- /dev/null
+++ b/libpod/container.log.go
@@ -0,0 +1,73 @@
+package libpod
+
+import (
+ "os"
+
+ "github.com/containers/libpod/libpod/logs"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// Log is a runtime function that can read one or more container logs.
+func (r *Runtime) Log(containers []*Container, options *logs.LogOptions, logChannel chan *logs.LogLine) error {
+ for _, ctr := range containers {
+ if err := ctr.ReadLog(options, logChannel); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ReadLog reads a container's log based on the input options and returns log lines over a channel
+func (c *Container) ReadLog(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
+ // TODO Skip sending logs until journald logs can be read
+ // TODO make this not a magic string
+ if c.LogDriver() == JournaldLogging {
+ return c.readFromJournal(options, logChannel)
+ }
+ return c.readFromLogFile(options, logChannel)
+}
+
+func (c *Container) readFromLogFile(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
+ t, tailLog, err := logs.GetLogFile(c.LogPath(), options)
+ if err != nil {
+ // If the log file does not exist, this is not fatal.
+ if os.IsNotExist(errors.Cause(err)) {
+ return nil
+ }
+ return errors.Wrapf(err, "unable to read log file %s for %s ", c.ID(), c.LogPath())
+ }
+ options.WaitGroup.Add(1)
+ if len(tailLog) > 0 {
+ for _, nll := range tailLog {
+ nll.CID = c.ID()
+ if nll.Since(options.Since) {
+ logChannel <- nll
+ }
+ }
+ }
+
+ go func() {
+ var partial string
+ for line := range t.Lines {
+ nll, err := logs.NewLogLine(line.Text)
+ if err != nil {
+ logrus.Error(err)
+ continue
+ }
+ if nll.Partial() {
+ partial = partial + nll.Msg
+ continue
+ } else if !nll.Partial() && len(partial) > 1 {
+ nll.Msg = partial
+ partial = ""
+ }
+ nll.CID = c.ID()
+ if nll.Since(options.Since) {
+ logChannel <- nll
+ }
+ }
+ options.WaitGroup.Done()
+ }()
+ return nil
+}
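
A hedged usage sketch for the new log API: the WaitGroup and Since fields of logs.LogOptions and the CID/Msg fields of logs.LogLine are taken from the code above; everything else (imports, error handling) is assumed.

	// Sketch: stream log lines for a set of containers over a channel.
	logChannel := make(chan *logs.LogLine)
	opts := &logs.LogOptions{
		WaitGroup: &sync.WaitGroup{}, // each ReadLog call Adds/Dones on this
		Since:     time.Time{},       // zero value: no lower bound
	}

	go func() {
		if err := runtime.Log(ctrs, opts, logChannel); err != nil {
			logrus.Error(err)
		}
		opts.WaitGroup.Wait()
		close(logChannel)
	}()

	for line := range logChannel {
		fmt.Println(line.CID, line.Msg)
	}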
diff --git a/libpod/container_api.go b/libpod/container_api.go
index eff5bfe5f..cd020e429 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -2,18 +2,13 @@ package libpod
import (
"context"
- "fmt"
"io"
"io/ioutil"
"os"
- "strconv"
- "sync"
"time"
- "github.com/containers/libpod/libpod/driver"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
- "github.com/containers/libpod/pkg/inspect"
- "github.com/containers/libpod/pkg/lookup"
"github.com/containers/storage/pkg/stringid"
"github.com/docker/docker/oci/caps"
"github.com/opentracing/opentracing-go"
@@ -38,10 +33,10 @@ func (c *Container) Init(ctx context.Context) (err error) {
}
}
- if !(c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateStopped ||
- c.state.State == ContainerStateExited) {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID())
+ if !(c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateStopped ||
+ c.state.State == define.ContainerStateExited) {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID())
}
// don't recursively start
@@ -56,7 +51,7 @@ func (c *Container) Init(ctx context.Context) (err error) {
return err
}
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
// Reinitialize the container
return c.reinit(ctx, false)
}
@@ -117,24 +112,27 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams,
if err := c.prepareToStart(ctx, recursive); err != nil {
return nil, err
}
-
attachChan := make(chan error)
// We need to ensure that we don't return until start() fired in attach.
- // Use a WaitGroup to sync this.
- wg := new(sync.WaitGroup)
- wg.Add(1)
+ // Use a channel to sync
+ startedChan := make(chan bool)
// Attach to the container before starting it
go func() {
- if err := c.attach(streams, keys, resize, true, wg); err != nil {
+ if err := c.attach(streams, keys, resize, true, startedChan); err != nil {
attachChan <- err
}
close(attachChan)
}()
- wg.Wait()
- c.newContainerEvent(events.Attach)
+ select {
+ case err := <-attachChan:
+ return nil, err
+ case <-startedChan:
+ c.newContainerEvent(events.Attach)
+ }
+
return attachChan, nil
}
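
A sketch of a caller consuming the returned channel; the parts of the StartAndAttach signature not shown in this hunk (keys, resize, recursive) are assumed unchanged.

	// Start the container, attach, and wait for the attach session to end.
	attachChan, err := ctr.StartAndAttach(ctx, streams, "", nil, false)
	if err != nil {
		return err // start or early attach failure
	}
	if err := <-attachChan; err != nil {
		logrus.Errorf("attach to %s ended with error: %v", ctr.ID(), err)
	}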
@@ -179,15 +177,15 @@ func (c *Container) StopWithTimeout(timeout uint) error {
}
}
- if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateUnknown ||
- c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s in state %s", c.ID(), c.state.State.String())
+ if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateUnknown ||
+ c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s is in state %s", c.ID(), c.state.State.String())
}
- if c.state.State == ContainerStateStopped ||
- c.state.State == ContainerStateExited {
- return ErrCtrStopped
+ if c.state.State == define.ContainerStateStopped ||
+ c.state.State == define.ContainerStateExited {
+ return define.ErrCtrStopped
}
defer c.newContainerEvent(events.Stop)
return c.stop(timeout)
@@ -204,12 +202,12 @@ func (c *Container) Kill(signal uint) error {
}
}
- if c.state.State != ContainerStateRunning {
- return errors.Wrapf(ErrCtrStateInvalid, "can only kill running containers")
+ if c.state.State != define.ContainerStateRunning {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
}
defer c.newContainerEvent(events.Kill)
- if err := c.runtime.ociRuntime.killContainer(c, signal); err != nil {
+ if err := c.ociRuntime.killContainer(c, signal); err != nil {
return err
}
@@ -219,49 +217,33 @@ func (c *Container) Kill(signal uint) error {
}
// Exec starts a new process inside the container
+// Returns an exit code and an error. If Exec was unable to exec in the container before the command could run, an exit code of 126 is returned.
+// If another generic error occurs, an exit code of 125 is returned.
+// If the OCI runtime's exec call itself fails, the exit code of that call is returned.
+// Otherwise, the exit code is the exit code of the command executed inside the container.
// TODO investigate allowing exec without attaching
-func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir string, streams *AttachStreams, preserveFDs int) error {
+func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir string, streams *AttachStreams, preserveFDs int, resize chan remotecommand.TerminalSize, detachKeys string) (int, error) {
var capList []string
-
- locked := false
if !c.batched {
- locked = true
-
c.lock.Lock()
- defer func() {
- if locked {
- c.lock.Unlock()
- }
- }()
+ defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return err
+ return define.ExecErrorCodeCannotInvoke, err
}
}
conState := c.state.State
// TODO can probably relax this once we track exec sessions
- if conState != ContainerStateRunning {
- return errors.Errorf("cannot exec into container that is not running")
+ if conState != define.ContainerStateRunning {
+ return define.ExecErrorCodeCannotInvoke, errors.Wrapf(define.ErrCtrStateInvalid, "cannot exec into container that is not running")
}
+
if privileged || c.config.Privileged {
capList = caps.GetAllCapabilities()
}
- // If user was set, look it up in the container to get a UID to use on
- // the host
- hostUser := ""
- if user != "" {
- execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, user, nil)
- if err != nil {
- return err
- }
-
- // runc expects user formatted as uid:gid
- hostUser = fmt.Sprintf("%d:%d", execUser.Uid, execUser.Gid)
- }
-
// Generate exec session ID
// Ensure we don't conflict with an existing session ID
sessionID := stringid.GenerateNonCryptoID()
@@ -275,52 +257,33 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir
break
}
}
- if found == true {
+ if found {
sessionID = stringid.GenerateNonCryptoID()
}
}
logrus.Debugf("Creating new exec session in container %s with session id %s", c.ID(), sessionID)
-
- execCmd, err := c.runtime.ociRuntime.execContainer(c, cmd, capList, env, tty, workDir, hostUser, sessionID, streams, preserveFDs)
- if err != nil {
- return errors.Wrapf(err, "error exec %s", c.ID())
+ if err := c.createExecBundle(sessionID); err != nil {
+ return define.ExecErrorCodeCannotInvoke, err
}
- chWait := make(chan error)
- go func() {
- chWait <- execCmd.Wait()
- close(chWait)
- }()
-
- pidFile := c.execPidPath(sessionID)
- // 60 second seems a reasonable time to wait
- // https://github.com/containers/libpod/issues/1495
- // https://github.com/containers/libpod/issues/1816
- const pidWaitTimeout = 60000
- // Wait until the runtime makes the pidfile
- exited, err := WaitForFile(pidFile, chWait, pidWaitTimeout*time.Millisecond)
- if err != nil {
- if exited {
- // If the runtime exited, propagate the error we got from the process.
- return err
+ defer func() {
+ // cleanup exec bundle
+ if err := c.cleanupExecBundle(sessionID); err != nil {
+ logrus.Errorf("Error removing exec session %s bundle path for container %s: %v", sessionID, c.ID(), err)
}
- return errors.Wrapf(err, "timed out waiting for runtime to create pidfile for exec session in container %s", c.ID())
- }
+ }()
- // Pidfile exists, read it
- contents, err := ioutil.ReadFile(pidFile)
+ pid, attachChan, err := c.ociRuntime.execContainer(c, cmd, capList, env, tty, workDir, user, sessionID, streams, preserveFDs, resize, detachKeys)
if err != nil {
- // We don't know the PID of the exec session
- // However, it may still be alive
- // TODO handle this better
- return errors.Wrapf(err, "could not read pidfile for exec session %s in container %s", sessionID, c.ID())
- }
- pid, err := strconv.ParseInt(string(contents), 10, 32)
- if err != nil {
- // As above, we don't have a valid PID, but the exec session is likely still alive
- // TODO handle this better
- return errors.Wrapf(err, "error parsing PID of exec session %s in container %s", sessionID, c.ID())
+ ec := define.ExecErrorCodeGeneric
+ // Conmon will pass a non-zero exit code from the runtime as a pid here.
+ // We differentiate a pid from an exit code by sending the exit code as a
+ // negative value, so reverse that change and return the exit code the runtime failed with.
+ if pid < 0 {
+ ec = -1 * pid
+ }
+ return ec, err
}
// We have the PID, add it to state
@@ -330,12 +293,12 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir
session := new(ExecSession)
session.ID = sessionID
session.Command = cmd
- session.PID = int(pid)
+ session.PID = pid
c.state.ExecSessions[sessionID] = session
if err := c.save(); err != nil {
// Now we have a PID but we can't save it in the DB
// TODO handle this better
- return errors.Wrapf(err, "error saving exec sessions %s for container %s", sessionID, c.ID())
+ return define.ExecErrorCodeGeneric, errors.Wrapf(err, "error saving exec sessions %s for container %s", sessionID, c.ID())
}
c.newContainerEvent(events.Exec)
logrus.Debugf("Successfully started exec session %s in container %s", sessionID, c.ID())
@@ -343,23 +306,33 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir
// Unlock so other processes can use the container
if !c.batched {
c.lock.Unlock()
- locked = false
}
- var waitErr error
- if !exited {
- waitErr = <-chWait
+ lastErr := <-attachChan
+
+ exitCode, err := c.readExecExitCode(sessionID)
+ if err != nil {
+ if lastErr != nil {
+ logrus.Errorf(lastErr.Error())
+ }
+ lastErr = err
+ }
+ if exitCode != 0 {
+ if lastErr != nil {
+ logrus.Errorf(lastErr.Error())
+ }
+ lastErr = errors.Wrapf(define.ErrOCIRuntime, "non zero exit code: %d", exitCode)
}
// Lock again
if !c.batched {
- locked = true
c.lock.Lock()
}
// Sync the container again to pick up changes in state
if err := c.syncContainer(); err != nil {
- return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), sessionID)
+ logrus.Errorf("error syncing container %s state to remove exec session %s", c.ID(), sessionID)
+ return exitCode, lastErr
}
// Remove the exec session from state
@@ -367,7 +340,7 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir
if err := c.save(); err != nil {
logrus.Errorf("Error removing exec session %s from container %s state: %v", sessionID, c.ID(), err)
}
- return waitErr
+ return exitCode, lastErr
}
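
A sketch of how a caller might interpret the new (exit code, error) contract; the constant names come from the new libpod/define/exec_codes.go shown in the diffstat, and the command is illustrative only.

	exitCode, err := ctr.Exec(false, false, nil, []string{"true"}, "", "", streams, 0, nil, "")
	switch {
	case err == nil:
		// exitCode is the exit status of the command run inside the container.
	case exitCode == define.ExecErrorCodeCannotInvoke:
		// 126 per the comment above: exec could not be invoked at all.
		logrus.Errorf("could not invoke command in %s: %v", ctr.ID(), err)
	case exitCode == define.ExecErrorCodeGeneric:
		// 125: a generic libpod-side failure.
		logrus.Errorf("exec failed in %s: %v", ctr.ID(), err)
	default:
		// The OCI runtime's own exec call failed; exitCode is its exit status.
		logrus.Errorf("runtime exec error %d in %s: %v", exitCode, ctr.ID(), err)
	}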
// AttachStreams contains streams that will be attached to the container
@@ -400,10 +373,10 @@ func (c *Container) Attach(streams *AttachStreams, keys string, resize <-chan re
c.lock.Unlock()
}
- if c.state.State != ContainerStateCreated &&
- c.state.State != ContainerStateRunning &&
- c.state.State != ContainerStateExited {
- return errors.Wrapf(ErrCtrStateInvalid, "can only attach to created or running containers")
+ if c.state.State != define.ContainerStateCreated &&
+ c.state.State != define.ContainerStateRunning &&
+ c.state.State != define.ContainerStateExited {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
}
defer c.newContainerEvent(events.Attach)
return c.attach(streams, keys, resize, false, nil)
@@ -441,13 +414,13 @@ func (c *Container) Unmount(force bool) error {
return errors.Wrapf(err, "can't determine how many times %s is mounted, refusing to unmount", c.ID())
}
if mounted == 1 {
- if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
+ if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
}
if len(c.state.ExecSessions) != 0 {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID())
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID())
}
- return errors.Wrapf(ErrInternal, "can't unmount %s last mount, it is still in use", c.ID())
+ return errors.Wrapf(define.ErrInternal, "can't unmount %s last mount, it is still in use", c.ID())
}
}
defer c.newContainerEvent(events.Unmount)
@@ -465,11 +438,11 @@ func (c *Container) Pause() error {
}
}
- if c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "%q is already paused", c.ID())
+ if c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "%q is already paused", c.ID())
}
- if c.state.State != ContainerStateRunning {
- return errors.Wrapf(ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State)
+ if c.state.State != define.ContainerStateRunning {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State)
}
defer c.newContainerEvent(events.Pause)
return c.pause()
@@ -486,8 +459,8 @@ func (c *Container) Unpause() error {
}
}
- if c.state.State != ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID())
+ if c.state.State != define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID())
}
defer c.newContainerEvent(events.Unpause)
return c.unpause()
@@ -511,7 +484,7 @@ func (c *Container) Export(path string) error {
// AddArtifact creates and writes to an artifact file for the container
func (c *Container) AddArtifact(name string, data []byte) error {
if !c.valid {
- return ErrCtrRemoved
+ return define.ErrCtrRemoved
}
return ioutil.WriteFile(c.getArtifactPath(name), data, 0740)
@@ -520,7 +493,7 @@ func (c *Container) AddArtifact(name string, data []byte) error {
// GetArtifact reads the specified artifact file from the container
func (c *Container) GetArtifact(name string) ([]byte, error) {
if !c.valid {
- return nil, ErrCtrRemoved
+ return nil, define.ErrCtrRemoved
}
return ioutil.ReadFile(c.getArtifactPath(name))
@@ -529,38 +502,12 @@ func (c *Container) GetArtifact(name string) ([]byte, error) {
// RemoveArtifact deletes the specified artifacts file
func (c *Container) RemoveArtifact(name string) error {
if !c.valid {
- return ErrCtrRemoved
+ return define.ErrCtrRemoved
}
return os.Remove(c.getArtifactPath(name))
}
-// Inspect a container for low-level information
-func (c *Container) Inspect(size bool) (*inspect.ContainerInspectData, error) {
- if !c.batched {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- if err := c.syncContainer(); err != nil {
- return nil, err
- }
- }
-
- storeCtr, err := c.runtime.store.Container(c.ID())
- if err != nil {
- return nil, errors.Wrapf(err, "error getting container from store %q", c.ID())
- }
- layer, err := c.runtime.store.Layer(storeCtr.LayerID)
- if err != nil {
- return nil, errors.Wrapf(err, "error reading information about layer %q", storeCtr.LayerID)
- }
- driverData, err := driver.GetDriverData(c.runtime.store, layer.ID)
- if err != nil {
- return nil, errors.Wrapf(err, "error getting graph driver info %q", c.ID())
- }
- return c.getContainerInspectData(size, driverData)
-}
-
// Wait blocks until the container exits and returns its exit code.
func (c *Container) Wait() (int32, error) {
return c.WaitWithInterval(DefaultWaitInterval)
@@ -570,7 +517,7 @@ func (c *Container) Wait() (int32, error) {
// code. The argument is the interval at which checks the container's status.
func (c *Container) WaitWithInterval(waitTimeout time.Duration) (int32, error) {
if !c.valid {
- return -1, ErrCtrRemoved
+ return -1, define.ErrCtrRemoved
}
err := wait.PollImmediateInfinite(waitTimeout,
func() (bool, error) {
@@ -605,8 +552,8 @@ func (c *Container) Cleanup(ctx context.Context) error {
}
// Check if state is good
- if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
+ if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
}
// Handle restart policy.
@@ -624,7 +571,7 @@ func (c *Container) Cleanup(ctx context.Context) error {
// Check if we have active exec sessions
if len(c.state.ExecSessions) != 0 {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID())
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID())
}
defer c.newContainerEvent(events.Cleanup)
return c.cleanup(ctx)
@@ -655,6 +602,7 @@ func (c *Container) Batch(batchFunc func(*Container) error) error {
newCtr.config = c.config
newCtr.state = c.state
newCtr.runtime = c.runtime
+ newCtr.ociRuntime = c.ociRuntime
newCtr.lock = c.lock
newCtr.valid = true
@@ -682,11 +630,11 @@ func (c *Container) Sync() error {
// If runtime knows about the container, update its status in runtime
// And then save back to disk
- if (c.state.State != ContainerStateUnknown) &&
- (c.state.State != ContainerStateConfigured) &&
- (c.state.State != ContainerStateExited) {
+ if (c.state.State != define.ContainerStateUnknown) &&
+ (c.state.State != define.ContainerStateConfigured) &&
+ (c.state.State != define.ContainerStateExited) {
oldState := c.state.State
- if err := c.runtime.ociRuntime.updateContainerStatus(c, true); err != nil {
+ if err := c.ociRuntime.updateContainerStatus(c, true); err != nil {
return err
}
// Only save back to DB if state changed
@@ -713,27 +661,27 @@ func (c *Container) Refresh(ctx context.Context) error {
}
wasCreated := false
- if c.state.State == ContainerStateCreated {
+ if c.state.State == define.ContainerStateCreated {
wasCreated = true
}
wasRunning := false
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
wasRunning = true
}
wasPaused := false
- if c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStatePaused {
wasPaused = true
}
// First, unpause the container if it's paused
- if c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStatePaused {
if err := c.unpause(); err != nil {
return err
}
}
// Next, if the container is running, stop it
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
if err := c.stop(c.config.StopTimeout); err != nil {
return err
}
@@ -743,14 +691,14 @@ func (c *Container) Refresh(ctx context.Context) error {
if len(c.state.ExecSessions) > 0 {
logrus.Infof("Killing %d exec sessions in container %s. They will not be restored after refresh.",
len(c.state.ExecSessions), c.ID())
- if err := c.runtime.ociRuntime.execStopContainer(c, c.config.StopTimeout); err != nil {
+ if err := c.ociRuntime.execStopContainer(c, c.config.StopTimeout); err != nil {
return err
}
}
// If the container is in ContainerStateStopped, we need to delete it
// from the runtime and clear conmon state
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
if err := c.delete(ctx); err != nil {
return err
}
@@ -815,11 +763,28 @@ type ContainerCheckpointOptions struct {
// TCPEstablished tells the API to checkpoint a container
// even if it contains established TCP connections
TCPEstablished bool
+ // TargetFile tells the API to read (or write) the checkpoint image
+ // from (or to) the filename set in TargetFile
+ TargetFile string
+ // Name tells the API that during restore from an exported
+ // checkpoint archive a new name should be used for the
+ // restored container
+ Name string
+ // IgnoreRootfs tells the API to not export changes to
+ // the container's root file-system (or to not import)
+ IgnoreRootfs bool
}
// Checkpoint checkpoints a container
func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
logrus.Debugf("Trying to checkpoint container %s", c.ID())
+
+ if options.TargetFile != "" {
+ if err := c.prepareCheckpointExport(); err != nil {
+ return err
+ }
+ }
+
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
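
A sketch of how the new export fields might be used together; the field names are those added above, and the archive path is illustrative.

	// Checkpoint the container and export the checkpoint to an archive,
	// skipping the root filesystem diff.
	opts := ContainerCheckpointOptions{
		TCPEstablished: true,
		TargetFile:     "/tmp/ctr-checkpoint.tar.gz", // illustrative path
		IgnoreRootfs:   true,
	}
	if err := ctr.Checkpoint(ctx, opts); err != nil {
		logrus.Errorf("checkpoint of %s failed: %v", ctr.ID(), err)
	}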
diff --git a/libpod/container_attach_linux.go b/libpod/container_attach_linux.go
deleted file mode 100644
index 7e9b7697b..000000000
--- a/libpod/container_attach_linux.go
+++ /dev/null
@@ -1,171 +0,0 @@
-//+build linux
-
-package libpod
-
-import (
- "fmt"
- "io"
- "net"
- "os"
- "path/filepath"
- "sync"
-
- "github.com/containers/libpod/pkg/kubeutils"
- "github.com/containers/libpod/utils"
- "github.com/docker/docker/pkg/term"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "golang.org/x/sys/unix"
- "k8s.io/client-go/tools/remotecommand"
-)
-
-//#include <sys/un.h>
-// extern int unix_path_length(){struct sockaddr_un addr; return sizeof(addr.sun_path) - 1;}
-import "C"
-
-/* Sync with stdpipe_t in conmon.c */
-const (
- AttachPipeStdin = 1
- AttachPipeStdout = 2
- AttachPipeStderr = 3
-)
-
-// Attach to the given container
-// Does not check if state is appropriate
-func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, wg *sync.WaitGroup) error {
- if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput {
- return errors.Wrapf(ErrInvalidArg, "must provide at least one stream to attach to")
- }
-
- // Check the validity of the provided keys first
- var err error
- detachKeys := []byte{}
- if len(keys) > 0 {
- detachKeys, err = term.ToBytes(keys)
- if err != nil {
- return errors.Wrapf(err, "invalid detach keys")
- }
- }
-
- logrus.Debugf("Attaching to container %s", c.ID())
-
- return c.attachContainerSocket(resize, detachKeys, streams, startContainer, wg)
-}
-
-// attachContainerSocket connects to the container's attach socket and deals with the IO.
-// wg is only required if startContainer is true
-// TODO add a channel to allow interrupting
-func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSize, detachKeys []byte, streams *AttachStreams, startContainer bool, wg *sync.WaitGroup) error {
- if startContainer && wg == nil {
- return errors.Wrapf(ErrInternal, "wait group not passed when startContainer set")
- }
-
- kubeutils.HandleResizing(resize, func(size remotecommand.TerminalSize) {
- controlPath := filepath.Join(c.bundlePath(), "ctl")
- controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY, 0)
- if err != nil {
- logrus.Debugf("Could not open ctl file: %v", err)
- return
- }
- defer controlFile.Close()
-
- logrus.Debugf("Received a resize event: %+v", size)
- if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, size.Height, size.Width); err != nil {
- logrus.Warnf("Failed to write to control file to resize terminal: %v", err)
- }
- })
-
- socketPath := c.AttachSocketPath()
-
- maxUnixLength := int(C.unix_path_length())
- if maxUnixLength < len(socketPath) {
- socketPath = socketPath[0:maxUnixLength]
- }
-
- logrus.Debug("connecting to socket ", socketPath)
-
- conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
- if err != nil {
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
- }
- defer conn.Close()
-
- // If starting was requested, start the container and notify when that's
- // done.
- if startContainer {
- if err := c.start(); err != nil {
- return err
- }
- wg.Done()
- }
-
- receiveStdoutError := make(chan error)
- go func() {
- receiveStdoutError <- redirectResponseToOutputStreams(streams.OutputStream, streams.ErrorStream, streams.AttachOutput, streams.AttachError, conn)
- }()
-
- stdinDone := make(chan error)
- go func() {
- var err error
- if streams.AttachInput {
- _, err = utils.CopyDetachable(conn, streams.InputStream, detachKeys)
- conn.CloseWrite()
- }
- stdinDone <- err
- }()
-
- select {
- case err := <-receiveStdoutError:
- return err
- case err := <-stdinDone:
- if err == ErrDetach {
- return err
- }
- if streams.AttachOutput || streams.AttachError {
- return <-receiveStdoutError
- }
- }
- return nil
-}
-
-func redirectResponseToOutputStreams(outputStream, errorStream io.Writer, writeOutput, writeError bool, conn io.Reader) error {
- var err error
- buf := make([]byte, 8192+1) /* Sync with conmon STDIO_BUF_SIZE */
- for {
- nr, er := conn.Read(buf)
- if nr > 0 {
- var dst io.Writer
- var doWrite bool
- switch buf[0] {
- case AttachPipeStdout:
- dst = outputStream
- doWrite = writeOutput
- case AttachPipeStderr:
- dst = errorStream
- doWrite = writeError
- default:
- logrus.Infof("Received unexpected attach type %+d", buf[0])
- }
-
- if doWrite {
- nw, ew := dst.Write(buf[1:nr])
- if ew != nil {
- err = ew
- break
- }
- if nr != nw+1 {
- err = io.ErrShortWrite
- break
- }
- }
- }
- if er == io.EOF {
- break
- }
- if er != nil {
- err = er
- break
- }
- }
- return err
-}
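
The removed attach code above multiplexes stdout and stderr over a single unixpacket socket, with the first byte of every packet naming the stream (kept in sync with conmon's stdpipe_t). Below is a minimal standalone sketch of that demultiplexing, using in-memory frames in place of the conmon socket; only the marker values and the buffer size come from the code above, while demux and the sample frames are illustrative.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// Stream markers matching the stdpipe_t values used by conmon and the
// deleted attach code: 2 = stdout, 3 = stderr.
const (
	attachPipeStdout = 2
	attachPipeStderr = 3
)

// demux reads framed packets (one marker byte plus payload) and copies the
// payload to the matching writer. A hypothetical standalone helper that
// mirrors redirectResponseToOutputStreams in spirit, not the libpod code.
func demux(conn io.Reader, stdout, stderr io.Writer) error {
	buf := make([]byte, 8192+1) // sync with conmon STDIO_BUF_SIZE
	for {
		n, err := conn.Read(buf)
		if n > 0 {
			var dst io.Writer
			switch buf[0] {
			case attachPipeStdout:
				dst = stdout
			case attachPipeStderr:
				dst = stderr
			}
			if dst != nil {
				if _, werr := dst.Write(buf[1:n]); werr != nil {
					return werr
				}
			}
		}
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
	}
}

func main() {
	// One stdout frame followed by one stderr frame; io.MultiReader keeps
	// the packet boundaries, like reads from a unixpacket socket would.
	frameOut := append([]byte{attachPipeStdout}, "hello\n"...)
	frameErr := append([]byte{attachPipeStderr}, "oops\n"...)
	conn := io.MultiReader(bytes.NewReader(frameOut), bytes.NewReader(frameErr))

	var out, errOut bytes.Buffer
	if err := demux(conn, &out, &errOut); err != nil {
		panic(err)
	}
	fmt.Printf("stdout=%q stderr=%q\n", out.String(), errOut.String())
}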
diff --git a/libpod/container_attach_unsupported.go b/libpod/container_attach_unsupported.go
deleted file mode 100644
index 9e8badeaf..000000000
--- a/libpod/container_attach_unsupported.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//+build !linux
-
-package libpod
-
-import (
- "sync"
-
- "k8s.io/client-go/tools/remotecommand"
-)
-
-func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, wg *sync.WaitGroup) error {
- return ErrNotImplemented
-}
diff --git a/libpod/container_commit.go b/libpod/container_commit.go
index 739fcd80e..17586bfad 100644
--- a/libpod/container_commit.go
+++ b/libpod/container_commit.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/buildah"
"github.com/containers/buildah/util"
is "github.com/containers/image/storage"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/libpod/image"
"github.com/pkg/errors"
@@ -28,9 +29,6 @@ type ContainerCommitOptions struct {
Changes []string
}
-// ChangeCmds is the list of valid Changes commands to passed to the Commit call
-var ChangeCmds = []string{"CMD", "ENTRYPOINT", "ENV", "EXPOSE", "LABEL", "ONBUILD", "STOPSIGNAL", "USER", "VOLUME", "WORKDIR"}
-
// Commit commits the changes between a container and its image, creating a new
// image
func (c *Container) Commit(ctx context.Context, destImage string, options ContainerCommitOptions) (*image.Image, error) {
@@ -51,12 +49,12 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
}
}
- if c.state.State == ContainerStateRunning && options.Pause {
- if err := c.runtime.ociRuntime.pauseContainer(c); err != nil {
+ if c.state.State == define.ContainerStateRunning && options.Pause {
+ if err := c.ociRuntime.pauseContainer(c); err != nil {
return nil, errors.Wrapf(err, "error pausing container %q", c.ID())
}
defer func() {
- if err := c.runtime.ociRuntime.unpauseContainer(c); err != nil {
+ if err := c.ociRuntime.unpauseContainer(c); err != nil {
logrus.Errorf("error unpausing container %q: %v", c.ID(), err)
}
}()
diff --git a/libpod/container_graph.go b/libpod/container_graph.go
index da93be77d..5aa51bc2f 100644
--- a/libpod/container_graph.go
+++ b/libpod/container_graph.go
@@ -4,6 +4,7 @@ import (
"context"
"strings"
+ "github.com/containers/libpod/libpod/define"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -43,7 +44,7 @@ func buildContainerGraph(ctrs []*Container) (*containerGraph, error) {
// Get the dep's node
depNode, ok := graph.nodes[dep]
if !ok {
- return nil, errors.Wrapf(ErrNoSuchCtr, "container %s depends on container %s not found in input list", node.id, dep)
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "container %s depends on container %s not found in input list", node.id, dep)
}
// Add the dependent node to the node's dependencies
@@ -68,7 +69,7 @@ func buildContainerGraph(ctrs []*Container) (*containerGraph, error) {
if err != nil {
return nil, err
} else if cycle {
- return nil, errors.Wrapf(ErrInternal, "cycle found in container dependency graph")
+ return nil, errors.Wrapf(define.ErrInternal, "cycle found in container dependency graph")
}
return graph, nil
@@ -133,7 +134,7 @@ func detectCycles(graph *containerGraph) (bool, error) {
if info.lowLink == info.index {
l := len(stack)
if l == 0 {
- return false, errors.Wrapf(ErrInternal, "empty stack in detectCycles")
+ return false, errors.Wrapf(define.ErrInternal, "empty stack in detectCycles")
}
// Pop off the stack
@@ -143,7 +144,7 @@ func detectCycles(graph *containerGraph) (bool, error) {
// Popped item is no longer on the stack, mark as such
topInfo, ok := nodes[topOfStack.id]
if !ok {
- return false, errors.Wrapf(ErrInternal, "error finding node info for %s", topOfStack.id)
+ return false, errors.Wrapf(define.ErrInternal, "error finding node info for %s", topOfStack.id)
}
topInfo.onStack = false
@@ -186,7 +187,7 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError
if setError {
// Mark us as visited, and set an error
ctrsVisited[node.id] = true
- ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id)
+ ctrErrors[node.id] = errors.Wrapf(define.ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id)
// Hit anyone who depends on us, and set errors on them too
for _, successor := range node.dependedOn {
@@ -226,7 +227,7 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError
} else if len(depsStopped) > 0 {
// Our dependencies are not running
depsList := strings.Join(depsStopped, ",")
- ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
+ ctrErrors[node.id] = errors.Wrapf(define.ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
ctrErrored = true
}
@@ -243,13 +244,13 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError
// Start the container (only if it is not running)
if !ctrErrored {
- if !restart && node.container.state.State != ContainerStateRunning {
+ if !restart && node.container.state.State != define.ContainerStateRunning {
if err := node.container.initAndStart(ctx); err != nil {
ctrErrored = true
ctrErrors[node.id] = err
}
}
- if restart && node.container.state.State != ContainerStatePaused && node.container.state.State != ContainerStateUnknown {
+ if restart && node.container.state.State != define.ContainerStatePaused && node.container.state.State != define.ContainerStateUnknown {
if err := node.container.restartWithTimeout(ctx, node.container.config.StopTimeout); err != nil {
ctrErrored = true
ctrErrors[node.id] = err
@@ -263,6 +264,4 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError
for _, successor := range node.dependedOn {
startNode(ctx, successor, ctrErrored, ctrErrors, ctrsVisited, restart)
}
-
- return
}
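
The dependency-graph changes above keep using the index/lowLink/onStack bookkeeping of a Tarjan-style cycle check. Below is a simpler illustration of the same requirement (reject container dependency graphs that contain cycles) using a three-color DFS on a plain map; hasCycle and the sample graphs are hypothetical and not the libpod implementation.

package main

import "fmt"

// hasCycle reports whether the dependency map (container -> containers it
// depends on) contains a cycle. A plain three-color DFS; the libpod code uses
// a Tarjan-style index/lowLink scheme instead.
func hasCycle(deps map[string][]string) bool {
	const (
		white = iota // unvisited
		grey         // on the current DFS path
		black        // fully explored
	)
	color := make(map[string]int)
	var visit func(node string) bool
	visit = func(node string) bool {
		color[node] = grey
		for _, dep := range deps[node] {
			switch color[dep] {
			case grey:
				return true // back edge: cycle found
			case white:
				if visit(dep) {
					return true
				}
			}
		}
		color[node] = black
		return false
	}
	for node := range deps {
		if color[node] == white && visit(node) {
			return true
		}
	}
	return false
}

func main() {
	// b depends on a, c depends on b: no cycle.
	fmt.Println(hasCycle(map[string][]string{"b": {"a"}, "c": {"b"}}))
	// a and b depend on each other: cycle.
	fmt.Println(hasCycle(map[string][]string{"a": {"b"}, "b": {"a"}}))
}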
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index a7369bfdd..aee8c4657 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -1,23 +1,660 @@
package libpod
import (
+ "fmt"
"strings"
+ "time"
- "github.com/containers/libpod/pkg/inspect"
+ "github.com/containers/image/manifest"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/libpod/driver"
+ "github.com/containers/libpod/pkg/util"
"github.com/cri-o/ocicni/pkg/ocicni"
- specs "github.com/opencontainers/runtime-spec/specs-go"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/opencontainers/runtime-tools/validate"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ "github.com/syndtr/gocapability/capability"
)
-func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data) (*inspect.ContainerInspectData, error) {
+const (
+ // InspectAnnotationCIDFile is used by Inspect to determine if a
+ // container ID file was created for the container.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationCIDFile = "io.podman.annotations.cid-file"
+ // InspectAnnotationAutoremove is used by Inspect to determine if a
+ // container will be automatically removed on exit.
+ // If an annotation with this key is found in the OCI spec and is one of
+ // the two supported boolean values (InspectResponseTrue and
+ // InspectResponseFalse) it will be used in the output of Inspect().
+ InspectAnnotationAutoremove = "io.podman.annotations.autoremove"
+ // InspectAnnotationVolumesFrom is used by Inspect to identify
+ // containers whose volumes are being used by this container.
+ // It is expected to be a comma-separated list of container names and/or
+ // IDs.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationVolumesFrom = "io.podman.annotations.volumes-from"
+ // InspectAnnotationPrivileged is used by Inspect to identify containers
+ // which are privileged (i.e., running with elevated privileges).
+ // It is expected to be a boolean, populated by one of
+ // InspectResponseTrue or InspectResponseFalse.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationPrivileged = "io.podman.annotations.privileged"
+ // InspectAnnotationPublishAll is used by Inspect to identify containers
+ // which have all the ports from their image published.
+ // It is expected to be a boolean, populated by one of
+ // InspectResponseTrue or InspectResponseFalse.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationPublishAll = "io.podman.annotations.publish-all"
+ // InspectAnnotationInit is used by Inspect to identify containers that
+ // mount an init binary in.
+ // It is expected to be a boolean, populated by one of
+ // InspectResponseTrue or InspectResponseFalse.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationInit = "io.podman.annotations.init"
+ // InspectAnnotationLabel is used by Inspect to identify containers with
+ // special SELinux-related settings. It is used to populate the output
+ // of the SecurityOpt setting.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationLabel = "io.podman.annotations.label"
+ // InspectAnnotationSeccomp is used by Inspect to identify containers
+ // with special Seccomp-related settings. It is used to populate the
+ // output of the SecurityOpt setting in Inspect.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationSeccomp = "io.podman.annotations.seccomp"
+ // InspectAnnotationApparmor is used by Inspect to identify containers
+ // with special Apparmor-related settings. It is used to populate the
+ // output of the SecurityOpt setting.
+ // If an annotation with this key is found in the OCI spec, it will be
+ // used in the output of Inspect().
+ InspectAnnotationApparmor = "io.podman.annotations.apparmor"
+
+ // InspectResponseTrue is a boolean True response for an inspect
+ // annotation.
+ InspectResponseTrue = "TRUE"
+ // InspectResponseFalse is a boolean False response for an inspect
+ // annotation.
+ InspectResponseFalse = "FALSE"
+)
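
These annotations carry Docker-only settings that libpod does not track directly; Inspect reads them back out of the OCI spec, and the boolean ones use the TRUE/FALSE convention above. A small sketch of that convention follows, assuming a bare map[string]string in place of the spec's Annotations field; annotationBool is a hypothetical helper, only the key and the TRUE value come from the constants above.

package main

import "fmt"

// Annotation key and boolean convention copied from the constants above so
// the sketch stands alone.
const (
	inspectAnnotationPrivileged = "io.podman.annotations.privileged"
	inspectResponseTrue         = "TRUE"
)

// annotationBool turns a TRUE/FALSE annotation into a Go bool; a missing or
// FALSE value both read as false.
func annotationBool(annotations map[string]string, key string) bool {
	return annotations[key] == inspectResponseTrue
}

func main() {
	annotations := map[string]string{inspectAnnotationPrivileged: "TRUE"}
	fmt.Println(annotationBool(annotations, inspectAnnotationPrivileged)) // true
}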
+
+// InspectContainerData provides a detailed record of a container's configuration
+// and state as viewed by Libpod.
+// Large portions of this structure are defined such that the output is
+// compatible with `docker inspect` JSON, but additional fields have been added
+// as required to share information not in the original output.
+type InspectContainerData struct {
+ ID string `json:"Id"`
+ Created time.Time `json:"Created"`
+ Path string `json:"Path"`
+ Args []string `json:"Args"`
+ State *InspectContainerState `json:"State"`
+ ImageID string `json:"Image"`
+ ImageName string `json:"ImageName"`
+ Rootfs string `json:"Rootfs"`
+ ResolvConfPath string `json:"ResolvConfPath"`
+ HostnamePath string `json:"HostnamePath"`
+ HostsPath string `json:"HostsPath"`
+ StaticDir string `json:"StaticDir"`
+ OCIConfigPath string `json:"OCIConfigPath,omitempty"`
+ OCIRuntime string `json:"OCIRuntime,omitempty"`
+ LogPath string `json:"LogPath"`
+ ConmonPidFile string `json:"ConmonPidFile"`
+ Name string `json:"Name"`
+ RestartCount int32 `json:"RestartCount"`
+ Driver string `json:"Driver"`
+ MountLabel string `json:"MountLabel"`
+ ProcessLabel string `json:"ProcessLabel"`
+ AppArmorProfile string `json:"AppArmorProfile"`
+ EffectiveCaps []string `json:"EffectiveCaps"`
+ BoundingCaps []string `json:"BoundingCaps"`
+ ExecIDs []string `json:"ExecIDs"`
+ GraphDriver *driver.Data `json:"GraphDriver"`
+ SizeRw int64 `json:"SizeRw,omitempty"`
+ SizeRootFs int64 `json:"SizeRootFs,omitempty"`
+ Mounts []InspectMount `json:"Mounts"`
+ Dependencies []string `json:"Dependencies"`
+ NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"` //TODO
+ ExitCommand []string `json:"ExitCommand"`
+ Namespace string `json:"Namespace"`
+ IsInfra bool `json:"IsInfra"`
+ Config *InspectContainerConfig `json:"Config"`
+ HostConfig *InspectContainerHostConfig `json:"HostConfig"`
+}
+
+// InspectContainerConfig holds further data about how a container was initially
+// configured.
+type InspectContainerConfig struct {
+ // Container hostname
+ Hostname string `json:"Hostname"`
+ // Container domain name - unused at present
+ DomainName string `json:"Domainname"`
+ // User the container was launched with
+ User string `json:"User"`
+ // Unused, at present
+ AttachStdin bool `json:"AttachStdin"`
+ // Unused, at present
+ AttachStdout bool `json:"AttachStdout"`
+ // Unused, at present
+ AttachStderr bool `json:"AttachStderr"`
+ // Whether the container creates a TTY
+ Tty bool `json:"Tty"`
+ // Whether the container leaves STDIN open
+ OpenStdin bool `json:"OpenStdin"`
+ // Whether STDIN is only left open once.
+ // Presently not supported by Podman, unused.
+ StdinOnce bool `json:"StdinOnce"`
+ // Container environment variables
+ Env []string `json:"Env"`
+ // Container command
+ Cmd []string `json:"Cmd"`
+ // Container image
+ Image string `json:"Image"`
+ // Unused at present; this field is never populated in practice.
+ Volumes map[string]struct{} `json:"Volumes"`
+ // Container working directory
+ WorkingDir string `json:"WorkingDir"`
+ // Container entrypoint
+ Entrypoint string `json:"Entrypoint"`
+ // On-build arguments - presently unused. More of Buildah's domain.
+ OnBuild *string `json:"OnBuild"`
+ // Container labels
+ Labels map[string]string `json:"Labels"`
+ // Container annotations
+ Annotations map[string]string `json:"Annotations"`
+ // Container stop signal
+ StopSignal uint `json:"StopSignal"`
+ // Configured healthcheck for the container
+ Healthcheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"`
+}
+
+// InspectContainerHostConfig holds information used when the container was
+// created.
+// It's very much a Docker-specific struct, retained (mostly) as-is for
+// compatibility. We fill individual fields as best as we can, inferring as much
+// as possible from the spec and container config.
+// Some things cannot be inferred. These will be populated by spec annotations
+// (if available).
+// Field names are fixed for compatibility and cannot be changed.
+// As such, silence lint warnings about them.
+//nolint
+type InspectContainerHostConfig struct {
+ // Binds contains an array of user-added mounts.
+ // Both volume mounts and named volumes are included.
+ // Tmpfs mounts are NOT included.
+ // In 'docker inspect' this is separated into 'Binds' and 'Mounts' based
+ // on how a mount was added. We do not make this distinction and do not
+ // include a Mounts field in inspect.
+ // Format: <src>:<destination>[:<comma-separated options>]
+ Binds []string `json:"Binds"`
+ // ContainerIDFile is a file created during container creation to hold
+ // the ID of the created container.
+ // This is not handled within libpod and is stored in an annotation.
+ ContainerIDFile string `json:"ContainerIDFile"`
+ // LogConfig contains information on the container's logging backend
+ LogConfig *InspectLogConfig `json:"LogConfig"`
+ // NetworkMode is the configuration of the container's network
+ // namespace.
+ // Populated as follows:
+ // default - A network namespace is being created and configured via CNI
+ // none - A network namespace is being created, not configured via CNI
+ // host - No network namespace created
+ // container:<id> - Using another container's network namespace
+ // ns:<path> - A path to a network namespace has been specified
+ NetworkMode string `json:"NetworkMode"`
+ // PortBindings contains the container's port bindings.
+ // It is formatted as map[string][]InspectHostPort.
+ // The string key here is formatted as <integer port number>/<protocol>
+ // and represents the container port. A single container port may be
+ // bound to multiple host ports (on different IPs).
+ PortBindings map[string][]InspectHostPort `json:"PortBindings"`
+ // RestartPolicy contains the container's restart policy.
+ RestartPolicy *InspectRestartPolicy `json:"RestartPolicy"`
+ // AutoRemove is whether the container will be automatically removed on
+ // exiting.
+ // It is not handled directly within libpod and is stored in an
+ // annotation.
+ AutoRemove bool `json:"AutoRemove"`
+ // VolumeDriver is presently unused and is retained for Docker
+ // compatibility.
+ VolumeDriver string `json:"VolumeDriver"`
+ // VolumesFrom is a list of containers which this container uses volumes
+ // from. This is not handled directly within libpod and is stored in an
+ // annotation.
+ // It is formatted as an array of container names and IDs.
+ VolumesFrom []string `json:"VolumesFrom"`
+ // CapAdd is a list of capabilities added to the container.
+ // It is not directly stored by Libpod, and instead computed from the
+ // capabilities listed in the container's spec, compared against a set
+ // of default capabilities.
+ CapAdd []string `json:"CapAdd"`
+ // CapDrop is a list of capabilities removed from the container.
+ // It is not directly stored by libpod, and instead computed from the
+ // capabilities listed in the container's spec, compared against a set
+ // of default capabilities.
+ CapDrop []string `json:"CapDrop"`
+ // Dns is a list of DNS nameservers that will be added to the
+ // container's resolv.conf
+ Dns []string `json:"Dns"`
+ // DnsOptions is a list of DNS options that will be set in the
+ // container's resolv.conf
+ DnsOptions []string `json:"DnsOptions"`
+ // DnsSearch is a list of DNS search domains that will be set in the
+ // container's resolv.conf
+ DnsSearch []string `json:"DnsSearch"`
+ // ExtraHosts contains hosts that will be added to the container's
+ // /etc/hosts.
+ ExtraHosts []string `json:"ExtraHosts"`
+ // GroupAdd contains groups that the user inside the container will be
+ // added to.
+ GroupAdd []string `json:"GroupAdd"`
+ // IpcMode represents the configuration of the container's IPC
+ // namespace.
+ // Populated as follows:
+ // "" (empty string) - Default, an IPC namespace will be created
+ // host - No IPC namespace created
+ // container:<id> - Using another container's IPC namespace
+ // ns:<path> - A path to an IPC namespace has been specified
+ IpcMode string `json:"IpcMode"`
+ // Cgroup contains the container's cgroup. It is presently not
+ // populated.
+ // TODO.
+ Cgroup string `json:"Cgroup"`
+ // Links is unused, and provided purely for Docker compatibility.
+ Links []string `json:"Links"`
+ // OOMScoreAdj is an adjustment that will be made to the container's OOM
+ // score.
+ OomScoreAdj int `json:"OomScoreAdj"`
+ // PidMode represents the configuration of the container's PID
+ // namespace.
+ // Populated as follows:
+ // "" (empty string) - Default, a PID namespace will be created
+ // host - No PID namespace created
+ // container:<id> - Using another container's PID namespace
+ // ns:<path> - A path to a PID namespace has been specified
+ PidMode string `json:"PidMode"`
+ // Privileged indicates whether the container is running with elevated
+ // privileges.
+ // This has a very specific meaning in the Docker sense, so it's very
+ // difficult to decode from the spec and config, and so is stored as an
+ // annotation.
+ Privileged bool `json:"Privileged"`
+ // PublishAllPorts indicates whether image ports are being published.
+ // This is not directly stored in libpod and is saved as an annotation.
+ PublishAllPorts bool `json:"PublishAllPorts"`
+ // ReadonlyRootfs is whether the container will be mounted read-only.
+ ReadonlyRootfs bool `json:"ReadonlyRootfs"`
+ // SecurityOpt is a list of security-related options that are set in the
+ // container.
+ SecurityOpt []string `json:"SecurityOpt"`
+ // Tmpfs is a list of tmpfs filesystems that will be mounted into the
+ // container.
+ // It is a map of destination path to options for the mount.
+ Tmpfs map[string]string `json:"Tmpfs"`
+ // UTSMode represents the configuration of the container's UTS
+ // namespace.
+ // Populated as follows:
+ // "" (empty string) - Default, a UTS namespace will be created
+ // host - no UTS namespace created
+ // container:<id> - Using another container's UTS namespace
+ // ns:<path> - A path to a UTS namespace has been specified
+ UTSMode string `json:"UTSMode"`
+ // UsernsMode represents the configuration of the container's user
+ // namespace.
+ // When running rootless, a user namespace is created outside of libpod
+ // to allow some privileged operations. This will not be reflected here.
+ // Populated as follows:
+ // "" (empty string) - No user namespace will be created
+ // private - The container will be run in a user namespace
+ // container:<id> - Using another container's user namespace
+ // ns:<path> - A path to a user namespace has been specified
+ // TODO Rootless has an additional 'keep-id' option, presently not
+ // reflected here.
+ UsernsMode string `json:"UsernsMode"`
+ // ShmSize is the size of the container's SHM device.
+ ShmSize int64 `json:"ShmSize"`
+ // Runtime is provided purely for Docker compatibility.
+ // It is set unconditionally to "oci" as Podman does not presently
+ // support non-OCI runtimes.
+ Runtime string `json:"Runtime"`
+ // ConsoleSize is an array of 2 integers showing the size of the
+ // container's console.
+ // It is only set if the container is creating a terminal.
+ // TODO.
+ ConsoleSize []uint `json:"ConsoleSize"`
+ // Isolation is presently unused and provided solely for Docker
+ // compatibility.
+ Isolation string `json:"Isolation"`
+ // CpuShares indicates the CPU resources allocated to the container.
+ // It is a relative weight in the scheduler for assigning CPU time
+ // versus other CGroups.
+ CpuShares uint64 `json:"CpuShares"`
+ // Memory indicates the memory resources allocated to the container.
+ // This is the limit (in bytes) of RAM the container may use.
+ Memory int64 `json:"Memory"`
+ // NanoCpus indicates number of CPUs allocated to the container.
+ // It is an integer where one full CPU is indicated by 1000000000 (one
+ // billion).
+ // Thus, 2.5 CPUs (fractional portions of CPUs are allowed) would be
+ // 2500000000 (2.5 billion).
+ // In 'docker inspect' this is set exclusively of two further options in
+ // the output (CpuPeriod and CpuQuota) which are both used to implement
+ // this functionality.
+ // We cannot distinguish here, so if CpuPeriod is set to the default of
+ // 100000, we will set CpuQuota, CpuPeriod, and NanoCpus. If CpuPeriod
+ // is not the default, we will not set NanoCpus.
+ NanoCpus int64 `json:"NanoCpus"`
+ // CgroupParent is the CGroup parent of the container.
+ // Only set if not default.
+ CgroupParent string `json:"CgroupParent"`
+ // BlkioWeight indicates the I/O resources allocated to the container.
+ // It is a relative weight in the scheduler for assigning I/O time
+ // versus other CGroups.
+ BlkioWeight uint16 `json:"BlkioWeight"`
+ // BlkioWeightDevice is an array of I/O resource priorities for
+ // individual device nodes.
+ // Unfortunately, the spec only stores the device's Major/Minor numbers
+ // and not the path, which is used here.
+ // Fortunately, the kernel provides an interface for retrieving the path
+ // of a given node by major:minor at /sys/dev/. However, the exact path
+ // in use may not be what was used in the original CLI invocation -
+ // though it is guaranteed that the device node will be the same, and
+ // using the given path will be functionally identical.
+ BlkioWeightDevice []InspectBlkioWeightDevice `json:"BlkioWeightDevice"`
+ // BlkioDeviceReadBps is an array of I/O throttle parameters for
+ // individual device nodes.
+ // This specifically sets read rate cap in bytes per second for device
+ // nodes.
+ // As with BlkioWeightDevice, we pull the path from /sys/dev, and we
+ // don't guarantee the path will be identical to the original (though
+ // the node will be).
+ BlkioDeviceReadBps []InspectBlkioThrottleDevice `json:"BlkioDeviceReadBps"`
+ // BlkioDeviceWriteBps is an array of I/O throttle parameters for
+ // individual device nodes.
+ // this specifically sets write rate cap in bytes per second for device
+ // nodes.
+ // as with BlkioWeightDevice, we pull the path from /sys/dev, and we
+ // don't guarantee the path will be identical to the original (though
+ // the node will be).
+ BlkioDeviceWriteBps []InspectBlkioThrottleDevice `json:"BlkioDeviceWriteBps"`
+ // BlkioDeviceReadIOps is an array of I/O throttle parameters for
+ // individual device nodes.
+ // This specifically sets the read rate cap in iops per second for
+ // device nodes.
+ // As with BlkioWeightDevice, we pull the path from /sys/dev, and we
+ // don't guarantee the path will be identical to the original (though
+ // the node will be).
+ BlkioDeviceReadIOps []InspectBlkioThrottleDevice `json:"BlkioDeviceReadIOps"`
+ // BlkioDeviceWriteIOps is an array of I/O throttle parameters for
+ // individual device nodes.
+ // This specifically sets the write rate cap in iops per second for
+ // device nodes.
+ // As with BlkioWeightDevice, we pull the path from /sys/dev, and we
+ // don't guarantee the path will be identical to the original (though
+ // the node will be).
+ BlkioDeviceWriteIOps []InspectBlkioThrottleDevice `json:"BlkioDeviceWriteIOps"`
+ // CpuPeriod is the length of a CPU period in microseconds.
+ // It relates directly to CpuQuota.
+ CpuPeriod uint64 `json:"CpuPeriod"`
+ // CpuQuota is the amount of time (in microseconds) that a container
+ // can use the CPU in every CpuPeriod.
+ CpuQuota int64 `json:"CpuQuota"`
+ // CpuRealtimePeriod is the length of time (in microseconds) of the CPU
+ // realtime period. If set to 0, no time will be allocated to realtime
+ // tasks.
+ CpuRealtimePeriod uint64 `json:"CpuRealtimePeriod"`
+ // CpuRealtimeRuntime is the length of time (in microseconds) allocated
+ // for realtime tasks within every CpuRealtimePeriod.
+ CpuRealtimeRuntime int64 `json:"CpuRealtimeRuntime"`
+ // CpusetCpus is the set of CPUs that the container will execute
+ // on. Formatted as `0-3` or `0,2`. Default (if unset) is all CPUs.
+ CpusetCpus string `json:"CpusetCpus"`
+ // CpusetMems is the set of memory nodes the container will use.
+ // Formatted as `0-3` or `0,2`. Default (if unset) is all memory nodes.
+ CpusetMems string `json:"CpusetMems"`
+ // Devices is a list of device nodes that will be added to the
+ // container.
+ // These are stored in the OCI spec only as type, major, minor while we
+ // display the host path. We convert this with /sys/dev, but we cannot
+ // guarantee that the host path will be identical - only that the actual
+ // device will be.
+ Devices []InspectDevice `json:"Devices"`
+ // DiskQuota is the maximum amount of disk space the container may use
+ // (in bytes).
+ // Presently not populated.
+ // TODO.
+ DiskQuota uint64 `json:"DiskQuota"`
+ // KernelMemory is the maximum amount of memory the kernel will devote
+ // to the container.
+ KernelMemory int64 `json:"KernelMemory"`
+ // MemoryReservation is the reservation (soft limit) of memory available
+ // to the container. Soft limits are warnings only and can be exceeded.
+ MemoryReservation int64 `json:"MemoryReservation"`
+ // MemorySwap is the total limit for all memory available to the
+ // container, including swap. 0 indicates that there is no limit to the
+ // amount of memory available.
+ MemorySwap int64 `json:"MemorySwap"`
+ // MemorySwappiness is the willingness of the kernel to page container
+ // memory to swap. It is an integer from 0 to 100, with low numbers
+ // being more likely to be put into swap.
+ // -1, the default, will not set swappiness and use the system defaults.
+ MemorySwappiness int64 `json:"MemorySwappiness"`
+ // OomKillDisable indicates whether the kernel OOM killer is disabled
+ // for the container.
+ OomKillDisable bool `json:"OomKillDisable"`
+ // Init indicates whether the container has an init mounted into it.
+ Init bool `json:"Init,omitempty"`
+ // PidsLimit is the maximum number of PIDs that may be created within
+ // the container. 0, the default, indicates no limit.
+ PidsLimit int64 `json:"PidsLimit"`
+ // Ulimits is a set of ulimits that will be set within the container.
+ Ulimits []InspectUlimit `json:"Ulimits"`
+ // CpuCount is Windows-only and not presently implemented.
+ CpuCount uint64 `json:"CpuCount"`
+ // CpuPercent is Windows-only and not presently implemented.
+ CpuPercent uint64 `json:"CpuPercent"`
+ // IOMaximumIOps is Windows-only and not presently implemented.
+ IOMaximumIOps uint64 `json:"IOMaximumIOps"`
+ // IOMaximumBandwidth is Windows-only and not presently implemented.
+ IOMaximumBandwidth uint64 `json:"IOMaximumBandwidth"`
+}
+
+// InspectLogConfig holds information about a container's configured log driver
+// and is presently unused. It is retained for Docker compatibility.
+type InspectLogConfig struct {
+ Type string `json:"Type"`
+ Config map[string]string `json:"Config"` // type unclear, TODO
+}
+
+// InspectRestartPolicy holds information about the container's restart policy.
+type InspectRestartPolicy struct {
+ // Name contains the container's restart policy.
+ // Allowable values are "no" or "" (take no action),
+ // "on-failure" (restart on non-zero exit code, with an optional max
+ // retry count), and "always" (always restart on container stop, unless
+ // explicitly requested by API).
+ // Note that this is NOT actually a name of any sort - the poor naming
+ // is for Docker compatibility.
+ Name string `json:"Name"`
+ // MaximumRetryCount is the maximum number of retries allowed if the
+ // "on-failure" restart policy is in use. Not used if "on-failure" is
+ // not set.
+ MaximumRetryCount uint `json:"MaximumRetryCount"`
+}
+
+// InspectBlkioWeightDevice holds information about the relative weight
+// of an individual device node. Weights are used in the I/O scheduler to give
+// relative priority to some accesses.
+type InspectBlkioWeightDevice struct {
+ // Path is the path to the device this applies to.
+ Path string `json:"Path"`
+ // Weight is the relative weight the scheduler will use when scheduling
+ // I/O.
+ Weight uint16 `json:"Weight"`
+}
+
+// InspectBlkioThrottleDevice holds information about a speed cap for a device
+// node. This cap applies to a specific operation (read, write, etc) on the given
+// node.
+type InspectBlkioThrottleDevice struct {
+ // Path is the path to the device this applies to.
+ Path string `json:"Path"`
+ // Rate is the maximum rate. It is in either bytes per second or iops
+ // per second, determined by where it is used - documentation will
+ // indicate which is appropriate.
+ Rate uint64 `json:"Rate"`
+}
+
+// InspectUlimit is a ulimit that will be applied to the container.
+type InspectUlimit struct {
+ // Name is the name (type) of the ulimit.
+ Name string `json:"Name"`
+ // Soft is the soft limit that will be applied.
+ Soft uint64 `json:"Soft"`
+ // Hard is the hard limit that will be applied.
+ Hard uint64 `json:"Hard"`
+}
+
+// InspectMount provides a record of a single mount in a container. It contains
+// fields for both named and normal volumes. Only user-specified volumes will be
+// included, and tmpfs volumes are not included even if the user specified them.
+type InspectMount struct {
+ // Whether the mount is a volume or bind mount. Allowed values are
+ // "volume" and "bind".
+ Type string `json:"Type"`
+ // The name of the volume. Empty for bind mounts.
+ Name string `json:"Name,omptempty"`
+ // The source directory for the volume.
+ Source string `json:"Source"`
+ // The destination directory for the volume. Specified as a path within
+ // the container, as it would be passed into the OCI runtime.
+ Destination string `json:"Destination"`
+ // The driver used for the named volume. Empty for bind mounts.
+ Driver string `json:"Driver"`
+ // Contains SELinux :z/:Z mount options. It is unclear what else, if
+ // anything, goes in here.
+ Mode string `json:"Mode"`
+ // All remaining mount options. Additional data, not present in the
+ // original output.
+ Options []string `json:"Options"`
+ // Whether the volume is read-write
+ RW bool `json:"RW"`
+ // Mount propagation for the mount. Can be empty if not specified, but
+ // is always printed - no omitempty.
+ Propagation string `json:"Propagation"`
+}
+
+// InspectDevice is a single device that will be mounted into the container.
+type InspectDevice struct {
+ // PathOnHost is the path of the device on the host.
+ PathOnHost string `json:"PathOnHost"`
+ // PathInContainer is the path of the device within the container.
+ PathInContainer string `json:"PathInContainer"`
+ // CgroupPermissions is the permissions of the mounted device.
+ // Presently not populated.
+ // TODO.
+ CgroupPermissions string `json:"CgroupPermissions"`
+}
+
+// InspectHostPort provides information on a port on the host that a container's
+// port is bound to.
+type InspectHostPort struct {
+ // IP on the host we are bound to. "" if not specified (binding to all
+ // IPs).
+ HostIP string `json:"HostIp"`
+ // Port on the host we are bound to. No special formatting - just an
+ // integer stuffed into a string.
+ HostPort string `json:"HostPort"`
+}
+
+// InspectContainerState provides a detailed record of a container's current
+// state. It is returned as part of InspectContainerData.
+// As with InspectContainerData, many portions of this struct are matched to
+// Docker, but here we see more fields that are unused (nonsensical in the
+// context of Libpod).
+type InspectContainerState struct {
+ OciVersion string `json:"OciVersion"`
+ Status string `json:"Status"`
+ Running bool `json:"Running"`
+ Paused bool `json:"Paused"`
+ Restarting bool `json:"Restarting"` // TODO
+ OOMKilled bool `json:"OOMKilled"`
+ Dead bool `json:"Dead"`
+ Pid int `json:"Pid"`
+ ConmonPid int `json:"ConmonPid,omitempty"`
+ ExitCode int32 `json:"ExitCode"`
+ Error string `json:"Error"` // TODO
+ StartedAt time.Time `json:"StartedAt"`
+ FinishedAt time.Time `json:"FinishedAt"`
+ Healthcheck HealthCheckResults `json:"Healthcheck,omitempty"`
+}
+
+// InspectNetworkSettings holds information about the network settings of the
+// container.
+// Many fields are maintained only for compatibility with `docker inspect` and
+// are unused within Libpod.
+type InspectNetworkSettings struct {
+ Bridge string `json:"Bridge"`
+ SandboxID string `json:"SandboxID"`
+ HairpinMode bool `json:"HairpinMode"`
+ LinkLocalIPv6Address string `json:"LinkLocalIPv6Address"`
+ LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen"`
+ Ports []ocicni.PortMapping `json:"Ports"`
+ SandboxKey string `json:"SandboxKey"`
+ SecondaryIPAddresses []string `json:"SecondaryIPAddresses"`
+ SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses"`
+ EndpointID string `json:"EndpointID"`
+ Gateway string `json:"Gateway"`
+ GlobalIPv6Address string `json:"GlobalIPv6Address"`
+ GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen"`
+ IPAddress string `json:"IPAddress"`
+ IPPrefixLen int `json:"IPPrefixLen"`
+ IPv6Gateway string `json:"IPv6Gateway"`
+ MacAddress string `json:"MacAddress"`
+}
+
+// Inspect a container for low-level information
+func (c *Container) Inspect(size bool) (*InspectContainerData, error) {
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return nil, err
+ }
+ }
+
+ storeCtr, err := c.runtime.store.Container(c.ID())
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting container from store %q", c.ID())
+ }
+ layer, err := c.runtime.store.Layer(storeCtr.LayerID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading information about layer %q", storeCtr.LayerID)
+ }
+ driverData, err := driver.GetDriverData(c.runtime.store, layer.ID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting graph driver info %q", c.ID())
+ }
+ return c.getContainerInspectData(size, driverData)
+}
+
+func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) (*InspectContainerData, error) {
config := c.config
runtimeInfo := c.state
- spec, err := c.specFromState()
+ ctrSpec, err := c.specFromState()
if err != nil {
return nil, err
}
- // Process is allowed to be nil in the spec
+ // Process is allowed to be nil in the ctrSpec
args := []string{}
if config.Spec.Process != nil {
args = config.Spec.Process.Args
@@ -35,49 +672,41 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
execIDs = append(execIDs, id)
}
- if c.state.BindMounts == nil {
- c.state.BindMounts = make(map[string]string)
- }
-
resolvPath := ""
- if getPath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
- resolvPath = getPath
- }
-
hostsPath := ""
- if getPath, ok := c.state.BindMounts["/etc/hosts"]; ok {
- hostsPath = getPath
- }
-
hostnamePath := ""
- if getPath, ok := c.state.BindMounts["/etc/hostname"]; ok {
- hostnamePath = getPath
+ if c.state.BindMounts != nil {
+ if getPath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
+ resolvPath = getPath
+ }
+ if getPath, ok := c.state.BindMounts["/etc/hosts"]; ok {
+ hostsPath = getPath
+ }
+ if getPath, ok := c.state.BindMounts["/etc/hostname"]; ok {
+ hostnamePath = getPath
+ }
}
- var mounts []specs.Mount
- for i, mnt := range spec.Mounts {
- mounts = append(mounts, mnt)
- // We only want to show the name of the named volume in the inspect
- // output, so split the path and get the name out of it.
- if strings.Contains(mnt.Source, c.runtime.config.VolumePath) {
- split := strings.Split(mnt.Source[len(c.runtime.config.VolumePath)+1:], "/")
- mounts[i].Source = split[0]
- }
+ namedVolumes, mounts := c.sortUserVolumes(ctrSpec)
+ inspectMounts, err := c.getInspectMounts(ctrSpec, namedVolumes, mounts)
+ if err != nil {
+ return nil, err
}
- data := &inspect.ContainerInspectData{
+ data := &InspectContainerData{
ID: config.ID,
Created: config.CreatedTime,
Path: path,
Args: args,
- State: &inspect.ContainerInspectState{
- OciVersion: spec.Version,
+ State: &InspectContainerState{
+ OciVersion: ctrSpec.Version,
Status: runtimeInfo.State.String(),
- Running: runtimeInfo.State == ContainerStateRunning,
- Paused: runtimeInfo.State == ContainerStatePaused,
+ Running: runtimeInfo.State == define.ContainerStateRunning,
+ Paused: runtimeInfo.State == define.ContainerStatePaused,
OOMKilled: runtimeInfo.OOMKilled,
Dead: runtimeInfo.State.String() == "bad state",
Pid: runtimeInfo.PID,
+ ConmonPid: runtimeInfo.ConmonPID,
ExitCode: runtimeInfo.ExitCode,
Error: "", // can't get yet
StartedAt: runtimeInfo.StartedTime,
@@ -93,20 +722,21 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
HostsPath: hostsPath,
StaticDir: config.StaticDir,
LogPath: config.LogPath,
+ OCIRuntime: config.OCIRuntime,
ConmonPidFile: config.ConmonPidFile,
Name: config.Name,
RestartCount: int32(runtimeInfo.RestartCount),
Driver: driverData.Name,
MountLabel: config.MountLabel,
ProcessLabel: config.ProcessLabel,
- EffectiveCaps: spec.Process.Capabilities.Effective,
- BoundingCaps: spec.Process.Capabilities.Bounding,
- AppArmorProfile: spec.Process.ApparmorProfile,
+ EffectiveCaps: ctrSpec.Process.Capabilities.Effective,
+ BoundingCaps: ctrSpec.Process.Capabilities.Bounding,
+ AppArmorProfile: ctrSpec.Process.ApparmorProfile,
ExecIDs: execIDs,
GraphDriver: driverData,
- Mounts: mounts,
+ Mounts: inspectMounts,
Dependencies: c.Dependencies(),
- NetworkSettings: &inspect.NetworkSettings{
+ NetworkSettings: &InspectNetworkSettings{
Bridge: "", // TODO
SandboxID: "", // TODO - is this even relevant?
HairpinMode: false, // TODO
@@ -129,8 +759,12 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
IsInfra: c.IsInfra(),
}
+ if c.state.ConfigPath != "" {
+ data.OCIConfigPath = c.state.ConfigPath
+ }
+
if c.config.HealthCheckConfig != nil {
- // This container has a healthcheck defined in it; we need to add it's state
+ // This container has a healthcheck defined in it; we need to add its state
healthCheckState, err := c.GetHealthCheckLog()
if err != nil {
// An error here is not considered fatal; no health state will be displayed
@@ -148,6 +782,18 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
// Get information on the container's network namespace (if present)
data = c.getContainerNetworkInfo(data)
+ inspectConfig, err := c.generateInspectContainerConfig(ctrSpec)
+ if err != nil {
+ return nil, err
+ }
+ data.Config = inspectConfig
+
+ hostConfig, err := c.generateInspectContainerHostConfig(ctrSpec, namedVolumes, mounts)
+ if err != nil {
+ return nil, err
+ }
+ data.HostConfig = hostConfig
+
if size {
rootFsSize, err := c.rootFsSize()
if err != nil {
@@ -162,3 +808,631 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
}
return data, nil
}
+
+// Get inspect-formatted mounts list.
+// Only includes user-specified mounts. Only includes bind mounts and named
+// volumes, not tmpfs volumes.
+func (c *Container) getInspectMounts(ctrSpec *spec.Spec, namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) ([]InspectMount, error) {
+ inspectMounts := []InspectMount{}
+
+ // No mounts, return early
+ if len(c.config.UserVolumes) == 0 {
+ return inspectMounts, nil
+ }
+
+ for _, volume := range namedVolumes {
+ mountStruct := InspectMount{}
+ mountStruct.Type = "volume"
+ mountStruct.Destination = volume.Dest
+ mountStruct.Name = volume.Name
+
+ // For src and driver, we need to look up the named
+ // volume.
+ volFromDB, err := c.runtime.state.Volume(volume.Name)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID())
+ }
+ mountStruct.Driver = volFromDB.Driver()
+ mountStruct.Source = volFromDB.MountPoint()
+
+ parseMountOptionsForInspect(volume.Options, &mountStruct)
+
+ inspectMounts = append(inspectMounts, mountStruct)
+ }
+ for _, mount := range mounts {
+ // It's a mount.
+ // Is it a tmpfs? If so, discard.
+ if mount.Type == "tmpfs" {
+ continue
+ }
+
+ mountStruct := InspectMount{}
+ mountStruct.Type = "bind"
+ mountStruct.Source = mount.Source
+ mountStruct.Destination = mount.Destination
+
+ parseMountOptionsForInspect(mount.Options, &mountStruct)
+
+ inspectMounts = append(inspectMounts, mountStruct)
+ }
+
+ return inspectMounts, nil
+}
+
+// Parse mount options so we can populate them in the mount structure.
+// The mount passed in will be modified.
+func parseMountOptionsForInspect(options []string, mount *InspectMount) {
+ isRW := true
+ mountProp := ""
+ zZ := ""
+ otherOpts := []string{}
+
+ // Some of these may be overwritten if the user passes us garbage opts
+ // (for example, [ro,rw])
+ // We catch these on the Podman side, so not a problem there, but other
+ // users of libpod who do not properly validate mount options may see
+ // this.
+ // Not really worth dealing with on our end - garbage in, garbage out.
+ for _, opt := range options {
+ switch opt {
+ case "ro":
+ isRW = false
+ case "rw":
+ // Do nothing, silently discard
+ case "shared", "slave", "private", "rshared", "rslave", "rprivate":
+ mountProp = opt
+ case "z", "Z":
+ zZ = opt
+ default:
+ otherOpts = append(otherOpts, opt)
+ }
+ }
+
+ mount.RW = isRW
+ mount.Propagation = mountProp
+ mount.Mode = zZ
+ mount.Options = otherOpts
+}
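
The following is a usage sketch of the option classification done by parseMountOptionsForInspect, on a trimmed copy of the mount struct so it stands alone; parseOptions and inspectMount are illustrative names, and the classification rules mirror the switch above.

package main

import "fmt"

// A trimmed copy of the InspectMount fields that the parsing fills in.
type inspectMount struct {
	RW          bool
	Propagation string
	Mode        string
	Options     []string
}

// parseOptions applies the same classification as parseMountOptionsForInspect:
// ro/rw set RW, propagation flags go to Propagation, z/Z to Mode, and
// everything else is kept in Options.
func parseOptions(options []string, m *inspectMount) {
	m.RW = true
	for _, opt := range options {
		switch opt {
		case "ro":
			m.RW = false
		case "rw":
			// keep the default
		case "shared", "slave", "private", "rshared", "rslave", "rprivate":
			m.Propagation = opt
		case "z", "Z":
			m.Mode = opt
		default:
			m.Options = append(m.Options, opt)
		}
	}
}

func main() {
	m := inspectMount{}
	parseOptions([]string{"ro", "Z", "nosuid"}, &m)
	fmt.Printf("%+v\n", m) // {RW:false Propagation: Mode:Z Options:[nosuid]}
}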
+
+// Generate the InspectContainerConfig struct for the Config field of Inspect.
+func (c *Container) generateInspectContainerConfig(spec *spec.Spec) (*InspectContainerConfig, error) {
+ ctrConfig := new(InspectContainerConfig)
+
+ ctrConfig.Hostname = c.Hostname()
+ ctrConfig.User = c.config.User
+ if spec.Process != nil {
+ ctrConfig.Tty = spec.Process.Terminal
+ ctrConfig.Env = []string{}
+ ctrConfig.Env = append(ctrConfig.Env, spec.Process.Env...)
+ ctrConfig.WorkingDir = spec.Process.Cwd
+ }
+
+ ctrConfig.OpenStdin = c.config.Stdin
+ ctrConfig.Image = c.config.RootfsImageName
+
+ // Leave empty if not explicitly overwritten by user
+ if len(c.config.Command) != 0 {
+ ctrConfig.Cmd = []string{}
+ ctrConfig.Cmd = append(ctrConfig.Cmd, c.config.Command...)
+ }
+
+ // Leave empty if not explicitly overwritten by user
+ if len(c.config.Entrypoint) != 0 {
+ ctrConfig.Entrypoint = strings.Join(c.config.Entrypoint, " ")
+ }
+
+ if len(c.config.Labels) != 0 {
+ ctrConfig.Labels = make(map[string]string)
+ for k, v := range c.config.Labels {
+ ctrConfig.Labels[k] = v
+ }
+ }
+
+ if len(spec.Annotations) != 0 {
+ ctrConfig.Annotations = make(map[string]string)
+ for k, v := range spec.Annotations {
+ ctrConfig.Annotations[k] = v
+ }
+ }
+
+ ctrConfig.StopSignal = c.config.StopSignal
+ // TODO: should JSON deep copy this to ensure internal pointers don't
+ // leak.
+ ctrConfig.Healthcheck = c.config.HealthCheckConfig
+
+ return ctrConfig, nil
+}
+
+// Generate the InspectContainerHostConfig struct for the HostConfig field of
+// Inspect.
+func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) (*InspectContainerHostConfig, error) {
+ hostConfig := new(InspectContainerHostConfig)
+
+ logConfig := new(InspectLogConfig)
+ logConfig.Type = c.config.LogDriver
+ hostConfig.LogConfig = logConfig
+
+ restartPolicy := new(InspectRestartPolicy)
+ restartPolicy.Name = c.config.RestartPolicy
+ restartPolicy.MaximumRetryCount = c.config.RestartRetries
+ hostConfig.RestartPolicy = restartPolicy
+
+ hostConfig.Dns = make([]string, 0, len(c.config.DNSServer))
+ for _, dns := range c.config.DNSServer {
+ hostConfig.Dns = append(hostConfig.Dns, dns.String())
+ }
+
+ hostConfig.DnsOptions = make([]string, 0, len(c.config.DNSOption))
+ hostConfig.DnsOptions = append(hostConfig.DnsOptions, c.config.DNSOption...)
+
+ hostConfig.DnsSearch = make([]string, 0, len(c.config.DNSSearch))
+ hostConfig.DnsSearch = append(hostConfig.DnsSearch, c.config.DNSSearch...)
+
+ hostConfig.ExtraHosts = make([]string, 0, len(c.config.HostAdd))
+ hostConfig.ExtraHosts = append(hostConfig.ExtraHosts, c.config.HostAdd...)
+
+ hostConfig.GroupAdd = make([]string, 0, len(c.config.Groups))
+ hostConfig.GroupAdd = append(hostConfig.GroupAdd, c.config.Groups...)
+
+ hostConfig.SecurityOpt = []string{}
+ if ctrSpec.Process != nil {
+ if ctrSpec.Process.OOMScoreAdj != nil {
+ hostConfig.OomScoreAdj = *ctrSpec.Process.OOMScoreAdj
+ }
+ if ctrSpec.Process.NoNewPrivileges {
+ hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, "no-new-privileges")
+ }
+ }
+
+ hostConfig.ReadonlyRootfs = ctrSpec.Root.Readonly
+ hostConfig.ShmSize = c.config.ShmSize
+ hostConfig.Runtime = "oci"
+
+ // This is very expensive to initialize.
+ // So we don't want to initialize it unless we absolutely have to - IE,
+ // there are things that require a major:minor to path translation.
+ var deviceNodes map[string]string
+
+ // Annotations
+ if ctrSpec.Annotations != nil {
+ hostConfig.ContainerIDFile = ctrSpec.Annotations[InspectAnnotationCIDFile]
+ if ctrSpec.Annotations[InspectAnnotationAutoremove] == InspectResponseTrue {
+ hostConfig.AutoRemove = true
+ }
+ if ctrs, ok := ctrSpec.Annotations[InspectAnnotationVolumesFrom]; ok {
+ hostConfig.VolumesFrom = strings.Split(ctrs, ",")
+ }
+ if ctrSpec.Annotations[InspectAnnotationPrivileged] == InspectResponseTrue {
+ hostConfig.Privileged = true
+ }
+ if ctrSpec.Annotations[InspectAnnotationInit] == InspectResponseTrue {
+ hostConfig.Init = true
+ }
+ if label, ok := ctrSpec.Annotations[InspectAnnotationLabel]; ok {
+ hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, fmt.Sprintf("label=%s", label))
+ }
+ if seccomp, ok := ctrSpec.Annotations[InspectAnnotationSeccomp]; ok {
+ hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, fmt.Sprintf("seccomp=%s", seccomp))
+ }
+ if apparmor, ok := ctrSpec.Annotations[InspectAnnotationApparmor]; ok {
+ hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, fmt.Sprintf("apparmor=%s", apparmor))
+ }
+ }
+
+ // Resource limits
+ if ctrSpec.Linux != nil {
+ if ctrSpec.Linux.Resources != nil {
+ if ctrSpec.Linux.Resources.CPU != nil {
+ if ctrSpec.Linux.Resources.CPU.Shares != nil {
+ hostConfig.CpuShares = *ctrSpec.Linux.Resources.CPU.Shares
+ }
+ if ctrSpec.Linux.Resources.CPU.Period != nil {
+ hostConfig.CpuPeriod = *ctrSpec.Linux.Resources.CPU.Period
+ }
+ if ctrSpec.Linux.Resources.CPU.Quota != nil {
+ hostConfig.CpuQuota = *ctrSpec.Linux.Resources.CPU.Quota
+ }
+ if ctrSpec.Linux.Resources.CPU.RealtimePeriod != nil {
+ hostConfig.CpuRealtimePeriod = *ctrSpec.Linux.Resources.CPU.RealtimePeriod
+ }
+ if ctrSpec.Linux.Resources.CPU.RealtimeRuntime != nil {
+ hostConfig.CpuRealtimeRuntime = *ctrSpec.Linux.Resources.CPU.RealtimeRuntime
+ }
+ hostConfig.CpusetCpus = ctrSpec.Linux.Resources.CPU.Cpus
+ hostConfig.CpusetMems = ctrSpec.Linux.Resources.CPU.Mems
+ }
+ if ctrSpec.Linux.Resources.Memory != nil {
+ if ctrSpec.Linux.Resources.Memory.Limit != nil {
+ hostConfig.Memory = *ctrSpec.Linux.Resources.Memory.Limit
+ }
+ if ctrSpec.Linux.Resources.Memory.Kernel != nil {
+ hostConfig.KernelMemory = *ctrSpec.Linux.Resources.Memory.Kernel
+ }
+ if ctrSpec.Linux.Resources.Memory.Reservation != nil {
+ hostConfig.MemoryReservation = *ctrSpec.Linux.Resources.Memory.Reservation
+ }
+ if ctrSpec.Linux.Resources.Memory.Swap != nil {
+ hostConfig.MemorySwap = *ctrSpec.Linux.Resources.Memory.Swap
+ }
+ if ctrSpec.Linux.Resources.Memory.Swappiness != nil {
+ hostConfig.MemorySwappiness = int64(*ctrSpec.Linux.Resources.Memory.Swappiness)
+ } else {
+ // Swappiness has a default of -1
+ hostConfig.MemorySwappiness = -1
+ }
+ if ctrSpec.Linux.Resources.Memory.DisableOOMKiller != nil {
+ hostConfig.OomKillDisable = *ctrSpec.Linux.Resources.Memory.DisableOOMKiller
+ }
+ }
+ if ctrSpec.Linux.Resources.Pids != nil {
+ hostConfig.PidsLimit = ctrSpec.Linux.Resources.Pids.Limit
+ }
+ if ctrSpec.Linux.Resources.BlockIO != nil {
+ if ctrSpec.Linux.Resources.BlockIO.Weight != nil {
+ hostConfig.BlkioWeight = *ctrSpec.Linux.Resources.BlockIO.Weight
+ }
+ hostConfig.BlkioWeightDevice = []InspectBlkioWeightDevice{}
+ for _, dev := range ctrSpec.Linux.Resources.BlockIO.WeightDevice {
+ key := fmt.Sprintf("%d:%d", dev.Major, dev.Minor)
+ // TODO: how do we handle LeafWeight vs
+ // Weight? For now, ignore anything
+ // without Weight set.
+ if dev.Weight == nil {
+ logrus.Warnf("Ignoring weight device %s as it lacks a weight", key)
+ continue
+ }
+ if deviceNodes == nil {
+ nodes, err := util.FindDeviceNodes()
+ if err != nil {
+ return nil, err
+ }
+ deviceNodes = nodes
+ }
+ path, ok := deviceNodes[key]
+ if !ok {
+ logrus.Warnf("Could not locate weight device %s in system devices", key)
+ continue
+ }
+ weightDev := InspectBlkioWeightDevice{}
+ weightDev.Path = path
+ weightDev.Weight = *dev.Weight
+ hostConfig.BlkioWeightDevice = append(hostConfig.BlkioWeightDevice, weightDev)
+ }
+
+ handleThrottleDevice := func(devs []spec.LinuxThrottleDevice) ([]InspectBlkioThrottleDevice, error) {
+ out := []InspectBlkioThrottleDevice{}
+ for _, dev := range devs {
+ key := fmt.Sprintf("%d:%d", dev.Major, dev.Minor)
+ if deviceNodes == nil {
+ nodes, err := util.FindDeviceNodes()
+ if err != nil {
+ return nil, err
+ }
+ deviceNodes = nodes
+ }
+ path, ok := deviceNodes[key]
+ if !ok {
+ logrus.Warnf("Could not locate throttle device %s in system devices", key)
+ continue
+ }
+ throttleDev := InspectBlkioThrottleDevice{}
+ throttleDev.Path = path
+ throttleDev.Rate = dev.Rate
+ out = append(out, throttleDev)
+ }
+ return out, nil
+ }
+
+ readBps, err := handleThrottleDevice(ctrSpec.Linux.Resources.BlockIO.ThrottleReadBpsDevice)
+ if err != nil {
+ return nil, err
+ }
+ hostConfig.BlkioDeviceReadBps = readBps
+
+ writeBps, err := handleThrottleDevice(ctrSpec.Linux.Resources.BlockIO.ThrottleWriteBpsDevice)
+ if err != nil {
+ return nil, err
+ }
+ hostConfig.BlkioDeviceWriteBps = writeBps
+
+ readIops, err := handleThrottleDevice(ctrSpec.Linux.Resources.BlockIO.ThrottleReadIOPSDevice)
+ if err != nil {
+ return nil, err
+ }
+ hostConfig.BlkioDeviceReadIOps = readIops
+
+ writeIops, err := handleThrottleDevice(ctrSpec.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice)
+ if err != nil {
+ return nil, err
+ }
+ hostConfig.BlkioDeviceWriteIOps = writeIops
+ }
+ }
+ }
+
+ // NanoCPUs.
+ // This is only calculated if CpuPeriod == 100000.
+ // It is given in nanoseconds of CPU time per second, versus the
+ // microseconds used elsewhere - it is quota/period scaled to a full
+ // second, which with a period of 100000 reduces to quota * 10000.
+ if hostConfig.CpuPeriod == 100000 {
+ hostConfig.NanoCpus = 10000 * hostConfig.CpuQuota
+ }
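
Worked numbers for the conversion above, under the same assumption that CpuPeriod is the 100000 microsecond default: NanoCpus counts nanoseconds of CPU available per second, i.e. quota/period scaled by one billion, which reduces to quota * 10000. The values below are illustrative.

package main

import "fmt"

func main() {
	const cpuPeriod = 100000  // microseconds per scheduling period (the default)
	cpuQuota := int64(250000) // microseconds of CPU per period, i.e. 2.5 CPUs

	// quota/period scaled to nanoseconds per second.
	nanoCpus := cpuQuota * 1000000000 / cpuPeriod
	fmt.Println(nanoCpus)         // 2500000000
	fmt.Println(cpuQuota * 10000) // the shortcut used above, same value
}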
+
+ // Bind mounts, formatted as src:dst.
+ // We'll be appending some options that aren't necessarily in the
+ // original command line... but no helping that from inside libpod.
+ binds := []string{}
+ tmpfs := make(map[string]string)
+ for _, namedVol := range namedVolumes {
+ if len(namedVol.Options) > 0 {
+ binds = append(binds, fmt.Sprintf("%s:%s:%s", namedVol.Name, namedVol.Dest, strings.Join(namedVol.Options, ",")))
+ } else {
+ binds = append(binds, fmt.Sprintf("%s:%s", namedVol.Name, namedVol.Dest))
+ }
+ }
+ for _, mount := range mounts {
+ if mount.Type == "tmpfs" {
+ tmpfs[mount.Destination] = strings.Join(mount.Options, ",")
+ } else {
+ // TODO - maybe we should parse for empty source/destination
+ // here. Would be confusing if we print just a bare colon.
+ if len(mount.Options) > 0 {
+ binds = append(binds, fmt.Sprintf("%s:%s:%s", mount.Source, mount.Destination, strings.Join(mount.Options, ",")))
+ } else {
+ binds = append(binds, fmt.Sprintf("%s:%s", mount.Source, mount.Destination))
+ }
+ }
+ }
+ hostConfig.Binds = binds
+ hostConfig.Tmpfs = tmpfs
+
+ // Network mode parsing.
+ networkMode := ""
+ if c.config.CreateNetNS {
+ networkMode = "default"
+ } else if c.config.NetNsCtr != "" {
+ networkMode = fmt.Sprintf("container:%s", c.config.NetNsCtr)
+ } else {
+ // Find the spec's network namespace.
+ // If there is none, it's host networking.
+ // If there is one and it has a path, it's "ns:".
+ foundNetNS := false
+ for _, ns := range ctrSpec.Linux.Namespaces {
+ if ns.Type == spec.NetworkNamespace {
+ foundNetNS = true
+ if ns.Path != "" {
+ networkMode = fmt.Sprintf("ns:%s", ns.Path)
+ } else {
+ networkMode = "none"
+ }
+ break
+ }
+ }
+ if !foundNetNS {
+ networkMode = "host"
+ }
+ }
+ hostConfig.NetworkMode = networkMode
+
+ // Port bindings.
+ // Only populate if we're using CNI to configure the network.
+ portBindings := make(map[string][]InspectHostPort)
+ if c.config.CreateNetNS {
+ for _, port := range c.config.PortMappings {
+ key := fmt.Sprintf("%d/%s", port.ContainerPort, port.Protocol)
+ hostPorts := portBindings[key]
+ if hostPorts == nil {
+ hostPorts = []InspectHostPort{}
+ }
+ hostPorts = append(hostPorts, InspectHostPort{
+ HostIP: port.HostIP,
+ HostPort: fmt.Sprintf("%d", port.HostPort),
+ })
+ portBindings[key] = hostPorts
+ }
+ }
+ hostConfig.PortBindings = portBindings
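
For illustration, this is the shape the map above takes when serialized, assuming a container port 80/tcp published on two host addresses; inspectHostPort is a trimmed stand-in for the InspectHostPort struct defined earlier, and the addresses are made up.

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of InspectHostPort so the sketch compiles on its own.
type inspectHostPort struct {
	HostIP   string `json:"HostIp"`
	HostPort string `json:"HostPort"`
}

func main() {
	// Keys are "<container port>/<protocol>", values are the host-side
	// bindings for that container port.
	portBindings := map[string][]inspectHostPort{
		"80/tcp": {
			{HostIP: "", HostPort: "8080"},
			{HostIP: "127.0.0.1", HostPort: "8081"},
		},
	}
	out, _ := json.MarshalIndent(portBindings, "", "  ")
	fmt.Println(string(out))
}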
+
+ // Cap add and cap drop.
+ // We need a default set of capabilities to compare against.
+ // The OCI generate package has one, and is commonly used, so we'll
+ // use it.
+ // Problem: there are 5 sets of capabilities.
+ // Use the bounding set for this computation, it's the most encompassing
+ // (but still not perfect).
+ capAdd := []string{}
+ capDrop := []string{}
+ // No point in continuing if we got a spec without a Process block...
+ if ctrSpec.Process != nil {
+ // Make an O(1) lookup table for default bounding caps.
+ boundingCaps := make(map[string]bool)
+ g, err := generate.New("linux")
+ if err != nil {
+ return nil, err
+ }
+ if !hostConfig.Privileged {
+ for _, cap := range g.Config.Process.Capabilities.Bounding {
+ boundingCaps[cap] = true
+ }
+ } else {
+ // If we are privileged, use all caps.
+ for _, cap := range capability.List() {
+ if g.HostSpecific && cap > validate.LastCap() {
+ continue
+ }
+ boundingCaps[fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String()))] = true
+ }
+ }
+ // Iterate through spec caps.
+ // If it's not in default bounding caps, it was added.
+ // If it is, delete from the default set. Whatever remains after
+ // we finish are the dropped caps.
+ for _, cap := range ctrSpec.Process.Capabilities.Bounding {
+ if _, ok := boundingCaps[cap]; ok {
+ delete(boundingCaps, cap)
+ } else {
+ capAdd = append(capAdd, cap)
+ }
+ }
+ for cap := range boundingCaps {
+ capDrop = append(capDrop, cap)
+ }
+ }
+ hostConfig.CapAdd = capAdd
+ hostConfig.CapDrop = capDrop
+
+ // IPC Namespace mode
+ ipcMode := ""
+ if c.config.IPCNsCtr != "" {
+ ipcMode = fmt.Sprintf("container:%s", c.config.IPCNsCtr)
+ } else {
+ // Locate the spec's IPC namespace.
+ // If there is none, it's ipc=host.
+ // If there is one and it has a path, it's "ns:".
+ // If no path, it's default - the empty string.
+ foundIPCNS := false
+ for _, ns := range ctrSpec.Linux.Namespaces {
+ if ns.Type == spec.IPCNamespace {
+ foundIPCNS = true
+ if ns.Path != "" {
+ ipcMode = fmt.Sprintf("ns:%s", ns.Path)
+ }
+ break
+ }
+ }
+ if !foundIPCNS {
+ ipcMode = "host"
+ }
+ }
+ hostConfig.IpcMode = ipcMode
+
+ // CGroup parent
+ // Need to check if it's the default, and not print if so.
+ defaultCgroupParent := ""
+ switch c.runtime.config.CgroupManager {
+ case CgroupfsCgroupsManager:
+ defaultCgroupParent = CgroupfsDefaultCgroupParent
+ case SystemdCgroupsManager:
+ defaultCgroupParent = SystemdDefaultCgroupParent
+ }
+ if c.config.CgroupParent != defaultCgroupParent {
+ hostConfig.CgroupParent = c.config.CgroupParent
+ }
+
+ // PID namespace mode
+ pidMode := ""
+ if c.config.PIDNsCtr != "" {
+ pidMode = fmt.Sprintf("container:%s", c.config.PIDNsCtr)
+ } else {
+ // Locate the spec's PID namespace.
+ // If there is none, it's pid=host.
+ // If there is one and it has a path, it's "ns:".
+ // If there is no path, it's default - the empty string.
+ foundPIDNS := false
+ for _, ns := range ctrSpec.Linux.Namespaces {
+ if ns.Type == spec.PIDNamespace {
+ foundPIDNS = true
+ if ns.Path != "" {
+ pidMode = fmt.Sprintf("ns:%s", ns.Path)
+ }
+ break
+ }
+ }
+ if !foundPIDNS {
+ pidMode = "host"
+ }
+ }
+ hostConfig.PidMode = pidMode
+
+ // UTS namespace mode
+ utsMode := ""
+ if c.config.UTSNsCtr != "" {
+ utsMode = fmt.Sprintf("container:%s", c.config.UTSNsCtr)
+ } else {
+ // Locate the spec's UTS namespace.
+ // If there is none, it's uts=host.
+ // If there is one and it has a path, it's "ns:".
+ // If there is no path, it's default - the empty string.
+ foundUTSNS := false
+ for _, ns := range ctrSpec.Linux.Namespaces {
+ if ns.Type == spec.UTSNamespace {
+ foundUTSNS = true
+ if ns.Path != "" {
+ utsMode = fmt.Sprintf("ns:%s", ns.Path)
+ }
+ break
+ }
+ }
+ if !foundUTSNS {
+ utsMode = "host"
+ }
+ }
+ hostConfig.UTSMode = utsMode
+
+ // User namespace mode
+ usernsMode := ""
+ if c.config.UserNsCtr != "" {
+ usernsMode = fmt.Sprintf("container:%s", c.config.UserNsCtr)
+ } else {
+ // Locate the spec's user namespace.
+ // If there is none, it's default - the empty string.
+ // If there is one, it's "private" if no path, or "ns:" if
+ // there's a path.
+ for _, ns := range ctrSpec.Linux.Namespaces {
+ if ns.Type == spec.UserNamespace {
+ if ns.Path != "" {
+ usernsMode = fmt.Sprintf("ns:%s", ns.Path)
+ } else {
+ usernsMode = "private"
+ }
+ }
+ }
+ }
+ hostConfig.UsernsMode = usernsMode
+
+ // Devices
+ // Do not include if privileged - assumed that all devices will be
+ // included.
+ hostConfig.Devices = []InspectDevice{}
+ if ctrSpec.Linux != nil && !hostConfig.Privileged {
+ for _, dev := range ctrSpec.Linux.Devices {
+ key := fmt.Sprintf("%d:%d", dev.Major, dev.Minor)
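+ // Lazily build the major:minor -> host path map the first time a device is encountered.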
+ if deviceNodes == nil {
+ nodes, err := util.FindDeviceNodes()
+ if err != nil {
+ return nil, err
+ }
+ deviceNodes = nodes
+ }
+ path, ok := deviceNodes[key]
+ if !ok {
+ logrus.Warnf("Could not locate device %s on host", key)
+ continue
+ }
+ newDev := InspectDevice{}
+ newDev.PathOnHost = path
+ newDev.PathInContainer = dev.Path
+ hostConfig.Devices = append(hostConfig.Devices, newDev)
+ }
+ }
+
+ // Ulimits
+ hostConfig.Ulimits = []InspectUlimit{}
+ if ctrSpec.Process != nil {
+ for _, limit := range ctrSpec.Process.Rlimits {
+ newLimit := InspectUlimit{}
+ newLimit.Name = limit.Type
+ newLimit.Soft = limit.Soft
+ newLimit.Hard = limit.Hard
+ hostConfig.Ulimits = append(hostConfig.Ulimits, newLimit)
+ }
+ }
+
+ // Terminal size
+ // We can't actually get this for now...
+ // So default to something sane.
+ // TODO: Populate this.
+ hostConfig.ConsoleSize = []uint{0, 0}
+
+ return hostConfig, nil
+}
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index b363c193a..83ee5640e 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -12,6 +12,7 @@ import (
"strings"
"time"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/pkg/ctime"
"github.com/containers/libpod/pkg/hooks"
@@ -21,15 +22,17 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/mount"
spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
- opentracing "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
// name of the directory holding the artifacts
- artifactsDir = "artifacts"
+ artifactsDir = "artifacts"
+ execDirPermission = 0755
)
// rootFsSize gets the size of the container's root filesystem
@@ -127,17 +130,94 @@ func (c *Container) CheckpointPath() string {
// AttachSocketPath retrieves the path of the container's attach socket
func (c *Container) AttachSocketPath() string {
- return filepath.Join(c.runtime.ociRuntime.socketsDir, c.ID(), "attach")
+ return filepath.Join(c.ociRuntime.socketsDir, c.ID(), "attach")
+}
+
+// exitFilePath gets the path to the container's exit file
+func (c *Container) exitFilePath() string {
+ return filepath.Join(c.ociRuntime.exitsDir, c.ID())
+}
+
+// create a bundle path and associated files for an exec session
+func (c *Container) createExecBundle(sessionID string) (err error) {
+ bundlePath := c.execBundlePath(sessionID)
+ if createErr := os.MkdirAll(bundlePath, execDirPermission); createErr != nil {
+ return createErr
+ }
+ defer func() {
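+ // If a later step fails, remove the partially-created bundle directory.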
+ if err != nil {
+ if err2 := os.RemoveAll(bundlePath); err2 != nil {
+ logrus.Warnf("error removing exec bundle after creation caused another error: %v", err2)
+ }
+ }
+ }()
+ if err2 := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err2 != nil {
+ // The directory is allowed to exist
+ if !os.IsExist(err2) {
+ err = errors.Wrapf(err2, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID))
+ }
+ }
+ return
+}
+
+// cleanup an exec session after it's done
+func (c *Container) cleanupExecBundle(sessionID string) error {
+ return os.RemoveAll(c.execBundlePath(sessionID))
+}
+
+// the path to a container's exec session bundle
+func (c *Container) execBundlePath(sessionID string) string {
+ return filepath.Join(c.bundlePath(), sessionID)
}
// Get PID file path for a container's exec session
func (c *Container) execPidPath(sessionID string) string {
- return filepath.Join(c.state.RunDir, "exec_pid_"+sessionID)
+ return filepath.Join(c.execBundlePath(sessionID), "exec_pid")
}
-// exitFilePath gets the path to the container's exit file
-func (c *Container) exitFilePath() string {
- return filepath.Join(c.runtime.ociRuntime.exitsDir, c.ID())
+// the log path for an exec session
+func (c *Container) execLogPath(sessionID string) string {
+ return filepath.Join(c.execBundlePath(sessionID), "exec_log")
+}
+
+// the socket conmon creates for an exec session
+func (c *Container) execAttachSocketPath(sessionID string) string {
+ return filepath.Join(c.ociRuntime.socketsDir, sessionID, "attach")
+}
+
+// execExitFileDir gets the path to the directory holding the exec session's exit file
+func (c *Container) execExitFileDir(sessionID string) string {
+ return filepath.Join(c.execBundlePath(sessionID), "exit")
+}
+
+// execOCILog returns the file path for the exec session's OCI log
+func (c *Container) execOCILog(sessionID string) string {
+ if !c.ociRuntime.supportsJSON {
+ return ""
+ }
+ return filepath.Join(c.execBundlePath(sessionID), "oci-log")
+}
+
+// readExecExitCode reads the exit file for an exec session and returns
+// the exit code
+func (c *Container) readExecExitCode(sessionID string) (int, error) {
+ exitFile := filepath.Join(c.execExitFileDir(sessionID), c.ID())
+ chWait := make(chan error)
+ defer close(chWait)
+
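+ // Wait up to 5 seconds for the exit file to appear before reading the exit code.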
+ _, err := WaitForFile(exitFile, chWait, time.Second*5)
+ if err != nil {
+ return -1, err
+ }
+ ec, err := ioutil.ReadFile(exitFile)
+ if err != nil {
+ return -1, err
+ }
+ ecInt, err := strconv.Atoi(string(ec))
+ if err != nil {
+ return -1, err
+ }
+ return ecInt, nil
}
// Wait for the container's exit file to appear.
@@ -154,7 +234,7 @@ func (c *Container) waitForExitFileAndSync() error {
// Reset our state
c.state.ExitCode = -1
c.state.FinishedTime = time.Now()
- c.state.State = ContainerStateStopped
+ c.state.State = define.ContainerStateStopped
if err2 := c.save(); err2 != nil {
logrus.Errorf("Error saving container %s state: %v", c.ID(), err2)
@@ -163,7 +243,7 @@ func (c *Container) waitForExitFileAndSync() error {
return err
}
- if err := c.runtime.ociRuntime.updateContainerStatus(c, false); err != nil {
+ if err := c.ociRuntime.updateContainerStatus(c, false); err != nil {
return err
}
@@ -239,10 +319,10 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
// Is the container running again?
// If so, we don't have to do anything
- if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
return false, nil
- } else if c.state.State == ContainerStateUnknown {
- return false, errors.Wrapf(ErrInternal, "invalid container state encountered in restart attempt!")
+ } else if c.state.State == define.ContainerStateUnknown {
+ return false, errors.Wrapf(define.ErrInternal, "invalid container state encountered in restart attempt!")
}
c.newContainerEvent(events.Restart)
@@ -265,13 +345,13 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
return false, err
}
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, true); err != nil {
return false, err
}
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
+ } else if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateExited {
// Initialize the container
if err := c.init(ctx, true); err != nil {
return false, err
@@ -293,20 +373,20 @@ func (c *Container) syncContainer() error {
}
// If runtime knows about the container, update its status in runtime
// And then save back to disk
- if (c.state.State != ContainerStateUnknown) &&
- (c.state.State != ContainerStateConfigured) &&
- (c.state.State != ContainerStateExited) {
+ if (c.state.State != define.ContainerStateUnknown) &&
+ (c.state.State != define.ContainerStateConfigured) &&
+ (c.state.State != define.ContainerStateExited) {
oldState := c.state.State
// TODO: optionally replace this with a stat for the exit file
- if err := c.runtime.ociRuntime.updateContainerStatus(c, false); err != nil {
+ if err := c.ociRuntime.updateContainerStatus(c, false); err != nil {
return err
}
// Only save back to DB if state changed
if c.state.State != oldState {
// Check for a restart policy match
if c.config.RestartPolicy != RestartPolicyNone && c.config.RestartPolicy != RestartPolicyNo &&
- (oldState == ContainerStateRunning || oldState == ContainerStatePaused) &&
- (c.state.State == ContainerStateStopped || c.state.State == ContainerStateExited) &&
+ (oldState == define.ContainerStateRunning || oldState == define.ContainerStatePaused) &&
+ (c.state.State == define.ContainerStateStopped || c.state.State == define.ContainerStateExited) &&
!c.state.StoppedByUser {
c.state.RestartPolicyMatch = true
}
@@ -318,7 +398,7 @@ func (c *Container) syncContainer() error {
}
if !c.valid {
- return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID())
+ return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
}
return nil
@@ -331,16 +411,16 @@ func (c *Container) setupStorage(ctx context.Context) error {
defer span.Finish()
if !c.valid {
- return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID())
+ return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
}
- if c.state.State != ContainerStateConfigured {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID())
+ if c.state.State != define.ContainerStateConfigured {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID())
}
// Need both an image ID and image name, plus a bool telling us whether to use the image configuration
if c.config.Rootfs == "" && (c.config.RootfsImageID == "" || c.config.RootfsImageName == "") {
- return errors.Wrapf(ErrInvalidArg, "must provide image ID and image name to use an image")
+ return errors.Wrapf(define.ErrInvalidArg, "must provide image ID and image name to use an image")
}
options := storage.ContainerOptions{
@@ -350,6 +430,16 @@ func (c *Container) setupStorage(ctx context.Context) error {
},
LabelOpts: c.config.LabelOpts,
}
+ if c.restoreFromCheckpoint {
+ // If restoring from a checkpoint, the root file-system
+ // needs to be mounted with the same SELinux labels as
+ // it was mounted previously.
+ if options.Flags == nil {
+ options.Flags = make(map[string]interface{})
+ }
+ options.Flags["ProcessLabel"] = c.config.ProcessLabel
+ options.Flags["MountLabel"] = c.config.MountLabel
+ }
if c.config.Privileged {
privOpt := func(opt string) bool {
for _, privopt := range []string{"nodev", "nosuid", "noexec"} {
@@ -359,7 +449,8 @@ func (c *Container) setupStorage(ctx context.Context) error {
}
return false
}
- defOptions, err := storage.GetDefaultMountOptions()
+
+ defOptions, err := storage.GetMountOptions(c.runtime.store.GraphDriverName(), c.runtime.store.GraphOptions())
if err != nil {
return errors.Wrapf(err, "error getting default mount options")
}
@@ -415,8 +506,8 @@ func (c *Container) setupStorage(ctx context.Context) error {
// Tear down a container's storage prior to removal
func (c *Container) teardownStorage() error {
- if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID())
+ if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID())
}
artifacts := filepath.Join(c.config.StaticDir, artifactsDir)
@@ -449,10 +540,11 @@ func (c *Container) teardownStorage() error {
// It does not save the results - assumes the database will do that for us
func resetState(state *ContainerState) error {
state.PID = 0
+ state.ConmonPID = 0
state.Mountpoint = ""
state.Mounted = false
- if state.State != ContainerStateExited {
- state.State = ContainerStateConfigured
+ if state.State != define.ContainerStateExited {
+ state.State = define.ContainerStateConfigured
}
state.ExecSessions = make(map[string]*ExecSession)
state.NetworkStatus = nil
@@ -476,7 +568,7 @@ func (c *Container) refresh() error {
}
if !c.valid {
- return errors.Wrapf(ErrCtrRemoved, "container %s is not valid - may have been removed", c.ID())
+ return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid - may have been removed", c.ID())
}
// We need to get the container's temporary directory from c/storage
@@ -507,7 +599,7 @@ func (c *Container) refresh() error {
// We need to pick up a new lock
lock, err := c.runtime.lockManager.AllocateAndRetrieveLock(c.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error acquiring lock for container %s", c.ID())
+ return errors.Wrapf(err, "error acquiring lock %d for container %s", c.config.LockID, c.ID())
}
c.lock = lock
@@ -545,13 +637,13 @@ func (c *Container) removeConmonFiles() error {
// Instead of outright deleting the exit file, rename it (if it exists).
// We want to retain it so we can get the exit code of containers which
// are removed (at least until we have a workable events system)
- exitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, c.ID())
- oldExitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, fmt.Sprintf("%s-old", c.ID()))
+ exitFile := filepath.Join(c.ociRuntime.exitsDir, c.ID())
+ oldExitFile := filepath.Join(c.ociRuntime.exitsDir, fmt.Sprintf("%s-old", c.ID()))
if _, err := os.Stat(exitFile); err != nil {
if !os.IsNotExist(err) {
return errors.Wrapf(err, "error running stat on container %s exit file", c.ID())
}
- } else if err == nil {
+ } else {
// Rename should replace the old exit file (if it exists)
if err := os.Rename(exitFile, oldExitFile); err != nil {
return errors.Wrapf(err, "error renaming container %s exit file", c.ID())
@@ -564,11 +656,11 @@ func (c *Container) removeConmonFiles() error {
func (c *Container) export(path string) error {
mountPoint := c.state.Mountpoint
if !c.state.Mounted {
- mount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
+ containerMount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
if err != nil {
return errors.Wrapf(err, "error mounting container %q", c.ID())
}
- mountPoint = mount
+ mountPoint = containerMount
defer func() {
if _, err := c.runtime.store.Unmount(c.ID(), false); err != nil {
logrus.Errorf("error unmounting container %q: %v", c.ID(), err)
@@ -606,7 +698,7 @@ func (c *Container) isStopped() (bool, error) {
if err != nil {
return true, err
}
- return (c.state.State != ContainerStateRunning && c.state.State != ContainerStatePaused), nil
+ return c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused, nil
}
// save container state to the database
@@ -622,11 +714,11 @@ func (c *Container) save() error {
// Otherwise, this function will return with error if there are dependencies of this container that aren't running.
func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err error) {
// Container must be created or stopped to be started
- if !(c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateCreated ||
- c.state.State == ContainerStateStopped ||
- c.state.State == ContainerStateExited) {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
+ if !(c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateCreated ||
+ c.state.State == define.ContainerStateStopped ||
+ c.state.State == define.ContainerStateExited) {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
}
if !recursive {
@@ -651,13 +743,13 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err
return err
}
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, false); err != nil {
return err
}
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
+ } else if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateExited {
// Or initialize it if necessary
if err := c.init(ctx, false); err != nil {
return err
@@ -674,7 +766,7 @@ func (c *Container) checkDependenciesAndHandleError(ctx context.Context) error {
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
- return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
+ return errors.Wrapf(define.ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
}
return nil
@@ -712,7 +804,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
if len(graph.nodes) == 0 {
return nil
}
- return errors.Wrapf(ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID())
}
ctrErrors := make(map[string]error)
@@ -728,7 +820,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
for _, e := range ctrErrors {
logrus.Errorf("%q", e)
}
- return errors.Wrapf(ErrInternal, "error starting some containers")
+ return errors.Wrapf(define.ErrInternal, "error starting some containers")
}
return nil
}
@@ -760,7 +852,7 @@ func (c *Container) getAllDependencies(visited map[string]*Container) error {
}
// if the dependency is already running, we can assume its dependencies are also running
// so no need to add them to those we need to start
- if status != ContainerStateRunning {
+ if status != define.ContainerStateRunning {
visited[depID] = dep
if err := dep.getAllDependencies(visited); err != nil {
return err
@@ -792,7 +884,7 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
if err != nil {
return nil, errors.Wrapf(err, "error retrieving state of dependency %s of container %s", dep, c.ID())
}
- if state != ContainerStateRunning {
+ if state != define.ContainerStateRunning {
notRunning = append(notRunning, dep)
}
depCtrs[dep] = depCtr
@@ -801,34 +893,6 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
return notRunning, nil
}
-// Check if a container's dependencies are running
-// Returns a []string containing the IDs of dependencies that are not running
-// Assumes depencies are already locked, and will be passed in
-// Accepts a map[string]*Container containing, at a minimum, the locked
-// dependency containers
-// (This must be a map from container ID to container)
-func (c *Container) checkDependenciesRunningLocked(depCtrs map[string]*Container) ([]string, error) {
- deps := c.Dependencies()
- notRunning := []string{}
-
- for _, dep := range deps {
- depCtr, ok := depCtrs[dep]
- if !ok {
- return nil, errors.Wrapf(ErrNoSuchCtr, "container %s depends on container %s but it is not on containers passed to checkDependenciesRunning", c.ID(), dep)
- }
-
- if err := c.syncContainer(); err != nil {
- return nil, err
- }
-
- if depCtr.state.State != ContainerStateRunning {
- notRunning = append(notRunning, dep)
- }
- }
-
- return notRunning, nil
-}
-
func (c *Container) completeNetworkSetup() error {
netDisabled, err := c.NetworkDisabled()
if err != nil {
@@ -852,19 +916,19 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
span.SetTag("struct", "container")
defer span.Finish()
- // Generate the OCI spec
- spec, err := c.generateSpec(ctx)
+ // Generate the OCI newSpec
+ newSpec, err := c.generateSpec(ctx)
if err != nil {
return err
}
- // Save the OCI spec to disk
- if err := c.saveSpec(spec); err != nil {
+ // Save the OCI newSpec to disk
+ if err := c.saveSpec(newSpec); err != nil {
return err
}
// With the spec complete, do an OCI create
- if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, nil); err != nil {
+ if err := c.ociRuntime.createContainer(c, nil); err != nil {
return err
}
@@ -872,7 +936,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
c.state.ExitCode = 0
c.state.Exited = false
- c.state.State = ContainerStateCreated
+ c.state.State = define.ContainerStateCreated
c.state.StoppedByUser = false
c.state.RestartPolicyMatch = false
@@ -903,7 +967,7 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
// If the container is not ContainerStateStopped or
// ContainerStateCreated, do nothing.
- if c.state.State != ContainerStateStopped && c.state.State != ContainerStateCreated {
+ if c.state.State != define.ContainerStateStopped && c.state.State != define.ContainerStateCreated {
return nil
}
@@ -919,10 +983,10 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
// If we were Stopped, we are now Exited, as we've removed ourself
// from the runtime.
// If we were Created, we are now Configured.
- if c.state.State == ContainerStateStopped {
- c.state.State = ContainerStateExited
- } else if c.state.State == ContainerStateCreated {
- c.state.State = ContainerStateConfigured
+ if c.state.State == define.ContainerStateStopped {
+ c.state.State = define.ContainerStateExited
+ } else if c.state.State == define.ContainerStateCreated {
+ c.state.State = define.ContainerStateConfigured
}
if c.valid {
@@ -961,17 +1025,17 @@ func (c *Container) reinit(ctx context.Context, retainRetries bool) error {
// Does not lock or check validity
func (c *Container) initAndStart(ctx context.Context) (err error) {
// If we are ContainerStateUnknown, throw an error
- if c.state.State == ContainerStateUnknown {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s is in an unknown state", c.ID())
+ if c.state.State == define.ContainerStateUnknown {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in an unknown state", c.ID())
}
// If we are running, do nothing
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
return nil
}
// If we are paused, throw an error
- if c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot start paused container %s", c.ID())
+ if c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start paused container %s", c.ID())
}
defer func() {
@@ -988,14 +1052,14 @@ func (c *Container) initAndStart(ctx context.Context) (err error) {
// If we are ContainerStateStopped we need to remove from runtime
// And reset to ContainerStateConfigured
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
logrus.Debugf("Recreating container %s in OCI runtime", c.ID())
if err := c.reinit(ctx, false); err != nil {
return err
}
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
+ } else if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateExited {
if err := c.init(ctx, false); err != nil {
return err
}
@@ -1011,12 +1075,12 @@ func (c *Container) start() error {
logrus.Debugf("Starting container %s with command %v", c.ID(), c.config.Spec.Process.Args)
}
- if err := c.runtime.ociRuntime.startContainer(c); err != nil {
+ if err := c.ociRuntime.startContainer(c); err != nil {
return err
}
logrus.Debugf("Started container %s", c.ID())
- c.state.State = ContainerStateRunning
+ c.state.State = define.ContainerStateRunning
if c.config.HealthCheckConfig != nil {
if err := c.updateHealthStatus(HealthCheckStarting); err != nil {
@@ -1036,10 +1100,12 @@ func (c *Container) start() error {
func (c *Container) stop(timeout uint) error {
logrus.Debugf("Stopping ctr %s (timeout %d)", c.ID(), timeout)
- if err := c.runtime.ociRuntime.stopContainer(c, timeout); err != nil {
+ if err := c.ociRuntime.stopContainer(c, timeout); err != nil {
return err
}
+ c.state.PID = 0
+ c.state.ConmonPID = 0
c.state.StoppedByUser = true
if err := c.save(); err != nil {
return errors.Wrapf(err, "error saving container %s state after stopping", c.ID())
@@ -1051,39 +1117,39 @@ func (c *Container) stop(timeout uint) error {
// Internal, non-locking function to pause a container
func (c *Container) pause() error {
- if err := c.runtime.ociRuntime.pauseContainer(c); err != nil {
+ if err := c.ociRuntime.pauseContainer(c); err != nil {
return err
}
logrus.Debugf("Paused container %s", c.ID())
- c.state.State = ContainerStatePaused
+ c.state.State = define.ContainerStatePaused
return c.save()
}
// Internal, non-locking function to unpause a container
func (c *Container) unpause() error {
- if err := c.runtime.ociRuntime.unpauseContainer(c); err != nil {
+ if err := c.ociRuntime.unpauseContainer(c); err != nil {
return err
}
logrus.Debugf("Unpaused container %s", c.ID())
- c.state.State = ContainerStateRunning
+ c.state.State = define.ContainerStateRunning
return c.save()
}
// Internal, non-locking function to restart a container
func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err error) {
- if c.state.State == ContainerStateUnknown || c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state")
+ if c.state.State == define.ContainerStateUnknown || c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state")
}
c.newContainerEvent(events.Restart)
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
if err := c.stop(timeout); err != nil {
return err
}
@@ -1099,13 +1165,13 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err e
return err
}
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, false); err != nil {
return err
}
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
+ } else if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateExited {
// Initialize the container
if err := c.init(ctx, false); err != nil {
return err
@@ -1161,8 +1227,8 @@ func (c *Container) cleanupStorage() error {
return nil
}
- for _, mount := range c.config.Mounts {
- if err := c.unmountSHM(mount); err != nil {
+ for _, containerMount := range c.config.Mounts {
+ if err := c.unmountSHM(containerMount); err != nil {
return err
}
}
@@ -1243,7 +1309,7 @@ func (c *Container) delete(ctx context.Context) (err error) {
span.SetTag("struct", "container")
defer span.Finish()
- if err := c.runtime.ociRuntime.deleteContainer(c); err != nil {
+ if err := c.ociRuntime.deleteContainer(c); err != nil {
return errors.Wrapf(err, "error removing container %s from runtime", c.ID())
}
@@ -1276,6 +1342,7 @@ func (c *Container) postDeleteHooks(ctx context.Context) (err error) {
return err
}
for i, hook := range extensionHooks {
+ hook := hook
logrus.Debugf("container %s: invoke poststop hook %d, path %s", c.ID(), i, hook.Path)
var stderr, stdout bytes.Buffer
hookErr, err := exec.Run(ctx, &hook, state, &stdout, &stderr, exec.DefaultPostKillTimeout)
@@ -1345,7 +1412,7 @@ func (c *Container) appendStringToRundir(destFile, output string) (string, error
return filepath.Join(c.state.RunDir, destFile), nil
}
-// Save OCI spec to disk, replacing any existing specs for the container
+// saveSpec saves the OCI spec to disk, replacing any existing specs for the container
func (c *Container) saveSpec(spec *spec.Spec) error {
// If the OCI spec already exists, we need to replace it
// Cannot guarantee some things, e.g. network namespaces, have the same
@@ -1393,14 +1460,14 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (exten
}
return nil, err
}
- hooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
+ ociHooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
if err != nil {
return nil, err
}
- if len(hooks) > 0 || config.Hooks != nil {
- logrus.Warnf("implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load hooks from this directory", hDir)
+ if len(ociHooks) > 0 || config.Hooks != nil {
+ logrus.Warnf("implicit hook directories are deprecated; set --ociHooks-dir=%q explicitly to continue to load ociHooks from this directory", hDir)
}
- for i, hook := range hooks {
+ for i, hook := range ociHooks {
allHooks[i] = hook
}
}
@@ -1454,13 +1521,6 @@ func (c *Container) unmount(force bool) error {
return nil
}
-// getExcludedCGroups returns a string slice of cgroups we want to exclude
-// because runc or other components are unaware of them.
-func getExcludedCGroups() (excludes []string) {
- excludes = []string{"rdma"}
- return
-}
-
// this should be from chrootarchive.
func (c *Container) copyWithTarFromImage(src, dest string) error {
mountpoint, err := c.mount()
@@ -1482,18 +1542,86 @@ func (c *Container) copyWithTarFromImage(src, dest string) error {
// If it is, we'll remove the container anyways.
// Returns nil if safe to remove, or an error describing why it's unsafe if not.
func (c *Container) checkReadyForRemoval() error {
- if c.state.State == ContainerStateUnknown {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s is in invalid state", c.ID())
+ if c.state.State == define.ContainerStateUnknown {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID())
}
- if c.state.State == ContainerStateRunning ||
- c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed", c.ID(), c.state.State.String())
+ if c.state.State == define.ContainerStateRunning ||
+ c.state.State == define.ContainerStatePaused {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed", c.ID(), c.state.State.String())
}
if len(c.state.ExecSessions) != 0 {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID())
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID())
}
return nil
}
+
+// writeJSONFile marshals and writes the given data to a JSON file
+// in the bundle path
+func (c *Container) writeJSONFile(v interface{}, file string) (err error) {
+ fileJSON, err := json.MarshalIndent(v, "", " ")
+ if err != nil {
+ return errors.Wrapf(err, "error writing JSON to %s for container %s", file, c.ID())
+ }
+ file = filepath.Join(c.bundlePath(), file)
+ if err := ioutil.WriteFile(file, fileJSON, 0644); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// prepareCheckpointExport writes the config and spec to
+// JSON files for later export
+func (c *Container) prepareCheckpointExport() (err error) {
+ // save live config
+ if err := c.writeJSONFile(c.Config(), "config.dump"); err != nil {
+ return err
+ }
+
+ // save spec
+ jsonPath := filepath.Join(c.bundlePath(), "config.json")
+ g, err := generate.NewFromFile(jsonPath)
+ if err != nil {
+ logrus.Debugf("generating spec for container %q failed with %v", c.ID(), err)
+ return err
+ }
+ if err := c.writeJSONFile(g.Config, "spec.dump"); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// sortUserVolumes sorts the volumes specified for a container
+// between named and normal volumes
+func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume, []spec.Mount) {
+ namedUserVolumes := []*ContainerNamedVolume{}
+ userMounts := []spec.Mount{}
+
+ // We need to parse all named volumes and mounts into maps, so we don't
+ // end up with repeated lookups for each user volume.
+ // Map destination to struct, as destination is what is stored in
+ // UserVolumes.
+ namedVolumes := make(map[string]*ContainerNamedVolume)
+ mounts := make(map[string]spec.Mount)
+ for _, namedVol := range c.config.NamedVolumes {
+ namedVolumes[namedVol.Dest] = namedVol
+ }
+ for _, mount := range ctrSpec.Mounts {
+ mounts[mount.Destination] = mount
+ }
+
+ for _, vol := range c.config.UserVolumes {
+ if volume, ok := namedVolumes[vol]; ok {
+ namedUserVolumes = append(namedUserVolumes, volume)
+ } else if mount, ok := mounts[vol]; ok {
+ userMounts = append(userMounts, mount)
+ } else {
+ logrus.Warnf("Could not find mount at destination %q when parsing user volumes for container %s", vol, c.ID())
+ }
+ }
+ return namedUserVolumes, userMounts
+}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index f25f76092..afcf51a11 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -5,6 +5,7 @@ package libpod
import (
"context"
"fmt"
+ "io"
"io/ioutil"
"net"
"os"
@@ -19,12 +20,15 @@ import (
cnitypes "github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/buildah/pkg/secrets"
+ "github.com/containers/libpod/libpod/define"
crioAnnotations "github.com/containers/libpod/pkg/annotations"
"github.com/containers/libpod/pkg/apparmor"
+ "github.com/containers/libpod/pkg/cgroups"
"github.com/containers/libpod/pkg/criu"
"github.com/containers/libpod/pkg/lookup"
"github.com/containers/libpod/pkg/resolvconf"
"github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/storage/pkg/archive"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -181,9 +185,13 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// If network namespace was requested, add it now
if c.config.CreateNetNS {
if c.config.PostConfigureNetNS {
- g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, "")
+ if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, ""); err != nil {
+ return nil, err
+ }
} else {
- g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path())
+ if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path()); err != nil {
+ return nil, err
+ }
}
}
@@ -310,6 +318,11 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if err := c.addNamespaceContainer(&g, UserNS, c.config.UserNsCtr, spec.UserNamespace); err != nil {
return nil, err
}
+ if len(g.Config.Linux.UIDMappings) == 0 {
+ // runc complains if no mapping is specified, even if we join another ns. So provide a dummy mapping
+ g.AddLinuxUIDMapping(uint32(0), uint32(0), uint32(1))
+ g.AddLinuxGIDMapping(uint32(0), uint32(0), uint32(1))
+ }
}
if c.config.UTSNsCtr != "" {
if err := c.addNamespaceContainer(&g, UTSNS, c.config.UTSNsCtr, spec.UTSNamespace); err != nil {
@@ -347,7 +360,11 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
g.AddProcessEnv("container", "libpod")
}
- if rootless.IsRootless() {
+ unified, err := cgroups.IsCgroup2UnifiedMode()
+ if err != nil {
+ return nil, err
+ }
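+ // Rootless containers on cgroups v1 cannot manage their own cgroup, so only set a cgroups path when running as root or on the unified (v2) hierarchy.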
+ if rootless.IsRootless() && !unified {
g.SetLinuxCgroupsPath("")
} else if c.runtime.config.CgroupManager == SystemdCgroupsManager {
// When runc is set to use Systemd as a cgroup manager, it
@@ -407,7 +424,9 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if rootPropagation != "" {
logrus.Debugf("set root propagation to %q", rootPropagation)
- g.SetLinuxRootPropagation(rootPropagation)
+ if err := g.SetLinuxRootPropagation(rootPropagation); err != nil {
+ return nil, err
+ }
}
// Warning: precreate hooks may alter g.Config in place.
@@ -422,7 +441,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// It also expects to be able to write to /sys/fs/cgroup/systemd and /var/log/journal
func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) error {
options := []string{"rw", "rprivate", "noexec", "nosuid", "nodev"}
- for _, dest := range []string{"/run"} {
+ for _, dest := range []string{"/run", "/run/lock"} {
if MountExists(mounts, dest) {
continue
}
@@ -496,11 +515,75 @@ func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr
return nil
}
+func (c *Container) exportCheckpoint(dest string, ignoreRootfs bool) (err error) {
+ if (len(c.config.NamedVolumes) > 0) || (len(c.Dependencies()) > 0) {
+ return errors.Errorf("Cannot export checkpoints of containers with named volumes or dependencies")
+ }
+ logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), dest)
+
+ includeFiles := []string{
+ "checkpoint",
+ "artifacts",
+ "ctr.log",
+ "config.dump",
+ "spec.dump",
+ "network.status"}
+
+ // Get root file-system changes included in the checkpoint archive
+ rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar")
+ if !ignoreRootfs {
+ rootfsDiffFile, err := os.Create(rootfsDiffPath)
+ if err != nil {
+ return errors.Wrapf(err, "error creating root file-system diff file %q", rootfsDiffPath)
+ }
+ tarStream, err := c.runtime.GetDiffTarStream("", c.ID())
+ if err != nil {
+ return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
+ }
+ _, err = io.Copy(rootfsDiffFile, tarStream)
+ if err != nil {
+ return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
+ }
+ tarStream.Close()
+ rootfsDiffFile.Close()
+ includeFiles = append(includeFiles, "rootfs-diff.tar")
+ }
+
+ input, err := archive.TarWithOptions(c.bundlePath(), &archive.TarOptions{
+ Compression: archive.Gzip,
+ IncludeSourceDir: true,
+ IncludeFiles: includeFiles,
+ })
+
+ if err != nil {
+ return errors.Wrapf(err, "error reading checkpoint directory %q", c.ID())
+ }
+
+ outFile, err := os.Create(dest)
+ if err != nil {
+ return errors.Wrapf(err, "error creating checkpoint export file %q", dest)
+ }
+ defer outFile.Close()
+
+ if err := os.Chmod(dest, 0600); err != nil {
+ return errors.Wrapf(err, "cannot chmod %q", dest)
+ }
+
+ _, err = io.Copy(outFile, input)
+ if err != nil {
+ return err
+ }
+
+ os.Remove(rootfsDiffPath)
+
+ return nil
+}
+
func (c *Container) checkpointRestoreSupported() (err error) {
if !criu.CheckForCriu() {
return errors.Errorf("Checkpoint/Restore requires at least CRIU %d", criu.MinCriuVersion)
}
- if !c.runtime.ociRuntime.featureCheckCheckpointing() {
+ if !c.ociRuntime.featureCheckCheckpointing() {
return errors.Errorf("Configured runtime does not support checkpoint/restore")
}
return nil
@@ -514,7 +597,9 @@ func (c *Container) checkpointRestoreLabelLog(fileName string) (err error) {
if err != nil {
return errors.Wrapf(err, "failed to create CRIU log file %q", dumpLog)
}
- logFile.Close()
+ if err := logFile.Close(); err != nil {
+ logrus.Errorf("unable to close log file: %q", err)
+ }
if err = label.SetFileLabel(dumpLog, c.MountLabel()); err != nil {
return errors.Wrapf(err, "failed to label CRIU log file %q", dumpLog)
}
@@ -526,15 +611,15 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
return err
}
- if c.state.State != ContainerStateRunning {
- return errors.Wrapf(ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
+ if c.state.State != define.ContainerStateRunning {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
}
if err := c.checkpointRestoreLabelLog("dump.log"); err != nil {
return err
}
- if err := c.runtime.ociRuntime.checkpointContainer(c, options); err != nil {
+ if err := c.ociRuntime.checkpointContainer(c, options); err != nil {
return err
}
@@ -549,10 +634,16 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
return err
}
+ if options.TargetFile != "" {
+ if err = c.exportCheckpoint(options.TargetFile, options.IgnoreRootfs); err != nil {
+ return err
+ }
+ }
+
logrus.Debugf("Checkpointed container %s", c.ID())
if !options.KeepRunning {
- c.state.State = ContainerStateStopped
+ c.state.State = define.ContainerStateStopped
// Cleanup Storage and Network
if err := c.cleanup(ctx); err != nil {
@@ -561,23 +652,67 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
if !options.Keep {
- // Remove log file
- os.Remove(filepath.Join(c.bundlePath(), "dump.log"))
- // Remove statistic file
- os.Remove(filepath.Join(c.bundlePath(), "stats-dump"))
+ cleanup := []string{
+ "dump.log",
+ "stats-dump",
+ "config.dump",
+ "spec.dump",
+ }
+ for _, del := range cleanup {
+ file := filepath.Join(c.bundlePath(), del)
+ if err := os.Remove(file); err != nil {
+ logrus.Debugf("unable to remove file %s", file)
+ }
+ }
}
+ c.state.FinishedTime = time.Now()
return c.save()
}
+func (c *Container) importCheckpoint(input string) (err error) {
+ archiveFile, err := os.Open(input)
+ if err != nil {
+ return errors.Wrapf(err, "Failed to open checkpoint archive %s for import", input)
+ }
+
+ defer archiveFile.Close()
+ options := &archive.TarOptions{
+ ExcludePatterns: []string{
+ // config.dump and spec.dump are only required for
+ // container creation
+ "config.dump",
+ "spec.dump",
+ },
+ }
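+ // Unpack the checkpoint archive into the container's bundle directory.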
+ err = archive.Untar(archiveFile, c.bundlePath(), options)
+ if err != nil {
+ return errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input)
+ }
+
+ // Make sure the newly created config.json exists on disk
+ g := generate.Generator{Config: c.config.Spec}
+ if err = c.saveSpec(g.Config); err != nil {
+ return errors.Wrap(err, "Saving imported container specification for restore failed")
+ }
+
+ return nil
+}
+
func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) (err error) {
if err := c.checkpointRestoreSupported(); err != nil {
return err
}
- if (c.state.State != ContainerStateConfigured) && (c.state.State != ContainerStateExited) {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
+ if (c.state.State != define.ContainerStateConfigured) && (c.state.State != define.ContainerStateExited) {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
+ }
+
+ if options.TargetFile != "" {
+ if err = c.importCheckpoint(options.TargetFile); err != nil {
+ return err
+ }
}
// Let's try to stat() CRIU's inventory file. If it does not exist, it makes
@@ -593,7 +728,13 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Read network configuration from checkpoint
// Currently only one interface with one IP is supported.
networkStatusFile, err := os.Open(filepath.Join(c.bundlePath(), "network.status"))
- if err == nil {
+ // If the restored container should get a new name, the IP address of
+ // the container will not be restored. A new name implies the container
+ // is being restored multiple times, and reusing the original IP would
+ // conflict with other restored copies.
+ // TODO: This implicit restoring with or without IP depending on an
+ // unrelated restore parameter (--name) does not seem like the
+ // best solution.
+ if err == nil && options.Name == "" {
// The file with the network.status does exist. Let's restore the
// container with the same IP address as during checkpointing.
defer networkStatusFile.Close()
@@ -602,7 +743,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err != nil {
return err
}
- json.Unmarshal(networkJSON, &networkStatus)
+ if err := json.Unmarshal(networkJSON, &networkStatus); err != nil {
+ return err
+ }
// Take the first IP address
var IP net.IP
if len(networkStatus) > 0 {
@@ -637,30 +780,72 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return err
}
+ // Restoring from an import means that we are doing migration
+ if options.TargetFile != "" {
+ g.SetRootPath(c.state.Mountpoint)
+ }
+
// We want to have the same network namespace as before.
if c.config.CreateNetNS {
- g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path())
+ if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path()); err != nil {
+ return err
+ }
}
- // Save the OCI spec to disk
- if err := c.saveSpec(g.Spec()); err != nil {
+ if err := c.makeBindMounts(); err != nil {
return err
}
- if err := c.makeBindMounts(); err != nil {
- return err
+ if options.TargetFile != "" {
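+ // When restoring from an exported checkpoint, re-add the recorded bind mounts to the restored spec.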
+ for dstPath, srcPath := range c.state.BindMounts {
+ newMount := spec.Mount{
+ Type: "bind",
+ Source: srcPath,
+ Destination: dstPath,
+ Options: []string{"bind", "private"},
+ }
+ if c.IsReadOnly() && dstPath != "/dev/shm" {
+ newMount.Options = append(newMount.Options, "ro", "nosuid", "noexec", "nodev")
+ }
+ if !MountExists(g.Mounts(), dstPath) {
+ g.AddMount(newMount)
+ }
+ }
}
// Cleanup for a working restore.
- c.removeConmonFiles()
+ if err := c.removeConmonFiles(); err != nil {
+ return err
+ }
+
+ // Save the OCI spec to disk
+ if err := c.saveSpec(g.Config); err != nil {
+ return err
+ }
- if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, &options); err != nil {
+ // Before actually restarting the container, apply the root file-system changes
+ if !options.IgnoreRootfs {
+ rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar")
+ if _, err := os.Stat(rootfsDiffPath); err == nil {
+ // Only do this if a rootfs-diff.tar actually exists
+ rootfsDiffFile, err := os.Open(rootfsDiffPath)
+ if err != nil {
+ return errors.Wrapf(err, "Failed to open root file-system diff file %s", rootfsDiffPath)
+ }
+ if err := c.runtime.ApplyDiffTarStream(c.ID(), rootfsDiffFile); err != nil {
+ return errors.Wrapf(err, "Failed to apply root file-system diff file %s", rootfsDiffPath)
+ }
+ rootfsDiffFile.Close()
+ }
+ }
+
+ if err := c.ociRuntime.createContainer(c, &options); err != nil {
return err
}
logrus.Debugf("Restored container %s", c.ID())
- c.state.State = ContainerStateRunning
+ c.state.State = define.ContainerStateRunning
if !options.Keep {
// Delete all checkpoint related files. At this point, in theory, all files
@@ -671,9 +856,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err != nil {
logrus.Debugf("Non-fatal: removal of checkpoint directory (%s) failed: %v", c.CheckpointPath(), err)
}
- cleanup := [...]string{"restore.log", "dump.log", "stats-dump", "stats-restore", "network.status"}
- for _, delete := range cleanup {
- file := filepath.Join(c.bundlePath(), delete)
+ cleanup := [...]string{"restore.log", "dump.log", "stats-dump", "stats-restore", "network.status", "rootfs-diff.tar"}
+ for _, del := range cleanup {
+ file := filepath.Join(c.bundlePath(), del)
err = os.Remove(file)
if err != nil {
logrus.Debugf("Non-fatal: removal of checkpoint file (%s) failed: %v", file, err)
@@ -703,14 +888,14 @@ func (c *Container) makeBindMounts() error {
// will recreate. Only do this if we aren't sharing them with
// another container.
if c.config.NetNsCtr == "" {
- if path, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
- if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+ if resolvePath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
+ if err := os.Remove(resolvePath); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error removing container %s resolv.conf", c.ID())
}
delete(c.state.BindMounts, "/etc/resolv.conf")
}
- if path, ok := c.state.BindMounts["/etc/hosts"]; ok {
- if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+ if hostsPath, ok := c.state.BindMounts["/etc/hosts"]; ok {
+ if err := os.Remove(hostsPath); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error removing container %s hosts", c.ID())
}
delete(c.state.BindMounts, "/etc/hosts")
@@ -847,10 +1032,10 @@ func (c *Container) makeBindMounts() error {
// generateResolvConf generates a containers resolv.conf
func (c *Container) generateResolvConf() (string, error) {
resolvConf := "/etc/resolv.conf"
- for _, ns := range c.config.Spec.Linux.Namespaces {
- if ns.Type == spec.NetworkNamespace {
- if ns.Path != "" && !strings.HasPrefix(ns.Path, "/proc/") {
- definedPath := filepath.Join("/etc/netns", filepath.Base(ns.Path), "resolv.conf")
+ for _, namespace := range c.config.Spec.Linux.Namespaces {
+ if namespace.Type == spec.NetworkNamespace {
+ if namespace.Path != "" && !strings.HasPrefix(namespace.Path, "/proc/") {
+ definedPath := filepath.Join("/etc/netns", filepath.Base(namespace.Path), "resolv.conf")
_, err := os.Stat(definedPath)
if err == nil {
resolvConf = definedPath
@@ -885,7 +1070,7 @@ func (c *Container) generateResolvConf() (string, error) {
nameservers := resolvconf.GetNameservers(resolv.Content)
// slirp4netns has a built in DNS server.
if c.config.NetMode.IsSlirp4netns() {
- nameservers = append(nameservers, "10.0.2.3")
+ nameservers = append([]string{"10.0.2.3"}, nameservers...)
}
if len(c.config.DNSServer) > 0 {
// We store DNS servers as net.IP, so need to convert to string
@@ -954,6 +1139,10 @@ func (c *Container) getHosts() string {
hosts += fmt.Sprintf("%s %s\n", fields[1], fields[0])
}
}
+ if c.config.NetMode.IsSlirp4netns() {
+ // When using slirp4netns, the interface gets a static IP
+ hosts += fmt.Sprintf("# used by slirp4netns\n%s\t%s\n", "10.0.2.100", c.Hostname())
+ }
if len(c.state.NetworkStatus) > 0 && len(c.state.NetworkStatus[0].IPs) > 0 {
ipAddress := strings.Split(c.state.NetworkStatus[0].IPs[0].Address.String(), "/")[0]
hosts += fmt.Sprintf("%s\t%s\n", ipAddress, c.Hostname())
@@ -971,10 +1160,10 @@ func (c *Container) generatePasswd() (string, error) {
if c.config.User == "" {
return "", nil
}
- spec := strings.SplitN(c.config.User, ":", 2)
- userspec := spec[0]
- if len(spec) > 1 {
- groupspec = spec[1]
+ splitSpec := strings.SplitN(c.config.User, ":", 2)
+ userspec := splitSpec[0]
+ if len(splitSpec) > 1 {
+ groupspec = splitSpec[1]
}
// If a non numeric User, then don't generate passwd
uid, err := strconv.ParseUint(userspec, 10, 32)
@@ -1012,7 +1201,7 @@ func (c *Container) generatePasswd() (string, error) {
if err != nil {
return "", errors.Wrapf(err, "failed to create temporary passwd file")
}
- if os.Chmod(passwdFile, 0644); err != nil {
+ if err := os.Chmod(passwdFile, 0644); err != nil {
return "", err
}
return passwdFile, nil
diff --git a/libpod/container_internal_unsupported.go b/libpod/container_internal_unsupported.go
index f707b350c..6fa19a778 100644
--- a/libpod/container_internal_unsupported.go
+++ b/libpod/container_internal_unsupported.go
@@ -5,35 +5,36 @@ package libpod
import (
"context"
+ "github.com/containers/libpod/libpod/define"
spec "github.com/opencontainers/runtime-spec/specs-go"
)
func (c *Container) mountSHM(shmOptions string) error {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
func (c *Container) unmountSHM(mount string) error {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
func (c *Container) prepare() (err error) {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
func (c *Container) cleanupNetwork() error {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
- return nil, ErrNotImplemented
+ return nil, define.ErrNotImplemented
}
func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) error {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
func (c *Container) copyOwnerAndPerms(source, dest string) error {
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index e549673a6..8a87a8796 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -9,6 +9,7 @@ import (
"strings"
"time"
+ "github.com/containers/libpod/libpod/logs"
journal "github.com/coreos/go-systemd/sdjournal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -27,7 +28,7 @@ const (
bufLen = 16384
)
-func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLine) error {
+func (c *Container) readFromJournal(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
var config journal.JournalReaderConfig
config.NumFromTail = options.Tail
config.Formatter = journalFormatter
@@ -79,7 +80,7 @@ func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLin
// because we are reusing bytes, we need to make
// sure the old data doesn't get into the new line
bytestr := string(bytes[:ec])
- logLine, err2 := newLogLine(bytestr)
+ logLine, err2 := logs.NewLogLine(bytestr)
if err2 != nil {
logrus.Error(err2)
continue
@@ -98,7 +99,7 @@ func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLin
func journalFormatter(entry *journal.JournalEntry) (string, error) {
usec := entry.RealtimeTimestamp
- tsString := time.Unix(0, int64(usec)*int64(time.Microsecond)).Format(logTimeFormat)
+ tsString := time.Unix(0, int64(usec)*int64(time.Microsecond)).Format(logs.LogTimeFormat)
output := fmt.Sprintf("%s ", tsString)
priority, ok := entry.Fields["PRIORITY"]
if !ok {
@@ -114,9 +115,9 @@ func journalFormatter(entry *journal.JournalEntry) (string, error) {
// if CONTAINER_PARTIAL_MESSAGE is defined, the log type is "P"
if _, ok := entry.Fields["CONTAINER_PARTIAL_MESSAGE"]; ok {
- output += fmt.Sprintf("%s ", partialLogType)
+ output += fmt.Sprintf("%s ", logs.PartialLogType)
} else {
- output += fmt.Sprintf("%s ", fullLogType)
+ output += fmt.Sprintf("%s ", logs.FullLogType)
}
// Finally, append the message
@@ -129,12 +130,12 @@ func journalFormatter(entry *journal.JournalEntry) (string, error) {
}
type FollowBuffer struct {
- logChannel chan *LogLine
+ logChannel chan *logs.LogLine
}
func (f FollowBuffer) Write(p []byte) (int, error) {
bytestr := string(p)
- logLine, err := newLogLine(bytestr)
+ logLine, err := logs.NewLogLine(bytestr)
if err != nil {
return -1, err
}
diff --git a/libpod/container_log_unsupported.go b/libpod/container_log_unsupported.go
index 0ec5740e2..2c4492b10 100644
--- a/libpod/container_log_unsupported.go
+++ b/libpod/container_log_unsupported.go
@@ -3,9 +3,11 @@
package libpod
import (
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/libpod/logs"
"github.com/pkg/errors"
)
-func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLine) error {
- return errors.Wrapf(ErrOSNotSupported, "Journald logging only enabled with systemd on linux")
+func (c *Container) readFromJournal(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
+ return errors.Wrapf(define.ErrOSNotSupported, "Journald logging only enabled with systemd on linux")
}
diff --git a/libpod/container_top_linux.go b/libpod/container_top_linux.go
index 2e0e83c05..ce471838d 100644
--- a/libpod/container_top_linux.go
+++ b/libpod/container_top_linux.go
@@ -6,6 +6,7 @@ import (
"strconv"
"strings"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/psgo"
"github.com/pkg/errors"
@@ -18,7 +19,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) {
if err != nil {
return nil, errors.Wrapf(err, "unable to look up state for %s", c.ID())
}
- if conStat != ContainerStateRunning {
+ if conStat != define.ContainerStateRunning {
return nil, errors.Errorf("top can only be used on running containers")
}
@@ -60,9 +61,3 @@ func (c *Container) GetContainerPidInformation(descriptors []string) ([]string,
}
return res, nil
}
-
-// GetContainerPidInformationDescriptors returns a string slice of all supported
-// format descriptors of GetContainerPidInformation.
-func GetContainerPidInformationDescriptors() ([]string, error) {
- return psgo.ListDescriptors(), nil
-}
diff --git a/libpod/container_top_unsupported.go b/libpod/container_top_unsupported.go
index 1e6fb836d..382c98b54 100644
--- a/libpod/container_top_unsupported.go
+++ b/libpod/container_top_unsupported.go
@@ -2,6 +2,8 @@
package libpod
+import "github.com/containers/libpod/libpod/define"
+
// GetContainerPidInformation returns process-related data of all processes in
// the container. The output data can be controlled via the `descriptors`
// argument which expects format descriptors and supports all AIX format
@@ -11,11 +13,5 @@ package libpod
//
// For more details, please refer to github.com/containers/psgo.
func (c *Container) GetContainerPidInformation(descriptors []string) ([]string, error) {
- return nil, ErrNotImplemented
-}
-
-// GetContainerPidInformationDescriptors returns a string slice of all supported
-// format descriptors of GetContainerPidInformation.
-func GetContainerPidInformationDescriptors() ([]string, error) {
- return nil, ErrNotImplemented
+ return nil, define.ErrNotImplemented
}
diff --git a/libpod/define/config.go b/libpod/define/config.go
new file mode 100644
index 000000000..d8d6ccf55
--- /dev/null
+++ b/libpod/define/config.go
@@ -0,0 +1,20 @@
+package define
+
+var (
+ // DefaultInitPath is the default path to the container-init binary
+ DefaultInitPath = "/usr/libexec/podman/catatonit"
+ // DefaultInfraImage to use for infra container
+ DefaultInfraImage = "k8s.gcr.io/pause:3.1"
+ // DefaultInfraCommand to be run in an infra container
+ DefaultInfraCommand = "/pause"
+)
+
+// CtrRemoveTimeout is the default number of seconds to wait after stopping a container
+// before sending the kill signal
+const CtrRemoveTimeout = 10
+
+// InfoData holds the info type, i.e. store, host, etc., and the data for each type
+type InfoData struct {
+ Type string
+ Data map[string]interface{}
+}
diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go
new file mode 100644
index 000000000..ab2527b3e
--- /dev/null
+++ b/libpod/define/containerstate.go
@@ -0,0 +1,73 @@
+package define
+
+import "github.com/pkg/errors"
+
+// ContainerStatus represents the current state of a container
+type ContainerStatus int
+
+const (
+ // ContainerStateUnknown indicates that the container is in an error
+ // state where information about it cannot be retrieved
+ ContainerStateUnknown ContainerStatus = iota
+ // ContainerStateConfigured indicates that the container has had its
+ // storage configured but it has not been created in the OCI runtime
+ ContainerStateConfigured ContainerStatus = iota
+ // ContainerStateCreated indicates the container has been created in
+ // the OCI runtime but not started
+ ContainerStateCreated ContainerStatus = iota
+ // ContainerStateRunning indicates the container is currently executing
+ ContainerStateRunning ContainerStatus = iota
+ // ContainerStateStopped indicates that the container was running but has
+ // exited
+ ContainerStateStopped ContainerStatus = iota
+ // ContainerStatePaused indicates that the container has been paused
+ ContainerStatePaused ContainerStatus = iota
+ // ContainerStateExited indicates that the container has stopped and been
+ // cleaned up
+ ContainerStateExited ContainerStatus = iota
+)
+
+// String returns a string representation of a container state
+// for users
+func (t ContainerStatus) String() string {
+ switch t {
+ case ContainerStateUnknown:
+ return "unknown"
+ case ContainerStateConfigured:
+ return "configured"
+ case ContainerStateCreated:
+ return "created"
+ case ContainerStateRunning:
+ return "running"
+ case ContainerStateStopped:
+ return "stopped"
+ case ContainerStatePaused:
+ return "paused"
+ case ContainerStateExited:
+ return "exited"
+ }
+ return "bad state"
+}
+
+// StringToContainerStatus converts a string representation of a container's
+// status into an actual container status type
+func StringToContainerStatus(status string) (ContainerStatus, error) {
+ switch status {
+ case ContainerStateUnknown.String():
+ return ContainerStateUnknown, nil
+ case ContainerStateConfigured.String():
+ return ContainerStateConfigured, nil
+ case ContainerStateCreated.String():
+ return ContainerStateCreated, nil
+ case ContainerStateRunning.String():
+ return ContainerStateRunning, nil
+ case ContainerStateStopped.String():
+ return ContainerStateStopped, nil
+ case ContainerStatePaused.String():
+ return ContainerStatePaused, nil
+ case ContainerStateExited.String():
+ return ContainerStateExited, nil
+ default:
+ return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status)
+ }
+}
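A hedged usage sketch for the new status helpers; it assumes only what the file above defines (the ContainerState* constants, String, StringToContainerStatus, and ErrInvalidArg from the same define package):

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/define"
)

func main() {
	fmt.Println(define.ContainerStateRunning.String()) // "running"

	st, err := define.StringToContainerStatus("paused")
	if err != nil {
		panic(err)
	}
	fmt.Println(st == define.ContainerStatePaused) // true

	// Unknown strings yield ContainerStateUnknown plus an error
	// wrapping define.ErrInvalidArg.
	if _, err := define.StringToContainerStatus("bogus"); err != nil {
		fmt.Println(err)
	}
}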
diff --git a/libpod/errors.go b/libpod/define/errors.go
index dd82d0796..a4368a9aa 100644
--- a/libpod/errors.go
+++ b/libpod/define/errors.go
@@ -1,4 +1,4 @@
-package libpod
+package define
import (
"errors"
@@ -95,5 +95,16 @@ var (
// ErrOSNotSupported indicates the function is not available on the particular
// OS.
- ErrOSNotSupported = errors.New("No support for this OS yet")
+ ErrOSNotSupported = errors.New("no support for this OS yet")
+
+ // ErrOCIRuntime indicates a generic error from the OCI runtime
+ ErrOCIRuntime = errors.New("OCI runtime error")
+
+ // ErrOCIRuntimePermissionDenied indicates the OCI runtime attempted to invoke a command that returned
+ // a permission denied error
+ ErrOCIRuntimePermissionDenied = errors.New("OCI runtime permission denied error")
+
+ // ErrOCIRuntimeNotFound indicates the OCI runtime attempted to invoke a command
+ // that was not found
+ ErrOCIRuntimeNotFound = errors.New("OCI runtime command not found error")
)
diff --git a/libpod/define/exec_codes.go b/libpod/define/exec_codes.go
new file mode 100644
index 000000000..7184f1e59
--- /dev/null
+++ b/libpod/define/exec_codes.go
@@ -0,0 +1,30 @@
+package define
+
+import (
+ "github.com/pkg/errors"
+)
+
+const (
+ // ExecErrorCodeGeneric is the default error code to return from an exec session if libpod failed
+ // prior to calling the runtime
+ ExecErrorCodeGeneric = 125
+ // ExecErrorCodeCannotInvoke is the error code to return when the runtime fails to invoke a command
+ // an example of this can be found by trying to execute a directory:
+ // `podman exec -l /etc`
+ ExecErrorCodeCannotInvoke = 126
+ // ExecErrorCodeNotFound is the error code to return when a command cannot be found
+ ExecErrorCodeNotFound = 127
+)
+
+// TranslateExecErrorToExitCode takes an error and checks whether it
+// has a predefined exit code associated with it. If so, it returns that; otherwise it
+// returns the exit code originally reported by libpod.Exec()
+func TranslateExecErrorToExitCode(originalEC int, err error) int {
+ if errors.Cause(err) == ErrOCIRuntimePermissionDenied {
+ return ExecErrorCodeCannotInvoke
+ }
+ if errors.Cause(err) == ErrOCIRuntimeNotFound {
+ return ExecErrorCodeNotFound
+ }
+ return originalEC
+}
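A short sketch of the exit-code translation, assuming only the constants and errors defined in this diff; the errors.Wrapf call mirrors the wrapping used elsewhere in libpod:

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/define"
	"github.com/pkg/errors"
)

func main() {
	wrapped := errors.Wrapf(define.ErrOCIRuntimeNotFound, "exec failed")
	fmt.Println(define.TranslateExecErrorToExitCode(define.ExecErrorCodeGeneric, wrapped)) // 127
	// Errors without a known OCI runtime cause fall through to the original code.
	fmt.Println(define.TranslateExecErrorToExitCode(define.ExecErrorCodeGeneric, errors.New("boom"))) // 125
}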
diff --git a/libpod/version.go b/libpod/define/version.go
index d2b99a275..0f9f49050 100644
--- a/libpod/version.go
+++ b/libpod/define/version.go
@@ -1,4 +1,4 @@
-package libpod
+package define
import (
"runtime"
diff --git a/libpod/diff.go b/libpod/diff.go
index f348e6b81..925bda927 100644
--- a/libpod/diff.go
+++ b/libpod/diff.go
@@ -1,6 +1,9 @@
package libpod
import (
+ "archive/tar"
+ "io"
+
"github.com/containers/libpod/libpod/layers"
"github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
@@ -44,6 +47,59 @@ func (r *Runtime) GetDiff(from, to string) ([]archive.Change, error) {
return rchanges, err
}
+// skipFileInTarAchive is an archive.TarModifierFunc function
+// which tells archive.ReplaceFileTarWrapper to skip files
+// from the tarstream
+func skipFileInTarAchive(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
+ return nil, nil, nil
+}
+
+// GetDiffTarStream returns the differences between the two images, layers, or containers.
+// It is the same functionality as GetDiff() except that it returns a tarstream
+func (r *Runtime) GetDiffTarStream(from, to string) (io.ReadCloser, error) {
+ toLayer, err := r.getLayerID(to)
+ if err != nil {
+ return nil, err
+ }
+ fromLayer := ""
+ if from != "" {
+ fromLayer, err = r.getLayerID(from)
+ if err != nil {
+ return nil, err
+ }
+ }
+ rc, err := r.store.Diff(fromLayer, toLayer, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Skip files in the tar archive that are listed
+ // in the containerMounts map, just as the GetDiff()
+ // function above does
+ filterMap := make(map[string]archive.TarModifierFunc)
+ for key := range containerMounts {
+ filterMap[key[1:]] = skipFileInTarAchive
+ // In the tarstream, directories always include a trailing '/'.
+ // For simplicity this duplicates every entry from
+ // containerMounts with a trailing '/', as containerMounts
+ // does not use trailing '/' for directories.
+ filterMap[key[1:]+"/"] = skipFileInTarAchive
+ }
+
+ filteredTarStream := archive.ReplaceFileTarWrapper(rc, filterMap)
+ return filteredTarStream, nil
+}
+
+// ApplyDiffTarStream applies the changes stored in 'diff' to the layer 'to'
+func (r *Runtime) ApplyDiffTarStream(to string, diff io.Reader) error {
+ toLayer, err := r.getLayerID(to)
+ if err != nil {
+ return err
+ }
+ _, err = r.store.ApplyDiff(toLayer, diff)
+ return err
+}
+
// GetLayerID gets a full layer id given a full or partial id
// If the id matches a container or image, the id of the top layer is returned
// If the id matches a layer, the top layer id is returned
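The filtering in GetDiffTarStream hinges on one behavior of archive.ReplaceFileTarWrapper: a TarModifierFunc that returns (nil, nil, nil) drops the matching entry. A minimal sketch of that pattern, reading a tar stream from stdin; the paths are illustrative, not the real containerMounts entries:

package main

import (
	"archive/tar"
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	skip := func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
		return nil, nil, nil // drop this entry entirely
	}
	filters := map[string]archive.TarModifierFunc{
		"run/secrets":  skip,
		"run/secrets/": skip, // directories carry a trailing '/' in the stream
	}
	filtered := archive.ReplaceFileTarWrapper(os.Stdin, filters)
	defer filtered.Close()
	if _, err := io.Copy(os.Stdout, filtered); err != nil {
		panic(err)
	}
}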
diff --git a/libpod/driver/driver.go b/libpod/driver/driver.go
index 717ac2a4d..f9442fa21 100644
--- a/libpod/driver/driver.go
+++ b/libpod/driver/driver.go
@@ -1,10 +1,15 @@
package driver
import (
- "github.com/containers/libpod/pkg/inspect"
cstorage "github.com/containers/storage"
)
+// Data handles the data for a storage driver
+type Data struct {
+ Name string `json:"Name"`
+ Data map[string]string `json:"Data"`
+}
+
// GetDriverName returns the name of the driver for the given store
func GetDriverName(store cstorage.Store) (string, error) {
driver, err := store.GraphDriver()
@@ -24,7 +29,7 @@ func GetDriverMetadata(store cstorage.Store, layerID string) (map[string]string,
}
// GetDriverData returns the Data struct with information of the driver used by the store
-func GetDriverData(store cstorage.Store, layerID string) (*inspect.Data, error) {
+func GetDriverData(store cstorage.Store, layerID string) (*Data, error) {
name, err := GetDriverName(store)
if err != nil {
return nil, err
@@ -33,7 +38,7 @@ func GetDriverData(store cstorage.Store, layerID string) (*inspect.Data, error)
if err != nil {
return nil, err
}
- return &inspect.Data{
+ return &Data{
Name: name,
Data: metaData,
}, nil
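A small sketch of the relocated Data type in use; store is assumed to be an already-initialized containers/storage Store and layerID an existing layer:

package driverexample

import (
	"github.com/containers/libpod/libpod/driver"
	cstorage "github.com/containers/storage"
)

// describeLayer returns the graph-driver name plus its per-layer metadata,
// now typed as *driver.Data rather than *inspect.Data.
func describeLayer(store cstorage.Store, layerID string) (string, map[string]string, error) {
	data, err := driver.GetDriverData(store, layerID)
	if err != nil {
		return "", nil, err
	}
	return data.Name, data.Data, nil
}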
diff --git a/libpod/events.go b/libpod/events.go
index 13bb5bdde..be21e510a 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -1,7 +1,10 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/libpod/libpod/events"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -79,3 +82,55 @@ func (r *Runtime) Events(options events.ReadOptions) error {
}
return eventer.Read(options)
}
+
+// GetEvents reads the event log and returns events based on input filters
+func (r *Runtime) GetEvents(filters []string) ([]*events.Event, error) {
+ var (
+ logEvents []*events.Event
+ readErr error
+ )
+ eventChannel := make(chan *events.Event)
+ options := events.ReadOptions{
+ EventChannel: eventChannel,
+ Filters: filters,
+ FromStart: true,
+ Stream: false,
+ }
+ eventer, err := r.newEventer()
+ if err != nil {
+ return nil, err
+ }
+ go func() {
+ readErr = eventer.Read(options)
+ }()
+ if readErr != nil {
+ return nil, readErr
+ }
+ for e := range eventChannel {
+ logEvents = append(logEvents, e)
+ }
+ return logEvents, nil
+}
+
+// GetLastContainerEvent takes a container name or ID and an event status and returns
+// the last occurrence of the container event
+func (r *Runtime) GetLastContainerEvent(nameOrID string, containerEvent events.Status) (*events.Event, error) {
+ // check to make sure the event.Status is valid
+ if _, err := events.StringToStatus(containerEvent.String()); err != nil {
+ return nil, err
+ }
+ filters := []string{
+ fmt.Sprintf("container=%s", nameOrID),
+ fmt.Sprintf("event=%s", containerEvent),
+ "type=container",
+ }
+ containerEvents, err := r.GetEvents(filters)
+ if err != nil {
+ return nil, err
+ }
+ if len(containerEvents) < 1 {
+ return nil, errors.Wrapf(events.ErrEventNotFound, "%s not found", containerEvent.String())
+ }
+ // return the last element in the slice
+ return containerEvents[len(containerEvents)-1], nil
+}
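A hedged sketch of how the new event helpers are meant to be driven; r is assumed to be an already-initialized *libpod.Runtime, and the filter strings match the key=value form built inside GetLastContainerEvent above:

package eventsexample

import (
	"github.com/containers/libpod/libpod"
	"github.com/containers/libpod/libpod/events"
)

// latest returns the most recent event of the given status for one container.
// It is equivalent to calling r.GetEvents with the filters
// "container=<nameOrID>", "event=<status>", "type=container" and taking the
// last element of the result.
func latest(r *libpod.Runtime, nameOrID string, status events.Status) (*events.Event, error) {
	return r.GetLastContainerEvent(nameOrID, status)
}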
diff --git a/libpod/events/config.go b/libpod/events/config.go
index 810988205..b9f01f3a5 100644
--- a/libpod/events/config.go
+++ b/libpod/events/config.go
@@ -2,6 +2,8 @@ package events
import (
"time"
+
+ "github.com/pkg/errors"
)
// EventerType ...
@@ -158,3 +160,12 @@ const (
// EventFilter for filtering events
type EventFilter func(*Event) bool
+
+var (
+ // ErrEventTypeBlank indicates the event log found something done by podman
+ // but it isn't likely an event
+ ErrEventTypeBlank = errors.New("event type blank")
+
+ // ErrEventNotFound indicates that the event was not found in the event log
+ ErrEventNotFound = errors.New("unable to find event")
+)
diff --git a/libpod/events/events.go b/libpod/events/events.go
index 1ec79bcd7..2bebff162 100644
--- a/libpod/events/events.go
+++ b/libpod/events/events.go
@@ -95,6 +95,8 @@ func StringToType(name string) (Type, error) {
return System, nil
case Volume.String():
return Volume, nil
+ case "":
+ return "", ErrEventTypeBlank
}
return "", errors.Errorf("unknown event type %q", name)
}
diff --git a/libpod/events/filters.go b/libpod/events/filters.go
index 9a64082d1..b3c5eda6e 100644
--- a/libpod/events/filters.go
+++ b/libpod/events/filters.go
@@ -1,7 +1,6 @@
package events
import (
- "fmt"
"strings"
"time"
@@ -23,7 +22,7 @@ func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error
}, nil
case "EVENT", "STATUS":
return func(e *Event) bool {
- return fmt.Sprintf("%s", e.Status) == filterValue
+ return string(e.Status) == filterValue
}, nil
case "IMAGE":
return func(e *Event) bool {
@@ -54,7 +53,7 @@ func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error
}, nil
case "TYPE":
return func(e *Event) bool {
- return fmt.Sprintf("%s", e.Type) == filterValue
+ return string(e.Type) == filterValue
}, nil
}
return nil, errors.Errorf("%s is an invalid filter", filter)
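The fmt.Sprintf("%s", ...) to string(...) change above works because events.Status and events.Type are string-backed types; a plain conversion is equivalent and avoids the reflection in Sprintf. A tiny standalone illustration:

package main

import "fmt"

type Status string

func main() {
	s := Status("start")
	fmt.Println(string(s) == "start") // true; no Sprintf needed
}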
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 78a630e9a..d5bce4334 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -101,7 +101,9 @@ func (e EventJournalD) Read(options ReadOptions) error {
// We can't decode this event.
// Don't fail hard - that would make events unusable.
// Instead, log and continue.
- logrus.Errorf("Unable to decode event: %v", err)
+ if errors.Cause(err) != ErrEventTypeBlank {
+ logrus.Errorf("Unable to decode event: %v", err)
+ }
continue
}
include := true
diff --git a/libpod/events/nullout.go b/libpod/events/nullout.go
index 7d811a9c7..b11afcf80 100644
--- a/libpod/events/nullout.go
+++ b/libpod/events/nullout.go
@@ -17,7 +17,6 @@ func (e EventToNull) Read(options ReadOptions) error {
// NewNullEventer returns a new null eventer. You should only do this for
// the purposes of internal libpod testing.
func NewNullEventer() Eventer {
- var e Eventer
- e = EventToNull{}
+ e := EventToNull{}
return e
}
diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go
index 5c48cc8ee..0338828e4 100644
--- a/libpod/healthcheck.go
+++ b/libpod/healthcheck.go
@@ -9,7 +9,7 @@ import (
"strings"
"time"
- "github.com/containers/libpod/pkg/inspect"
+ "github.com/containers/libpod/libpod/define"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -53,6 +53,28 @@ const (
HealthCheckStarting string = "starting"
)
+// HealthCheckResults describes the results/logs from a healthcheck
+type HealthCheckResults struct {
+ // Status healthy or unhealthy
+ Status string `json:"Status"`
+ // FailingStreak is the number of consecutive failed healthchecks
+ FailingStreak int `json:"FailingStreak"`
+ // Log describes healthcheck attempts and results
+ Log []HealthCheckLog `json:"Log"`
+}
+
+// HealthCheckLog describes the results of a single healthcheck
+type HealthCheckLog struct {
+ // Start time as string
+ Start string `json:"Start"`
+ // End time as a string
+ End string `json:"End"`
+ // ExitCode is 0 or 1
+ ExitCode int `json:"ExitCode"`
+ // Output is the stdout/stderr from the healthcheck command
+ Output string `json:"Output"`
+}
+
// hcWriteCloser allows us to use bufio as a WriteCloser
type hcWriteCloser struct {
*bufio.Writer
@@ -85,16 +107,25 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
capture bytes.Buffer
inStartPeriod bool
)
- hcStatus, err := checkHealthCheckCanBeRun(c)
- if err != nil {
- return hcStatus, err
- }
hcCommand := c.HealthCheckConfig().Test
- if len(hcCommand) > 0 && hcCommand[0] == "CMD-SHELL" {
- newCommand = []string{"sh", "-c", strings.Join(hcCommand[1:], " ")}
- } else {
+ if len(hcCommand) < 1 {
+ return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ }
+ switch hcCommand[0] {
+ case "", "NONE":
+ return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ case "CMD":
+ newCommand = hcCommand[1:]
+ case "CMD-SHELL":
+ // TODO: SHELL command from image not available in Container - use Docker default
+ newCommand = []string{"/bin/sh", "-c", strings.Join(hcCommand[1:], " ")}
+ default:
+ // command supplied on command line - pass as-is
newCommand = hcCommand
}
+ if len(newCommand) < 1 || newCommand[0] == "" {
+ return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+ }
captureBuffer := bufio.NewWriter(&capture)
hcw := hcWriteCloser{
captureBuffer,
@@ -110,10 +141,18 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
logrus.Debugf("executing health check command %s for %s", strings.Join(newCommand, " "), c.ID())
timeStart := time.Now()
hcResult := HealthCheckSuccess
- hcErr := c.Exec(false, false, []string{}, newCommand, "", "", streams, 0)
+ _, hcErr := c.Exec(false, false, []string{}, newCommand, "", "", streams, 0, nil, "")
if hcErr != nil {
+ errCause := errors.Cause(hcErr)
hcResult = HealthCheckFailure
- returnCode = 1
+ if errCause == define.ErrOCIRuntimeNotFound ||
+ errCause == define.ErrOCIRuntimePermissionDenied ||
+ errCause == define.ErrOCIRuntime {
+ returnCode = 1
+ hcErr = nil
+ } else {
+ returnCode = 125
+ }
}
timeEnd := time.Now()
if c.HealthCheckConfig().StartPeriod > 0 {
@@ -148,7 +187,7 @@ func checkHealthCheckCanBeRun(c *Container) (HealthCheckStatus, error) {
if err != nil {
return HealthCheckInternalError, err
}
- if cstate != ContainerStateRunning {
+ if cstate != define.ContainerStateRunning {
return HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID())
}
if !c.HasHealthCheck() {
@@ -157,8 +196,8 @@ func checkHealthCheckCanBeRun(c *Container) (HealthCheckStatus, error) {
return HealthCheckDefined, nil
}
-func newHealthCheckLog(start, end time.Time, exitCode int, log string) inspect.HealthCheckLog {
- return inspect.HealthCheckLog{
+func newHealthCheckLog(start, end time.Time, exitCode int, log string) HealthCheckLog {
+ return HealthCheckLog{
Start: start.Format(time.RFC3339Nano),
End: end.Format(time.RFC3339Nano),
ExitCode: exitCode,
@@ -182,7 +221,7 @@ func (c *Container) updateHealthStatus(status string) error {
}
// UpdateHealthCheckLog parses the health check results and writes the log
-func (c *Container) updateHealthCheckLog(hcl inspect.HealthCheckLog, inStartPeriod bool) error {
+func (c *Container) updateHealthCheckLog(hcl HealthCheckLog, inStartPeriod bool) error {
healthCheck, err := c.GetHealthCheckLog()
if err != nil {
return err
@@ -199,7 +238,7 @@ func (c *Container) updateHealthCheckLog(hcl inspect.HealthCheckLog, inStartPeri
// increment failing streak
healthCheck.FailingStreak = healthCheck.FailingStreak + 1
// if failing streak > retries, then status to unhealthy
- if int(healthCheck.FailingStreak) >= c.HealthCheckConfig().Retries {
+ if healthCheck.FailingStreak >= c.HealthCheckConfig().Retries {
healthCheck.Status = HealthCheckUnhealthy
}
}
@@ -223,8 +262,8 @@ func (c *Container) healthCheckLogPath() string {
// GetHealthCheckLog returns HealthCheck results by reading the container's
// health check log file. If the health check log file does not exist, then
// an empty healthcheck struct is returned
-func (c *Container) GetHealthCheckLog() (inspect.HealthCheckResults, error) {
- var healthCheck inspect.HealthCheckResults
+func (c *Container) GetHealthCheckLog() (HealthCheckResults, error) {
+ var healthCheck HealthCheckResults
if _, err := os.Stat(c.healthCheckLogPath()); os.IsNotExist(err) {
return healthCheck, nil
}
diff --git a/libpod/healthcheck_linux.go b/libpod/healthcheck_linux.go
index d47a3b7cd..dca72430d 100644
--- a/libpod/healthcheck_linux.go
+++ b/libpod/healthcheck_linux.go
@@ -4,13 +4,50 @@ import (
"fmt"
"os"
"os/exec"
+ "path/filepath"
+ "strconv"
"strings"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/coreos/go-systemd/dbus"
+ godbus "github.com/godbus/dbus"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
+func dbusAuthRootlessConnection(createBus func(opts ...godbus.ConnOption) (*godbus.Conn, error)) (*godbus.Conn, error) {
+ conn, err := createBus()
+ if err != nil {
+ return nil, err
+ }
+
+ methods := []godbus.Auth{godbus.AuthExternal(strconv.Itoa(rootless.GetRootlessUID()))}
+
+ err = conn.Auth(methods)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+func newRootlessConnection() (*dbus.Conn, error) {
+ return dbus.NewConnection(func() (*godbus.Conn, error) {
+ return dbusAuthRootlessConnection(func(opts ...godbus.ConnOption) (*godbus.Conn, error) {
+ path := filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "systemd/private")
+ return godbus.Dial(fmt.Sprintf("unix:path=%s", path))
+ })
+ })
+}
+
+func getConnection() (*dbus.Conn, error) {
+ if rootless.IsRootless() {
+ return newRootlessConnection()
+ }
+ return dbus.NewSystemdConnection()
+}
+
// createTimer creates systemd timers for healthchecks of a container
func (c *Container) createTimer() error {
if c.disableHealthCheckSystemd() {
@@ -21,9 +58,13 @@ func (c *Container) createTimer() error {
return errors.Wrapf(err, "failed to get path for podman for a health check timer")
}
- var cmd = []string{"--unit", fmt.Sprintf("%s", c.ID()), fmt.Sprintf("--on-unit-inactive=%s", c.HealthCheckConfig().Interval.String()), "--timer-property=AccuracySec=1s", podman, "healthcheck", "run", c.ID()}
+ var cmd = []string{}
+ if rootless.IsRootless() {
+ cmd = append(cmd, "--user")
+ }
+ cmd = append(cmd, "--unit", c.ID(), fmt.Sprintf("--on-unit-inactive=%s", c.HealthCheckConfig().Interval.String()), "--timer-property=AccuracySec=1s", podman, "healthcheck", "run", c.ID())
- conn, err := dbus.NewSystemdConnection()
+ conn, err := getConnection()
if err != nil {
return errors.Wrapf(err, "unable to get systemd connection to add healthchecks")
}
@@ -42,7 +83,7 @@ func (c *Container) startTimer() error {
if c.disableHealthCheckSystemd() {
return nil
}
- conn, err := dbus.NewSystemdConnection()
+ conn, err := getConnection()
if err != nil {
return errors.Wrapf(err, "unable to get systemd connection to start healthchecks")
}
@@ -57,7 +98,7 @@ func (c *Container) removeTimer() error {
if c.disableHealthCheckSystemd() {
return nil
}
- conn, err := dbus.NewSystemdConnection()
+ conn, err := getConnection()
if err != nil {
return errors.Wrapf(err, "unable to get systemd connection to remove healthchecks")
}
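A standalone sketch of the rootless D-Bus connection pattern introduced above: dial the systemd user instance's private socket under $XDG_RUNTIME_DIR and authenticate via EXTERNAL as the current UID. os.Getuid stands in here for rootless.GetRootlessUID:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"

	godbus "github.com/godbus/dbus"
)

func main() {
	path := filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "systemd/private")
	conn, err := godbus.Dial(fmt.Sprintf("unix:path=%s", path))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	auth := []godbus.Auth{godbus.AuthExternal(strconv.Itoa(os.Getuid()))}
	if err := conn.Auth(auth); err != nil {
		panic(err)
	}
	fmt.Println("authenticated against the user systemd instance")
}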
diff --git a/libpod/healthcheck_unsupported.go b/libpod/healthcheck_unsupported.go
index d01d1ccd4..1eccc77ba 100644
--- a/libpod/healthcheck_unsupported.go
+++ b/libpod/healthcheck_unsupported.go
@@ -2,18 +2,20 @@
package libpod
+import "github.com/containers/libpod/libpod/define"
+
// createTimer creates systemd timers for healthchecks of a container
func (c *Container) createTimer() error {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
// startTimer starts a systemd timer for the healthchecks
func (c *Container) startTimer() error {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
// removeTimer removes the systemd timer and unit files
// for the container
func (c *Container) removeTimer() error {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
diff --git a/libpod/image/image.go b/libpod/image/image.go
index b965a4640..068491f28 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -38,27 +38,19 @@ import (
"github.com/sirupsen/logrus"
)
-// imageConversions is used to cache image "cast" types
-type imageConversions struct {
- imgRef types.Image
- storeRef types.ImageReference
-}
-
// Image is the primary struct for dealing with images
// It is still very much a work in progress
type Image struct {
// Adding these two structs for now but will cull when we near
// completion of this library.
- imageConversions
+ imgRef types.Image
+ storeRef types.ImageReference
inspect.ImageData
inspect.ImageResult
- inspectInfo *types.ImageInspectInfo
- InputName string
- Local bool
- //runtime *libpod.Runtime
+ inspectInfo *types.ImageInspectInfo
+ InputName string
image *storage.Image
imageruntime *Runtime
- repotagsMap map[string][]string
}
// Runtime contains the store
@@ -119,7 +111,6 @@ func setStore(options storage.StoreOptions) (storage.Store, error) {
func (ir *Runtime) newFromStorage(img *storage.Image) *Image {
image := Image{
InputName: img.ID,
- Local: true,
imageruntime: ir,
image: img,
}
@@ -132,7 +123,6 @@ func (ir *Runtime) newFromStorage(img *storage.Image) *Image {
func (ir *Runtime) NewFromLocal(name string) (*Image, error) {
image := Image{
InputName: name,
- Local: true,
imageruntime: ir,
}
localImage, err := image.getLocalImage()
@@ -153,13 +143,11 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile
// We don't know if the image is local or not ... check local first
newImage := Image{
InputName: name,
- Local: false,
imageruntime: ir,
}
if !forcePull {
localImage, err := newImage.getLocalImage()
if err == nil {
- newImage.Local = true
newImage.image = localImage
return &newImage, nil
}
@@ -199,7 +187,6 @@ func (ir *Runtime) LoadFromArchiveReference(ctx context.Context, srcRef types.Im
for _, name := range imageNames {
newImage := Image{
InputName: name,
- Local: true,
imageruntime: ir,
}
img, err := newImage.getLocalImage()
@@ -299,6 +286,11 @@ func (i *Image) ID() string {
return i.image.ID
}
+// IsReadOnly returns whether the image is stored in a read-only image store
+func (i *Image) IsReadOnly() bool {
+ return i.image.ReadOnly
+}
+
// Digest returns the image's digest
func (i *Image) Digest() digest.Digest {
return i.image.Digest
@@ -323,7 +315,7 @@ func (i *Image) Names() []string {
// RepoDigests returns a string array of repodigests associated with the image
func (i *Image) RepoDigests() ([]string, error) {
var repoDigests []string
- digest := i.Digest()
+ imageDigest := i.Digest()
for _, name := range i.Names() {
named, err := reference.ParseNormalizedNamed(name)
@@ -331,7 +323,7 @@ func (i *Image) RepoDigests() ([]string, error) {
return nil, err
}
- canonical, err := reference.WithDigest(reference.TrimNamed(named), digest)
+ canonical, err := reference.WithDigest(reference.TrimNamed(named), imageDigest)
if err != nil {
return nil, err
}
@@ -439,12 +431,25 @@ func (ir *Runtime) getImage(image string) (*Image, error) {
// GetImages retrieves all images present in storage
func (ir *Runtime) GetImages() ([]*Image, error) {
+ return ir.getImages(false)
+}
+
+// GetRWImages retrieves all read/write images present in storage
+func (ir *Runtime) GetRWImages() ([]*Image, error) {
+ return ir.getImages(true)
+}
+
+// getImages retrieves all images present in storage
+func (ir *Runtime) getImages(rwOnly bool) ([]*Image, error) {
var newImages []*Image
images, err := ir.store.Images()
if err != nil {
return nil, err
}
for _, i := range images {
+ if rwOnly && i.ReadOnly {
+ continue
+ }
// iterating over these, be careful to not iterate on the literal
// pointer.
image := i
@@ -461,12 +466,16 @@ func getImageDigest(ctx context.Context, src types.ImageReference, sc *types.Sys
if err != nil {
return "", err
}
- defer newImg.Close()
- digest := newImg.ConfigInfo().Digest
- if err = digest.Validate(); err != nil {
+ defer func() {
+ if err := newImg.Close(); err != nil {
+ logrus.Errorf("failed to close image: %q", err)
+ }
+ }()
+ imageDigest := newImg.ConfigInfo().Digest
+ if err = imageDigest.Validate(); err != nil {
return "", errors.Wrapf(err, "error getting config info")
}
- return "@" + digest.Hex(), nil
+ return "@" + imageDigest.Hex(), nil
}
// normalizedTag returns the canonical version of tag for use in Image.Names()
@@ -495,7 +504,9 @@ func normalizedTag(tag string) (reference.Named, error) {
// TagImage adds a tag to the given image
func (i *Image) TagImage(tag string) error {
- i.reloadImage()
+ if err := i.reloadImage(); err != nil {
+ return err
+ }
ref, err := normalizedTag(tag)
if err != nil {
return err
@@ -508,14 +519,18 @@ func (i *Image) TagImage(tag string) error {
if err := i.imageruntime.store.SetNames(i.ID(), tags); err != nil {
return err
}
- i.reloadImage()
- defer i.newImageEvent(events.Tag)
+ if err := i.reloadImage(); err != nil {
+ return err
+ }
+ i.newImageEvent(events.Tag)
return nil
}
// UntagImage removes a tag from the given image
func (i *Image) UntagImage(tag string) error {
- i.reloadImage()
+ if err := i.reloadImage(); err != nil {
+ return err
+ }
var newTags []string
tags := i.Names()
if !util.StringInSlice(tag, tags) {
@@ -529,8 +544,10 @@ func (i *Image) UntagImage(tag string) error {
if err := i.imageruntime.store.SetNames(i.ID(), newTags); err != nil {
return err
}
- i.reloadImage()
- defer i.newImageEvent(events.Untag)
+ if err := i.reloadImage(); err != nil {
+ return err
+ }
+ i.newImageEvent(events.Untag)
return nil
}
@@ -566,7 +583,11 @@ func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageRefere
if err != nil {
return err
}
- defer policyContext.Destroy()
+ defer func() {
+ if err := policyContext.Destroy(); err != nil {
+ logrus.Errorf("failed to destroy policy context: %q", err)
+ }
+ }()
// Look up the source image, expecting it to be in local storage
src, err := is.Transport.ParseStoreReference(i.imageruntime.store, i.ID())
@@ -580,7 +601,7 @@ func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageRefere
if err != nil {
return errors.Wrapf(err, "Error copying image to the remote destination")
}
- defer i.newImageEvent(events.Push)
+ i.newImageEvent(events.Push)
return nil
}
@@ -659,12 +680,8 @@ func (i *Image) Size(ctx context.Context) (*uint64, error) {
}
// DriverData gets the driver data from the store on a layer
-func (i *Image) DriverData() (*inspect.Data, error) {
- topLayer, err := i.Layer()
- if err != nil {
- return nil, err
- }
- return driver.GetDriverData(i.imageruntime.store, topLayer.ID)
+func (i *Image) DriverData() (*driver.Data, error) {
+ return driver.GetDriverData(i.imageruntime.store, i.TopLayer())
}
// Layer returns the image's top layer
@@ -693,13 +710,17 @@ func (i *Image) History(ctx context.Context) ([]*History, error) {
return nil, err
}
- // Use our layers list to find images that use one of them as its
+ // Use our layers list to find images that use any of them (or no
+ // layer, since every base layer is derived from an empty layer) as its
// topmost layer.
interestingLayers := make(map[string]bool)
- layer, err := i.imageruntime.store.Layer(i.TopLayer())
- if err != nil {
- return nil, err
+ var layer *storage.Layer
+ if i.TopLayer() != "" {
+ if layer, err = i.imageruntime.store.Layer(i.TopLayer()); err != nil {
+ return nil, err
+ }
}
+ interestingLayers[""] = true
for layer != nil {
interestingLayers[layer.ID] = true
if layer.Parent == "" {
@@ -795,27 +816,6 @@ func (i *Image) History(ctx context.Context) ([]*History, error) {
return allHistory, nil
}
-// historyLayerIDs goes through the images in store and checks if the top layer of an image
-// is the same as the parent of topLayerID
-func (i *Image) historyLayerIDs(topLayerID string, images []*Image, IDs *[]string) error {
- for _, image := range images {
- // Get the layer info of topLayerID
- layer, err := i.imageruntime.store.Layer(topLayerID)
- if err != nil {
- return errors.Wrapf(err, "error getting layer info %q", topLayerID)
- }
- // Check if the parent of layer is equal to the image's top layer
- // If so add the image ID to the list of IDs and find the parent of
- // the top layer of the image ID added to the list
- // Since we are checking for parent, each top layer can only have one parent
- if layer.Parent == image.TopLayer() {
- *IDs = append(*IDs, image.ID())
- return i.historyLayerIDs(image.TopLayer(), images, IDs)
- }
- }
- return nil
-}
-
// Dangling returns a bool if the image is "dangling"
func (i *Image) Dangling() bool {
return len(i.Names()) == 0
@@ -846,7 +846,7 @@ func (i *Image) GetLabel(ctx context.Context, label string) (string, error) {
// Annotations returns the annotations of an image
func (i *Image) Annotations(ctx context.Context) (map[string]string, error) {
- manifest, manifestType, err := i.Manifest(ctx)
+ imageManifest, manifestType, err := i.Manifest(ctx)
if err != nil {
return nil, err
}
@@ -854,7 +854,7 @@ func (i *Image) Annotations(ctx context.Context) (map[string]string, error) {
switch manifestType {
case ociv1.MediaTypeImageManifest:
var m ociv1.Manifest
- if err := json.Unmarshal(manifest, &m); err == nil {
+ if err := json.Unmarshal(imageManifest, &m); err == nil {
for k, v := range m.Annotations {
annotations[k] = v
}
@@ -997,11 +997,15 @@ func (ir *Runtime) Import(ctx context.Context, path, reference string, writer io
if err != nil {
return nil, err
}
- defer policyContext.Destroy()
+ defer func() {
+ if err := policyContext.Destroy(); err != nil {
+ logrus.Errorf("failed to destroy policy context: %q", err)
+ }
+ }()
copyOptions := getCopyOptions(sc, writer, nil, nil, signingOptions, "", nil)
dest, err := is.Transport.ParseStoreReference(ir.store, reference)
if err != nil {
- errors.Wrapf(err, "error getting image reference for %q", reference)
+ return nil, errors.Wrapf(err, "error getting image reference for %q", reference)
}
_, err = cp.Image(ctx, policyContext, dest, src, copyOptions)
if err != nil {
@@ -1009,7 +1013,7 @@ func (ir *Runtime) Import(ctx context.Context, path, reference string, writer io
}
newImage, err := ir.NewFromLocal(reference)
if err == nil {
- defer newImage.newImageEvent(events.Import)
+ newImage.newImageEvent(events.Import)
}
return newImage, err
}
@@ -1143,13 +1147,15 @@ func areParentAndChild(parent, child *imgspecv1.Image) bool {
// GetParent returns the image ID of the parent. Return nil if a parent is not found.
func (i *Image) GetParent(ctx context.Context) (*Image, error) {
+ var childLayer *storage.Layer
images, err := i.imageruntime.GetImages()
if err != nil {
return nil, err
}
- childLayer, err := i.imageruntime.store.Layer(i.TopLayer())
- if err != nil {
- return nil, err
+ if i.TopLayer() != "" {
+ if childLayer, err = i.imageruntime.store.Layer(i.TopLayer()); err != nil {
+ return nil, err
+ }
}
// fetch the configuration for the child image
child, err := i.ociv1Image(ctx)
@@ -1161,11 +1167,23 @@ func (i *Image) GetParent(ctx context.Context) (*Image, error) {
continue
}
candidateLayer := img.TopLayer()
- // as a child, our top layer is either the candidate parent's
- // layer, or one that's derived from it, so skip over any
- // candidate image where we know that isn't the case
- if candidateLayer != childLayer.Parent && candidateLayer != childLayer.ID {
- continue
+ // as a child, our top layer, if we have one, is either the
+ // candidate parent's layer, or one that's derived from it, so
+ // skip over any candidate image where we know that isn't the
+ // case
+ if childLayer != nil {
+ // The child has at least one layer, so a parent would
+ // have a top layer that's either the same as the child's
+ // top layer or the top layer's recorded parent layer,
+ // which could be an empty value.
+ if candidateLayer != childLayer.Parent && candidateLayer != childLayer.ID {
+ continue
+ }
+ } else {
+ // The child has no layers, but the candidate does.
+ if candidateLayer != "" {
+ continue
+ }
}
// fetch the configuration for the candidate image
candidate, err := img.ociv1Image(ctx)
@@ -1204,14 +1222,22 @@ func (i *Image) getChildren(ctx context.Context, max int) ([]string, error) {
if img.ID() == i.ID() {
continue
}
- candidateLayer, err := img.Layer()
- if err != nil {
- return nil, err
- }
- // if this image's top layer is not our top layer, and is not
- // based on our top layer, we can skip it
- if candidateLayer.Parent != parentLayer && candidateLayer.ID != parentLayer {
- continue
+ if img.TopLayer() == "" {
+ if parentLayer != "" {
+ // this image has no layers, but we do, so
+ // it can't be derived from this one
+ continue
+ }
+ } else {
+ candidateLayer, err := img.Layer()
+ if err != nil {
+ return nil, err
+ }
+ // if this image's top layer is not our top layer, and is not
+ // based on our top layer, we can skip it
+ if candidateLayer.Parent != parentLayer && candidateLayer.ID != parentLayer {
+ continue
+ }
}
// fetch the configuration for the candidate image
candidate, err := img.ociv1Image(ctx)
@@ -1272,7 +1298,10 @@ func (i *Image) Comment(ctx context.Context, manifestType string) (string, error
if err != nil {
return "", err
}
- return ociv1Img.History[0].Comment, nil
+ if len(ociv1Img.History) > 0 {
+ return ociv1Img.History[0].Comment, nil
+ }
+ return "", nil
}
// Save writes a container image to the filesystem
@@ -1330,7 +1359,7 @@ func (i *Image) Save(ctx context.Context, source, format, output string, moreTag
if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", writer, compress, SigningOptions{}, &DockerRegistryOptions{}, additionaltags); err != nil {
return errors.Wrapf(err, "unable to save %q", source)
}
- defer i.newImageEvent(events.Save)
+ i.newImageEvent(events.Save)
return nil
}
@@ -1443,6 +1472,7 @@ func GetLayersMapWithImageInfo(imageruntime *Runtime) (map[string]*LayerInfo, er
if err != nil {
return nil, err
}
+ layerInfoMap[""] = &LayerInfo{}
for _, img := range imgs {
e, ok := layerInfoMap[img.TopLayer]
if !ok {
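A hedged sketch of the read-only awareness added above; ir is assumed to be an already-initialized *image.Runtime. Callers that delete images (prune, rmi) switch to GetRWImages so images living in read-only additional stores are never removal candidates:

package imageexample

import "github.com/containers/libpod/libpod/image"

// removableImages lists only images podman could actually delete.
func removableImages(ir *image.Runtime) ([]*image.Image, error) {
	// GetRWImages skips every image whose IsReadOnly() is true.
	return ir.GetRWImages()
}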
diff --git a/libpod/image/prune.go b/libpod/image/prune.go
index a4f8a0c9f..6ef5d321f 100644
--- a/libpod/image/prune.go
+++ b/libpod/image/prune.go
@@ -12,7 +12,7 @@ func (ir *Runtime) GetPruneImages(all bool) ([]*Image, error) {
var (
pruneImages []*Image
)
- allImages, err := ir.GetImages()
+ allImages, err := ir.GetRWImages()
if err != nil {
return nil, err
}
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index cb7411ce5..78cfe3626 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -19,8 +19,8 @@ import (
"github.com/containers/image/types"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/pkg/registries"
- multierror "github.com/hashicorp/go-multierror"
- opentracing "github.com/opentracing/opentracing-go"
+ "github.com/hashicorp/go-multierror"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -149,6 +149,13 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.
// Need to load in all the repo tags from the manifest
res := []pullRefPair{}
for _, dst := range manifest[0].RepoTags {
+ // check if the image already exists locally and warn that the old one will be untagged
+ localImage, err := ir.NewFromLocal(dst)
+ imageID := strings.TrimSuffix(manifest[0].Config, ".json")
+ if err == nil && imageID != localImage.ID() {
+ logrus.Errorf("the image %s already exists, renaming the old one with ID %s to empty string", dst, localImage.ID())
+ }
+
pullInfo, err := ir.getPullRefPair(srcRef, dst)
if err != nil {
return nil, err
@@ -168,7 +175,6 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.
if err != nil {
return nil, errors.Wrapf(err, "error loading manifest for %q", srcRef)
}
-
var dest string
if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" {
// If the input image has no image.ref.name, we need to feed it a dest anyways
@@ -234,6 +240,12 @@ func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.Imag
return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, nil)
}
+func cleanErrorMessage(err error) string {
+ errMessage := strings.TrimPrefix(errors.Cause(err).Error(), "errors:\n")
+ errMessage = strings.Split(errMessage, "\n")[0]
+ return fmt.Sprintf(" %s\n", errMessage)
+}
+
// doPullImage is an internal helper interpreting pullGoal. Almost everyone should call one of the callers of doPullImage instead.
func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, label *string) ([]string, error) {
span, _ := opentracing.StartSpanFromContext(ctx, "doPullImage")
@@ -243,7 +255,11 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
if err != nil {
return nil, err
}
- defer policyContext.Destroy()
+ defer func() {
+ if err := policyContext.Destroy(); err != nil {
+ logrus.Errorf("failed to destroy policy context: %q", err)
+ }
+ }()
systemRegistriesConfPath := registries.SystemRegistriesConfPath()
@@ -257,7 +273,9 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
copyOptions.SourceCtx.SystemRegistriesConfPath = systemRegistriesConfPath // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place.
// Print the following statement only when pulling from a docker or atomic registry
if writer != nil && (imageInfo.srcRef.Transport().Name() == DockerTransport || imageInfo.srcRef.Transport().Name() == AtomicTransport) {
- io.WriteString(writer, fmt.Sprintf("Trying to pull %s...", imageInfo.image))
+ if _, err := io.WriteString(writer, fmt.Sprintf("Trying to pull %s...\n", imageInfo.image)); err != nil {
+ return nil, err
+ }
}
// If the label is not nil, check if the label exists and if not, return err
if label != nil {
@@ -269,9 +287,9 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
_, err = cp.Image(ctx, policyContext, imageInfo.dstRef, imageInfo.srcRef, copyOptions)
if err != nil {
pullErrors = multierror.Append(pullErrors, err)
- logrus.Errorf("Error pulling image ref %s: %v", imageInfo.srcRef.StringWithinTransport(), err)
+ logrus.Debugf("Error pulling image ref %s: %v", imageInfo.srcRef.StringWithinTransport(), err)
if writer != nil {
- io.WriteString(writer, "Failed\n")
+ _, _ = io.WriteString(writer, cleanErrorMessage(err))
}
} else {
if !goal.pullAllPairs {
@@ -296,7 +314,7 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
return nil, pullErrors
}
if len(images) > 0 {
- defer ir.newImageEvent(events.Pull, images[0])
+ ir.newImageEvent(events.Pull, images[0])
}
return images, nil
}
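The new cleanErrorMessage helper is unexported, but its trimming is easy to show in isolation: strip the "errors:\n" prefix that go-multierror produces and keep only the first line. The error text below is invented for the example:

package main

import (
	"fmt"
	"strings"

	"github.com/pkg/errors"
)

func main() {
	err := errors.New("errors:\nmanifest unknown: manifest unknown\nsecond, less useful cause")
	msg := strings.TrimPrefix(errors.Cause(err).Error(), "errors:\n")
	msg = strings.Split(msg, "\n")[0]
	fmt.Printf(" %s\n", msg) // " manifest unknown: manifest unknown"
}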
diff --git a/libpod/image/search.go b/libpod/image/search.go
index 03a67636b..e557431c6 100644
--- a/libpod/image/search.go
+++ b/libpod/image/search.go
@@ -99,7 +99,9 @@ func SearchImages(term string, options SearchOptions) ([]SearchResult, error) {
ctx := context.Background()
for i := range registries {
- sem.Acquire(ctx, 1)
+ if err := sem.Acquire(ctx, 1); err != nil {
+ return nil, err
+ }
go searchImageInRegistryHelper(i, registries[i])
}
@@ -215,21 +217,18 @@ func ParseSearchFilter(filter []string) (*SearchFilter, error) {
return nil, errors.Wrapf(err, "incorrect value type for stars filter")
}
sFilter.Stars = stars
- break
case "is-automated":
if len(arr) == 2 && arr[1] == "false" {
sFilter.IsAutomated = types.OptionalBoolFalse
} else {
sFilter.IsAutomated = types.OptionalBoolTrue
}
- break
case "is-official":
if len(arr) == 2 && arr[1] == "false" {
sFilter.IsOfficial = types.OptionalBoolFalse
} else {
sFilter.IsOfficial = types.OptionalBoolTrue
}
- break
default:
return nil, errors.Errorf("invalid filter type %q", f)
}
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 2669206df..7c4abd25d 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -3,6 +3,7 @@ package libpod
import (
"strings"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/registrar"
"github.com/containers/storage/pkg/truncindex"
"github.com/pkg/errors"
@@ -99,12 +100,12 @@ func (s *InMemoryState) SetNamespace(ns string) error {
// Container retrieves a container from its full ID
func (s *InMemoryState) Container(id string) (*Container, error) {
if id == "" {
- return nil, ErrEmptyID
+ return nil, define.ErrEmptyID
}
ctr, ok := s.containers[id]
if !ok {
- return nil, errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found", id)
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found", id)
}
if err := s.checkNSMatch(ctr.ID(), ctr.Namespace()); err != nil {
@@ -122,7 +123,7 @@ func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) {
)
if idOrName == "" {
- return nil, ErrEmptyID
+ return nil, define.ErrEmptyID
}
if s.namespace != "" {
@@ -130,7 +131,7 @@ func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) {
if !ok {
// We have no containers in the namespace
// Return false
- return nil, errors.Wrapf(ErrNoSuchCtr, "no container found with name or ID %s", idOrName)
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName)
}
nameIndex = nsIndex.nameIndex
idIndex = nsIndex.idIndex
@@ -146,7 +147,7 @@ func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) {
fullID, err = idIndex.Get(idOrName)
if err != nil {
if err == truncindex.ErrNotExist {
- return nil, errors.Wrapf(ErrNoSuchCtr, "no container found with name or ID %s", idOrName)
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName)
}
return nil, errors.Wrapf(err, "error performing truncindex lookup for ID %s", idOrName)
}
@@ -158,7 +159,7 @@ func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) {
ctr, ok := s.containers[fullID]
if !ok {
// It's a pod, not a container
- return nil, errors.Wrapf(ErrNoSuchCtr, "name or ID %s is a pod, not a container", idOrName)
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "name or ID %s is a pod, not a container", idOrName)
}
return ctr, nil
@@ -167,7 +168,7 @@ func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) {
// HasContainer checks if a container with the given ID is present in the state
func (s *InMemoryState) HasContainer(id string) (bool, error) {
if id == "" {
- return false, ErrEmptyID
+ return false, define.ErrEmptyID
}
ctr, ok := s.containers[id]
@@ -182,15 +183,15 @@ func (s *InMemoryState) HasContainer(id string) (bool, error) {
// Containers in a pod cannot be added to the state
func (s *InMemoryState) AddContainer(ctr *Container) error {
if !ctr.valid {
- return errors.Wrapf(ErrCtrRemoved, "container with ID %s is not valid", ctr.ID())
+ return errors.Wrapf(define.ErrCtrRemoved, "container with ID %s is not valid", ctr.ID())
}
if _, ok := s.containers[ctr.ID()]; ok {
- return errors.Wrapf(ErrCtrExists, "container with ID %s already exists in state", ctr.ID())
+ return errors.Wrapf(define.ErrCtrExists, "container with ID %s already exists in state", ctr.ID())
}
if ctr.config.Pod != "" {
- return errors.Wrapf(ErrInvalidArg, "cannot add a container that is in a pod with AddContainer, use AddContainerToPod")
+ return errors.Wrapf(define.ErrInvalidArg, "cannot add a container that is in a pod with AddContainer, use AddContainerToPod")
}
if err := s.checkNSMatch(ctr.ID(), ctr.Namespace()); err != nil {
@@ -204,12 +205,12 @@ func (s *InMemoryState) AddContainer(ctr *Container) error {
for _, depID := range depCtrs {
depCtr, ok := s.containers[depID]
if !ok {
- return errors.Wrapf(ErrNoSuchCtr, "cannot depend on nonexistent container %s", depID)
+ return errors.Wrapf(define.ErrNoSuchCtr, "cannot depend on nonexistent container %s", depID)
} else if depCtr.config.Pod != "" {
- return errors.Wrapf(ErrInvalidArg, "cannot depend on container in a pod if not part of same pod")
+ return errors.Wrapf(define.ErrInvalidArg, "cannot depend on container in a pod if not part of same pod")
}
if depCtr.config.Namespace != ctr.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %s and cannot depend on container %s in namespace %s", ctr.ID(), ctr.config.Namespace, depID, depCtr.config.Namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %s and cannot depend on container %s in namespace %s", ctr.ID(), ctr.config.Namespace, depID, depCtr.config.Namespace)
}
}
@@ -270,12 +271,12 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error {
deps, ok := s.ctrDepends[ctr.ID()]
if ok && len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
- return errors.Wrapf(ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr)
+ return errors.Wrapf(define.ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr)
}
if _, ok := s.containers[ctr.ID()]; !ok {
ctr.valid = false
- return errors.Wrapf(ErrNoSuchCtr, "no container exists in state with ID %s", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "no container exists in state with ID %s", ctr.ID())
}
if err := s.idIndex.Delete(ctr.ID()); err != nil {
@@ -289,7 +290,7 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error {
if ctr.config.Namespace != "" {
nsIndex, ok := s.namespaceIndexes[ctr.config.Namespace]
if !ok {
- return errors.Wrapf(ErrInternal, "error retrieving index for namespace %q", ctr.config.Namespace)
+ return errors.Wrapf(define.ErrInternal, "error retrieving index for namespace %q", ctr.config.Namespace)
}
if err := nsIndex.idIndex.Delete(ctr.ID()); err != nil {
return errors.Wrapf(err, "error removing container %s from namespace ID index", ctr.ID())
@@ -317,13 +318,13 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error {
func (s *InMemoryState) UpdateContainer(ctr *Container) error {
// If the container is invalid, return error
if !ctr.valid {
- return errors.Wrapf(ErrCtrRemoved, "container with ID %s is not valid", ctr.ID())
+ return errors.Wrapf(define.ErrCtrRemoved, "container with ID %s is not valid", ctr.ID())
}
// If the container does not exist, return error
if _, ok := s.containers[ctr.ID()]; !ok {
ctr.valid = false
- return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
}
return s.checkNSMatch(ctr.ID(), ctr.Namespace())
@@ -336,13 +337,13 @@ func (s *InMemoryState) UpdateContainer(ctr *Container) error {
func (s *InMemoryState) SaveContainer(ctr *Container) error {
// If the container is invalid, return error
if !ctr.valid {
- return errors.Wrapf(ErrCtrRemoved, "container with ID %s is not valid", ctr.ID())
+ return errors.Wrapf(define.ErrCtrRemoved, "container with ID %s is not valid", ctr.ID())
}
// If the container does not exist, return error
if _, ok := s.containers[ctr.ID()]; !ok {
ctr.valid = false
- return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
}
return s.checkNSMatch(ctr.ID(), ctr.Namespace())
@@ -351,13 +352,13 @@ func (s *InMemoryState) SaveContainer(ctr *Container) error {
// ContainerInUse checks if the given container is being used by other containers
func (s *InMemoryState) ContainerInUse(ctr *Container) ([]string, error) {
if !ctr.valid {
- return nil, ErrCtrRemoved
+ return nil, define.ErrCtrRemoved
}
// If the container does not exist, return error
if _, ok := s.containers[ctr.ID()]; !ok {
ctr.valid = false
- return nil, errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
}
if err := s.checkNSMatch(ctr.ID(), ctr.Namespace()); err != nil {
@@ -389,14 +390,14 @@ func (s *InMemoryState) AllContainers() ([]*Container, error) {
// Please read the full comment on it in state.go before using it.
func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
if !ctr.valid {
- return ErrCtrRemoved
+ return define.ErrCtrRemoved
}
// If the container does not exist, return error
stateCtr, ok := s.containers[ctr.ID()]
if !ok {
ctr.valid = false
- return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
}
stateCtr.config = newCfg
@@ -409,14 +410,14 @@ func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *Container
// Please read the full comment on it in state.go before using it.
func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
if !pod.valid {
- return ErrPodRemoved
+ return define.ErrPodRemoved
}
// If the pod does not exist, return error
statePod, ok := s.pods[pod.ID()]
if !ok {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "pod with ID %s not found in state", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "pod with ID %s not found in state", pod.ID())
}
statePod.config = newCfg
@@ -427,12 +428,12 @@ func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
// Volume retrieves a volume from its full name
func (s *InMemoryState) Volume(name string) (*Volume, error) {
if name == "" {
- return nil, ErrEmptyID
+ return nil, define.ErrEmptyID
}
vol, ok := s.volumes[name]
if !ok {
- return nil, errors.Wrapf(ErrNoSuchCtr, "no volume with name %s found", name)
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "no volume with name %s found", name)
}
return vol, nil
@@ -441,7 +442,7 @@ func (s *InMemoryState) Volume(name string) (*Volume, error) {
// HasVolume checks if a volume with the given name is present in the state
func (s *InMemoryState) HasVolume(name string) (bool, error) {
if name == "" {
- return false, ErrEmptyID
+ return false, define.ErrEmptyID
}
_, ok := s.volumes[name]
@@ -455,11 +456,11 @@ func (s *InMemoryState) HasVolume(name string) (bool, error) {
// AddVolume adds a volume to the state
func (s *InMemoryState) AddVolume(volume *Volume) error {
if !volume.valid {
- return errors.Wrapf(ErrVolumeRemoved, "volume with name %s is not valid", volume.Name())
+ return errors.Wrapf(define.ErrVolumeRemoved, "volume with name %s is not valid", volume.Name())
}
if _, ok := s.volumes[volume.Name()]; ok {
- return errors.Wrapf(ErrVolumeExists, "volume with name %s already exists in state", volume.Name())
+ return errors.Wrapf(define.ErrVolumeExists, "volume with name %s already exists in state", volume.Name())
}
s.volumes[volume.Name()] = volume
@@ -473,12 +474,12 @@ func (s *InMemoryState) RemoveVolume(volume *Volume) error {
deps, ok := s.volumeDepends[volume.Name()]
if ok && len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
- return errors.Wrapf(ErrVolumeExists, "the following containers depend on volume %s: %s", volume.Name(), depsStr)
+ return errors.Wrapf(define.ErrVolumeExists, "the following containers depend on volume %s: %s", volume.Name(), depsStr)
}
if _, ok := s.volumes[volume.Name()]; !ok {
volume.valid = false
- return errors.Wrapf(ErrVolumeRemoved, "no volume exists in state with name %s", volume.Name())
+ return errors.Wrapf(define.ErrVolumeRemoved, "no volume exists in state with name %s", volume.Name())
}
delete(s.volumes, volume.Name())
@@ -489,13 +490,13 @@ func (s *InMemoryState) RemoveVolume(volume *Volume) error {
// VolumeInUse checks if the given volume is being used by at least one container
func (s *InMemoryState) VolumeInUse(volume *Volume) ([]string, error) {
if !volume.valid {
- return nil, ErrVolumeRemoved
+ return nil, define.ErrVolumeRemoved
}
// If the volume does not exist, return error
if _, ok := s.volumes[volume.Name()]; !ok {
volume.valid = false
- return nil, errors.Wrapf(ErrNoSuchVolume, "volume with name %s not found in state", volume.Name())
+ return nil, errors.Wrapf(define.ErrNoSuchVolume, "volume with name %s not found in state", volume.Name())
}
arr, ok := s.volumeDepends[volume.Name()]
@@ -519,12 +520,12 @@ func (s *InMemoryState) AllVolumes() ([]*Volume, error) {
// Pod retrieves a pod from the state from its full ID
func (s *InMemoryState) Pod(id string) (*Pod, error) {
if id == "" {
- return nil, ErrEmptyID
+ return nil, define.ErrEmptyID
}
pod, ok := s.pods[id]
if !ok {
- return nil, errors.Wrapf(ErrNoSuchPod, "no pod with id %s found", id)
+ return nil, errors.Wrapf(define.ErrNoSuchPod, "no pod with id %s found", id)
}
if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil {
@@ -543,7 +544,7 @@ func (s *InMemoryState) LookupPod(idOrName string) (*Pod, error) {
)
if idOrName == "" {
- return nil, ErrEmptyID
+ return nil, define.ErrEmptyID
}
if s.namespace != "" {
@@ -551,7 +552,7 @@ func (s *InMemoryState) LookupPod(idOrName string) (*Pod, error) {
if !ok {
// We have no containers in the namespace
// Return false
- return nil, errors.Wrapf(ErrNoSuchCtr, "no container found with name or ID %s", idOrName)
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName)
}
nameIndex = nsIndex.nameIndex
idIndex = nsIndex.idIndex
@@ -567,7 +568,7 @@ func (s *InMemoryState) LookupPod(idOrName string) (*Pod, error) {
fullID, err = idIndex.Get(idOrName)
if err != nil {
if err == truncindex.ErrNotExist {
- return nil, errors.Wrapf(ErrNoSuchPod, "no pod found with name or ID %s", idOrName)
+ return nil, errors.Wrapf(define.ErrNoSuchPod, "no pod found with name or ID %s", idOrName)
}
return nil, errors.Wrapf(err, "error performing truncindex lookup for ID %s", idOrName)
}
@@ -579,7 +580,7 @@ func (s *InMemoryState) LookupPod(idOrName string) (*Pod, error) {
pod, ok := s.pods[fullID]
if !ok {
// It's a container not a pod
- return nil, errors.Wrapf(ErrNoSuchPod, "id or name %s is a container not a pod", idOrName)
+ return nil, errors.Wrapf(define.ErrNoSuchPod, "id or name %s is a container not a pod", idOrName)
}
return pod, nil
@@ -588,7 +589,7 @@ func (s *InMemoryState) LookupPod(idOrName string) (*Pod, error) {
// HasPod checks if a pod with the given ID is present in the state
func (s *InMemoryState) HasPod(id string) (bool, error) {
if id == "" {
- return false, ErrEmptyID
+ return false, define.ErrEmptyID
}
pod, ok := s.pods[id]
@@ -602,11 +603,11 @@ func (s *InMemoryState) HasPod(id string) (bool, error) {
// PodHasContainer checks if the given pod has a container with the given ID
func (s *InMemoryState) PodHasContainer(pod *Pod, ctrID string) (bool, error) {
if !pod.valid {
- return false, errors.Wrapf(ErrPodRemoved, "pod %s is not valid", pod.ID())
+ return false, errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid", pod.ID())
}
if ctrID == "" {
- return false, ErrEmptyID
+ return false, define.ErrEmptyID
}
if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil {
@@ -616,7 +617,7 @@ func (s *InMemoryState) PodHasContainer(pod *Pod, ctrID string) (bool, error) {
podCtrs, ok := s.podContainers[pod.ID()]
if !ok {
pod.valid = false
- return false, errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in state", pod.ID())
+ return false, errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in state", pod.ID())
}
_, ok = podCtrs[ctrID]
@@ -626,7 +627,7 @@ func (s *InMemoryState) PodHasContainer(pod *Pod, ctrID string) (bool, error) {
// PodContainersByID returns the IDs of all containers in the given pod
func (s *InMemoryState) PodContainersByID(pod *Pod) ([]string, error) {
if !pod.valid {
- return nil, errors.Wrapf(ErrPodRemoved, "pod %s is not valid", pod.ID())
+ return nil, errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid", pod.ID())
}
if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil {
@@ -636,7 +637,7 @@ func (s *InMemoryState) PodContainersByID(pod *Pod) ([]string, error) {
podCtrs, ok := s.podContainers[pod.ID()]
if !ok {
pod.valid = false
- return nil, errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in state", pod.ID())
+ return nil, errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in state", pod.ID())
}
length := len(podCtrs)
@@ -655,7 +656,7 @@ func (s *InMemoryState) PodContainersByID(pod *Pod) ([]string, error) {
// PodContainers retrieves the containers from a pod
func (s *InMemoryState) PodContainers(pod *Pod) ([]*Container, error) {
if !pod.valid {
- return nil, errors.Wrapf(ErrPodRemoved, "pod %s is not valid", pod.ID())
+ return nil, errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid", pod.ID())
}
if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil {
@@ -665,7 +666,7 @@ func (s *InMemoryState) PodContainers(pod *Pod) ([]*Container, error) {
podCtrs, ok := s.podContainers[pod.ID()]
if !ok {
pod.valid = false
- return nil, errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in state", pod.ID())
+ return nil, errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in state", pod.ID())
}
length := len(podCtrs)
@@ -684,7 +685,7 @@ func (s *InMemoryState) PodContainers(pod *Pod) ([]*Container, error) {
// AddPod adds a given pod to the state
func (s *InMemoryState) AddPod(pod *Pod) error {
if !pod.valid {
- return errors.Wrapf(ErrPodRemoved, "pod %s is not valid and cannot be added", pod.ID())
+ return errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid and cannot be added", pod.ID())
}
if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil {
@@ -692,11 +693,11 @@ func (s *InMemoryState) AddPod(pod *Pod) error {
}
if _, ok := s.pods[pod.ID()]; ok {
- return errors.Wrapf(ErrPodExists, "pod with ID %s already exists in state", pod.ID())
+ return errors.Wrapf(define.ErrPodExists, "pod with ID %s already exists in state", pod.ID())
}
if _, ok := s.podContainers[pod.ID()]; ok {
- return errors.Wrapf(ErrPodExists, "pod with ID %s already exists in state", pod.ID())
+ return errors.Wrapf(define.ErrPodExists, "pod with ID %s already exists in state", pod.ID())
}
if err := s.nameIndex.Reserve(pod.Name(), pod.ID()); err != nil {
@@ -746,15 +747,15 @@ func (s *InMemoryState) RemovePod(pod *Pod) error {
if _, ok := s.pods[pod.ID()]; !ok {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID())
}
podCtrs, ok := s.podContainers[pod.ID()]
if !ok {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID())
}
if len(podCtrs) != 0 {
- return errors.Wrapf(ErrCtrExists, "pod %s is not empty and cannot be removed", pod.ID())
+ return errors.Wrapf(define.ErrCtrExists, "pod %s is not empty and cannot be removed", pod.ID())
}
if err := s.idIndex.Delete(pod.ID()); err != nil {
@@ -767,7 +768,7 @@ func (s *InMemoryState) RemovePod(pod *Pod) error {
if pod.config.Namespace != "" {
nsIndex, ok := s.namespaceIndexes[pod.config.Namespace]
if !ok {
- return errors.Wrapf(ErrInternal, "error retrieving index for namespace %q", pod.config.Namespace)
+ return errors.Wrapf(define.ErrInternal, "error retrieving index for namespace %q", pod.config.Namespace)
}
if err := nsIndex.idIndex.Delete(pod.ID()); err != nil {
return errors.Wrapf(err, "error removing container %s from namespace ID index", pod.ID())
@@ -784,7 +785,7 @@ func (s *InMemoryState) RemovePod(pod *Pod) error {
// Will only remove containers if no dependencies outside of the pod are present
func (s *InMemoryState) RemovePodContainers(pod *Pod) error {
if !pod.valid {
- return errors.Wrapf(ErrPodRemoved, "pod %s is not valid", pod.ID())
+ return errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid", pod.ID())
}
if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil {
@@ -795,7 +796,7 @@ func (s *InMemoryState) RemovePodContainers(pod *Pod) error {
podCtrs, ok := s.podContainers[pod.ID()]
if !ok {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID())
}
// Go through container dependencies. Check to see if any are outside the pod.
@@ -804,7 +805,7 @@ func (s *InMemoryState) RemovePodContainers(pod *Pod) error {
if ok {
for _, dep := range ctrDeps {
if _, ok := podCtrs[dep]; !ok {
- return errors.Wrapf(ErrCtrExists, "container %s has dependency %s outside of pod %s", ctr, dep, pod.ID())
+ return errors.Wrapf(define.ErrCtrExists, "container %s has dependency %s outside of pod %s", ctr, dep, pod.ID())
}
}
}
@@ -830,18 +831,18 @@ func (s *InMemoryState) RemovePodContainers(pod *Pod) error {
// state
func (s *InMemoryState) AddContainerToPod(pod *Pod, ctr *Container) error {
if !pod.valid {
- return errors.Wrapf(ErrPodRemoved, "pod %s is not valid", pod.ID())
+ return errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid", pod.ID())
}
if !ctr.valid {
- return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", ctr.ID())
+ return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", ctr.ID())
}
if ctr.config.Pod != pod.ID() {
- return errors.Wrapf(ErrInvalidArg, "container %s is not in pod %s", ctr.ID(), pod.ID())
+ return errors.Wrapf(define.ErrInvalidArg, "container %s is not in pod %s", ctr.ID(), pod.ID())
}
if ctr.config.Namespace != pod.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %s and pod %s is in namespace %s",
+ return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %s and pod %s is in namespace %s",
ctr.ID(), ctr.config.Namespace, pod.ID(), pod.config.Namespace)
}
@@ -853,12 +854,12 @@ func (s *InMemoryState) AddContainerToPod(pod *Pod, ctr *Container) error {
podCtrs, ok := s.podContainers[pod.ID()]
if !ok {
pod.valid = false
- return errors.Wrapf(ErrPodRemoved, "pod %s not found in state", pod.ID())
+ return errors.Wrapf(define.ErrPodRemoved, "pod %s not found in state", pod.ID())
}
// Is the container already in the pod?
if _, ok = podCtrs[ctr.ID()]; ok {
- return errors.Wrapf(ErrCtrExists, "container with ID %s already exists in pod %s", ctr.ID(), pod.ID())
+ return errors.Wrapf(define.ErrCtrExists, "container with ID %s already exists in pod %s", ctr.ID(), pod.ID())
}
// There are potential race conditions with this
@@ -867,20 +868,20 @@ func (s *InMemoryState) AddContainerToPod(pod *Pod, ctr *Container) error {
depCtrs := ctr.Dependencies()
for _, depCtr := range depCtrs {
if _, ok = s.containers[depCtr]; !ok {
- return errors.Wrapf(ErrNoSuchCtr, "cannot depend on nonexistent container %s", depCtr)
+ return errors.Wrapf(define.ErrNoSuchCtr, "cannot depend on nonexistent container %s", depCtr)
}
depCtrStruct, ok := podCtrs[depCtr]
if !ok {
- return errors.Wrapf(ErrInvalidArg, "cannot depend on container %s as it is not in pod %s", depCtr, pod.ID())
+ return errors.Wrapf(define.ErrInvalidArg, "cannot depend on container %s as it is not in pod %s", depCtr, pod.ID())
}
if depCtrStruct.config.Namespace != ctr.config.Namespace {
- return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %s and cannot depend on container %s in namespace %s", ctr.ID(), ctr.config.Namespace, depCtr, depCtrStruct.config.Namespace)
+ return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %s and cannot depend on container %s in namespace %s", ctr.ID(), ctr.config.Namespace, depCtr, depCtrStruct.config.Namespace)
}
}
// Add container to state
if _, ok = s.containers[ctr.ID()]; ok {
- return errors.Wrapf(ErrCtrExists, "container with ID %s already exists in state", ctr.ID())
+ return errors.Wrapf(define.ErrCtrExists, "container with ID %s already exists in state", ctr.ID())
}
if err := s.nameIndex.Reserve(ctr.Name(), ctr.ID()); err != nil {
@@ -928,10 +929,10 @@ func (s *InMemoryState) AddContainerToPod(pod *Pod, ctr *Container) error {
// The container is also removed from the state
func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
if !pod.valid {
- return errors.Wrapf(ErrPodRemoved, "pod %s is not valid and containers cannot be removed", pod.ID())
+ return errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid and containers cannot be removed", pod.ID())
}
if !ctr.valid {
- return errors.Wrapf(ErrCtrRemoved, "container %s is not valid and cannot be removed from the pod", ctr.ID())
+ return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid and cannot be removed from the pod", ctr.ID())
}
if err := s.checkNSMatch(ctr.ID(), ctr.Namespace()); err != nil {
@@ -942,30 +943,30 @@ func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
deps, ok := s.ctrDepends[ctr.ID()]
if ok && len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
- return errors.Wrapf(ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr)
+ return errors.Wrapf(define.ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr)
}
// Retrieve pod containers
podCtrs, ok := s.podContainers[pod.ID()]
if !ok {
pod.valid = false
- return errors.Wrapf(ErrPodRemoved, "pod %s has been removed", pod.ID())
+ return errors.Wrapf(define.ErrPodRemoved, "pod %s has been removed", pod.ID())
}
// Does the container exist?
if _, ok := s.containers[ctr.ID()]; !ok {
ctr.valid = false
- return errors.Wrapf(ErrNoSuchCtr, "container %s does not exist in state", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in state", ctr.ID())
}
// Is the container in the pod?
if _, ok := podCtrs[ctr.ID()]; !ok {
- return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in pod %s", ctr.ID(), pod.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in pod %s", ctr.ID(), pod.ID())
}
// Remove container from state
if _, ok := s.containers[ctr.ID()]; !ok {
- return errors.Wrapf(ErrNoSuchCtr, "no container exists in state with ID %s", ctr.ID())
+ return errors.Wrapf(define.ErrNoSuchCtr, "no container exists in state with ID %s", ctr.ID())
}
if err := s.idIndex.Delete(ctr.ID()); err != nil {
@@ -980,7 +981,7 @@ func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
if ctr.config.Namespace != "" {
nsIndex, ok := s.namespaceIndexes[ctr.config.Namespace]
if !ok {
- return errors.Wrapf(ErrInternal, "error retrieving index for namespace %q", ctr.config.Namespace)
+ return errors.Wrapf(define.ErrInternal, "error retrieving index for namespace %q", ctr.config.Namespace)
}
if err := nsIndex.idIndex.Delete(ctr.ID()); err != nil {
return errors.Wrapf(err, "error removing container %s from namespace ID index", ctr.ID())
@@ -1001,7 +1002,7 @@ func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
// This is a no-op as there is no backing store
func (s *InMemoryState) UpdatePod(pod *Pod) error {
if !pod.valid {
- return ErrPodRemoved
+ return define.ErrPodRemoved
}
if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil {
@@ -1010,7 +1011,7 @@ func (s *InMemoryState) UpdatePod(pod *Pod) error {
if _, ok := s.pods[pod.ID()]; !ok {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID())
}
return nil
@@ -1020,7 +1021,7 @@ func (s *InMemoryState) UpdatePod(pod *Pod) error {
// This is a no-op as there is no backing store
func (s *InMemoryState) SavePod(pod *Pod) error {
if !pod.valid {
- return ErrPodRemoved
+ return define.ErrPodRemoved
}
if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil {
@@ -1029,7 +1030,7 @@ func (s *InMemoryState) SavePod(pod *Pod) error {
if _, ok := s.pods[pod.ID()]; !ok {
pod.valid = false
- return errors.Wrapf(ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID())
+ return errors.Wrapf(define.ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID())
}
return nil
@@ -1133,7 +1134,7 @@ func (s *InMemoryState) removeCtrFromVolDependsMap(depCtrID, volName string) {
// namespaces.
func (s *InMemoryState) checkNSMatch(id, ns string) error {
if s.namespace != "" && s.namespace != ns {
- return errors.Wrapf(ErrNSMismatch, "cannot access %s as it is in namespace %q and we are in namespace %q",
+ return errors.Wrapf(define.ErrNSMismatch, "cannot access %s as it is in namespace %q and we are in namespace %q",
id, ns, s.namespace)
}
return nil
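
The sentinel errors used by the in-memory state above now live in libpod/define, while call sites keep wrapping them with errors.Wrapf for context. A minimal sketch of how a caller can still test for the sentinel after wrapping; the lookup helper and container ID are illustrative, not taken from this change:

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/define"
	"github.com/pkg/errors"
)

// lookup stands in for a state method that wraps the define sentinels,
// mirroring the errors.Wrapf pattern used in the in-memory state.
func lookup(id string) error {
	return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", id)
}

func main() {
	err := lookup("deadbeef")
	// errors.Cause unwraps the Wrapf chain back to the sentinel value.
	if errors.Cause(err) == define.ErrNoSuchCtr {
		fmt.Println("not found:", err)
	}
}
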
diff --git a/libpod/info.go b/libpod/info.go
index b42f64a1f..4a89fa648 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -19,12 +19,6 @@ import (
"github.com/pkg/errors"
)
-// InfoData holds the info type, i.e store, host etc and the data for each type
-type InfoData struct {
- Type string
- Data map[string]interface{}
-}
-
// top-level "host" info
func (r *Runtime) hostInfo() (map[string]interface{}, error) {
// let's say OS, arch, number of CPUs, amount of memory, maybe OS distribution/version, hostname, kernel version, uptime
@@ -47,12 +41,12 @@ func (r *Runtime) hostInfo() (map[string]interface{}, error) {
hostDistributionInfo := r.GetHostDistributionInfo()
info["Conmon"] = map[string]interface{}{
"path": r.conmonPath,
- "package": r.ociRuntime.conmonPackage(),
+ "package": r.defaultOCIRuntime.conmonPackage(),
"version": conmonVersion,
}
info["OCIRuntime"] = map[string]interface{}{
- "path": r.ociRuntime.path,
- "package": r.ociRuntime.pathPackage(),
+ "path": r.defaultOCIRuntime.path,
+ "package": r.defaultOCIRuntime.pathPackage(),
"version": ociruntimeVersion,
}
info["Distribution"] = map[string]interface{}{
@@ -190,12 +184,12 @@ func (r *Runtime) GetConmonVersion() (string, error) {
// GetOCIRuntimePath returns the path to the OCI Runtime Path the runtime is using
func (r *Runtime) GetOCIRuntimePath() string {
- return r.ociRuntimePath.Paths[0]
+ return r.defaultOCIRuntime.path
}
// GetOCIRuntimeVersion returns a string representation of the oci runtimes version
func (r *Runtime) GetOCIRuntimeVersion() (string, error) {
- output, err := utils.ExecCmd(r.ociRuntimePath.Paths[0], "--version")
+ output, err := utils.ExecCmd(r.GetOCIRuntimePath(), "--version")
if err != nil {
return "", err
}
diff --git a/libpod/kube.go b/libpod/kube.go
index c5fd9d75c..084a3df4f 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -1,12 +1,13 @@
package libpod
import (
- "fmt"
"math/rand"
+ "os"
"strconv"
"strings"
"time"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/lookup"
"github.com/containers/libpod/pkg/util"
"github.com/cri-o/ocicni/pkg/ocicni"
@@ -15,7 +16,6 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/resource"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -131,27 +131,44 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor
var (
podContainers []v1.Container
)
+ deDupPodVolumes := make(map[string]*v1.Volume)
first := true
for _, ctr := range containers {
if !ctr.IsInfra() {
- result, err := containerToV1Container(ctr)
+ ctr, volumes, err := containerToV1Container(ctr)
if err != nil {
return nil, err
}
+
+ // Since port bindings for the pod are handled by the
+ // infra container, wipe them here.
+ ctr.Ports = nil
+
// We add the original port declarations from the libpod infra container
// to the first kubernetes container description because otherwise we lose
// the original container/port bindings.
if first && len(ports) > 0 {
- result.Ports = ports
+ ctr.Ports = ports
first = false
}
- podContainers = append(podContainers, result)
+ podContainers = append(podContainers, ctr)
+ // Deduplicate volumes, so if containers in the pod share a volume, it's only
+ // listed in the volumes section once
+ for _, vol := range volumes {
+ vol := vol
+ deDupPodVolumes[vol.Name] = &vol
+ }
}
}
- return addContainersToPodObject(podContainers, p.Name()), nil
+ podVolumes := make([]v1.Volume, 0, len(deDupPodVolumes))
+ for _, vol := range deDupPodVolumes {
+ podVolumes = append(podVolumes, *vol)
+ }
+
+ return addContainersAndVolumesToPodObject(podContainers, podVolumes, p.Name()), nil
}
-func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod {
+func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.Volume, podName string) *v1.Pod {
tm := v12.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
@@ -162,7 +179,7 @@ func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod
labels["app"] = removeUnderscores(podName)
om := v12.ObjectMeta{
// The name of the pod is container_name-libpod
- Name: fmt.Sprintf("%s", removeUnderscores(podName)),
+ Name: removeUnderscores(podName),
Labels: labels,
// CreationTimestamp seems to be required, so adding it; in doing so, the timestamp
// will reflect time this is run (not container create time) because the conversion
@@ -171,6 +188,7 @@ func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod
}
ps := v1.PodSpec{
Containers: containers,
+ Volumes: volumes,
}
p := v1.Pod{
TypeMeta: tm,
@@ -184,56 +202,58 @@ func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod
// for a single container. we "insert" that container description in a pod.
func simplePodWithV1Container(ctr *Container) (*v1.Pod, error) {
var containers []v1.Container
- result, err := containerToV1Container(ctr)
+ kubeCtr, kubeVols, err := containerToV1Container(ctr)
if err != nil {
return nil, err
}
- containers = append(containers, result)
- return addContainersToPodObject(containers, ctr.Name()), nil
+ containers = append(containers, kubeCtr)
+ return addContainersAndVolumesToPodObject(containers, kubeVols, ctr.Name()), nil
}
// containerToV1Container converts information we know about a libpod container
// to a V1.Container specification.
-func containerToV1Container(c *Container) (v1.Container, error) {
+func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) {
kubeContainer := v1.Container{}
+ kubeVolumes := []v1.Volume{}
kubeSec, err := generateKubeSecurityContext(c)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
if len(c.config.Spec.Linux.Devices) > 0 {
// TODO Enable when we can support devices and their names
devices, err := generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
kubeContainer.VolumeDevices = devices
- return kubeContainer, errors.Wrapf(ErrNotImplemented, "linux devices")
+ return kubeContainer, kubeVolumes, errors.Wrapf(define.ErrNotImplemented, "linux devices")
}
if len(c.config.UserVolumes) > 0 {
// TODO Until we can resolve what the volume name should be, this is disabled
// Volume names need to be coordinated "globally" in the kube files.
- volumes, err := libpodMountsToKubeVolumeMounts(c)
+ volumeMounts, volumes, err := libpodMountsToKubeVolumeMounts(c)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
- kubeContainer.VolumeMounts = volumes
+ kubeContainer.VolumeMounts = volumeMounts
+ kubeVolumes = append(kubeVolumes, volumes...)
}
envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
portmappings, err := c.PortMappings()
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
ports, err := ocicniPortMappingToContainerPort(portmappings)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
containerCommands := c.Command()
@@ -257,7 +277,7 @@ func containerToV1Container(c *Container) (v1.Container, error) {
kubeContainer.StdinOnce = false
kubeContainer.TTY = c.config.Spec.Process.Terminal
- return kubeContainer, nil
+ return kubeContainer, kubeVolumes, nil
}
// ocicniPortMappingToContainerPort takes an ocicni portmapping and converts
@@ -303,52 +323,82 @@ func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) {
return envVars, nil
}
-// Is this worth it?
-func libpodMaxAndMinToResourceList(c *Container) (v1.ResourceList, v1.ResourceList) { //nolint
- // It does not appear we can properly calculate CPU resources from the information
- // we know in libpod. Libpod knows CPUs by time, shares, etc.
+// libpodMountsToKubeVolumeMounts converts the containers mounts to a struct kube understands
+func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume, error) {
+ var vms []v1.VolumeMount
+ var vos []v1.Volume
- // We also only know about a memory limit; no memory minimum
- maxResources := make(map[v1.ResourceName]resource.Quantity)
- minResources := make(map[v1.ResourceName]resource.Quantity)
- config := c.Config()
- maxMem := config.Spec.Linux.Resources.Memory.Limit
+ // TODO when named volumes are supported in play kube, also parse named volumes here
+ _, mounts := c.sortUserVolumes(c.config.Spec)
+ for _, m := range mounts {
+ vm, vo, err := generateKubeVolumeMount(m)
+ if err != nil {
+ return vms, vos, err
+ }
+ vms = append(vms, vm)
+ vos = append(vos, vo)
+ }
+ return vms, vos, nil
+}
- _ = maxMem
+// generateKubeVolumeMount takes a user specified mount and returns
+// a kubernetes VolumeMount (to be added to the container) and a kubernetes Volume
+// (to be added to the pod)
+func generateKubeVolumeMount(m specs.Mount) (v1.VolumeMount, v1.Volume, error) {
+ vm := v1.VolumeMount{}
+ vo := v1.Volume{}
- return maxResources, minResources
+ name, err := convertVolumePathToName(m.Source)
+ if err != nil {
+ return vm, vo, err
+ }
+ vm.Name = name
+ vm.MountPath = m.Destination
+ if util.StringInSlice("ro", m.Options) {
+ vm.ReadOnly = true
+ }
+
+ vo.Name = name
+ vo.HostPath = &v1.HostPathVolumeSource{}
+ vo.HostPath.Path = m.Source
+ isDir, err := isHostPathDirectory(m.Source)
+ // neither a directory nor a file lives here; default to creating a directory
+ // TODO should this be an error instead?
+ var hostPathType v1.HostPathType
+ if err != nil {
+ hostPathType = v1.HostPathDirectoryOrCreate
+ } else if isDir {
+ hostPathType = v1.HostPathDirectory
+ } else {
+ hostPathType = v1.HostPathFile
+ }
+ vo.HostPath.Type = &hostPathType
+
+ return vm, vo, nil
}
-func generateKubeVolumeMount(hostSourcePath string, mounts []specs.Mount) (v1.VolumeMount, error) {
- vm := v1.VolumeMount{}
- for _, m := range mounts {
- if m.Source == hostSourcePath {
- // TODO Name is not provided and is required by Kube; therefore, this is disabled earlier
- //vm.Name =
- vm.MountPath = m.Source
- vm.SubPath = m.Destination
- if util.StringInSlice("ro", m.Options) {
- vm.ReadOnly = true
- }
- return vm, nil
- }
+func isHostPathDirectory(hostPathSource string) (bool, error) {
+ info, err := os.Stat(hostPathSource)
+ if err != nil {
+ return false, err
}
- return vm, errors.New("unable to find mount source")
+ return info.Mode().IsDir(), nil
}
-// libpodMountsToKubeVolumeMounts converts the containers mounts to a struct kube understands
-func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, error) {
- // At this point, I dont think we can distinguish between the default
- // volume mounts and user added ones. For now, we pass them all.
- var vms []v1.VolumeMount
- for _, hostSourcePath := range c.config.UserVolumes {
- vm, err := generateKubeVolumeMount(hostSourcePath, c.config.Spec.Mounts)
- if err != nil {
- continue
+func convertVolumePathToName(hostSourcePath string) (string, error) {
+ if len(hostSourcePath) == 0 {
+ return "", errors.Errorf("hostSourcePath must be specified to generate volume name")
+ }
+ if len(hostSourcePath) == 1 {
+ if hostSourcePath != "/" {
+ return "", errors.Errorf("hostSourcePath malformatted: %s", hostSourcePath)
}
- vms = append(vms, vm)
+ // add special case name
+ return "root", nil
}
- return vms, nil
+ // First, trim trailing slashes, then replace slashes with dashes.
+ // Thus, /mnt/data/ will become mnt-data
+ return strings.Replace(strings.Trim(hostSourcePath, "/"), "/", "-", -1), nil
}
func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v1.Capabilities {
@@ -360,16 +410,14 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v
// those indicate a dropped cap
for _, capability := range defaultCaps {
if !util.StringInSlice(capability, containerCaps) {
- cap := v1.Capability(capability)
- drop = append(drop, cap)
+ drop = append(drop, v1.Capability(capability))
}
}
// Find caps in the container but not in the defaults; those indicate
// an added cap
for _, capability := range containerCaps {
if !util.StringInSlice(capability, defaultCaps) {
- cap := v1.Capability(capability)
- add = append(add, cap)
+ add = append(add, v1.Capability(capability))
}
}
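
kube.go above now derives a Kubernetes volume name from each host path via convertVolumePathToName. A small standalone sketch restating that naming rule (the helper itself is unexported, so this re-implements it purely for illustration):

package main

import (
	"fmt"
	"strings"
)

// volumeNameForPath restates the rule used by convertVolumePathToName:
// "/" maps to the special name "root", any other single-character path is
// rejected, and longer paths have slashes trimmed and converted to dashes.
func volumeNameForPath(hostSourcePath string) (string, error) {
	if len(hostSourcePath) == 0 {
		return "", fmt.Errorf("hostSourcePath must be specified to generate volume name")
	}
	if len(hostSourcePath) == 1 {
		if hostSourcePath != "/" {
			return "", fmt.Errorf("hostSourcePath is malformed: %s", hostSourcePath)
		}
		return "root", nil
	}
	return strings.Replace(strings.Trim(hostSourcePath, "/"), "/", "-", -1), nil
}

func main() {
	name, _ := volumeNameForPath("/mnt/data/")
	fmt.Println(name) // prints "mnt-data"
}
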
diff --git a/libpod/lock/file/file_lock.go b/libpod/lock/file/file_lock.go
new file mode 100644
index 000000000..e50d67321
--- /dev/null
+++ b/libpod/lock/file/file_lock.go
@@ -0,0 +1,175 @@
+package file
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "syscall"
+
+ "github.com/containers/storage"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// FileLocks is a struct enabling POSIX-compatible advisory locking, backed
+// by one lock file per lock in a dedicated directory.
+type FileLocks struct { // nolint
+ lockPath string
+ valid bool
+}
+
+// CreateFileLock sets up a directory containing the various lock files.
+func CreateFileLock(path string) (*FileLocks, error) {
+ _, err := os.Stat(path)
+ if err == nil {
+ return nil, errors.Wrapf(syscall.EEXIST, "directory %s exists", path)
+ }
+ if err := os.MkdirAll(path, 0711); err != nil {
+ return nil, errors.Wrapf(err, "cannot create %s", path)
+ }
+
+ locks := new(FileLocks)
+ locks.lockPath = path
+ locks.valid = true
+
+ return locks, nil
+}
+
+// OpenFileLock opens an existing directory with the lock files.
+func OpenFileLock(path string) (*FileLocks, error) {
+ _, err := os.Stat(path)
+ if err != nil {
+ return nil, errors.Wrapf(err, "accessing directory %s", path)
+ }
+
+ locks := new(FileLocks)
+ locks.lockPath = path
+ locks.valid = true
+
+ return locks, nil
+}
+
+// Close removes the lock directory, rendering these locks unusable
+// after it returns.
+// WARNING: If you Close() while there are still locks locked, these locks may
+// fail to release, causing a program freeze.
+// Close() is only intended to be used while testing the locks.
+func (locks *FileLocks) Close() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+ err := os.RemoveAll(locks.lockPath)
+ if err != nil {
+ return errors.Wrapf(err, "deleting directory %s", locks.lockPath)
+ }
+ return nil
+}
+
+func (locks *FileLocks) getLockPath(lck uint32) string {
+ return filepath.Join(locks.lockPath, strconv.FormatInt(int64(lck), 10))
+}
+
+// AllocateLock allocates a lock and returns the index of the lock that was allocated.
+func (locks *FileLocks) AllocateLock() (uint32, error) {
+ if !locks.valid {
+ return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ id := uint32(0)
+ for ; ; id++ {
+ path := locks.getLockPath(id)
+ f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil {
+ if os.IsExist(err) {
+ continue
+ }
+ return 0, errors.Wrapf(err, "creating lock file")
+ }
+ f.Close()
+ break
+ }
+ return id, nil
+}
+
+// AllocateGivenLock allocates the given lock from the lock directory for
+// use by a container or pod.
+// If the lock is already in use or the index is invalid an error will be
+// returned.
+func (locks *FileLocks) AllocateGivenLock(lck uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ f, err := os.OpenFile(locks.getLockPath(lck), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil {
+ return errors.Wrapf(err, "error creating lock %d", lck)
+ }
+ f.Close()
+
+ return nil
+}
+
+// DeallocateLock removes a lock file so the lock can be
+// reallocated to another container or pod.
+// The given lock must be already allocated, or an error will be returned.
+func (locks *FileLocks) DeallocateLock(lck uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+ if err := os.Remove(locks.getLockPath(lck)); err != nil {
+ return errors.Wrapf(err, "deallocating lock %d", lck)
+ }
+ return nil
+}
+
+// DeallocateAllLocks frees all locks so they can be reallocated to
+// other containers and pods.
+func (locks *FileLocks) DeallocateAllLocks() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+ files, err := ioutil.ReadDir(locks.lockPath)
+ if err != nil {
+ return errors.Wrapf(err, "error reading directory %s", locks.lockPath)
+ }
+ var lastErr error
+ for _, f := range files {
+ p := filepath.Join(locks.lockPath, f.Name())
+ err := os.Remove(p)
+ if err != nil {
+ lastErr = err
+ logrus.Errorf("deallocating lock %s", p)
+ }
+ }
+ return lastErr
+}
+
+// LockFileLock locks the given lock.
+func (locks *FileLocks) LockFileLock(lck uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ l, err := storage.GetLockfile(locks.getLockPath(lck))
+ if err != nil {
+ return errors.Wrapf(err, "error acquiring lock")
+ }
+
+ l.Lock()
+ return nil
+}
+
+// UnlockFileLock unlocks the given lock.
+func (locks *FileLocks) UnlockFileLock(lck uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+ l, err := storage.GetLockfile(locks.getLockPath(lck))
+ if err != nil {
+ return errors.Wrapf(err, "error acquiring lock")
+ }
+
+ l.Unlock()
+ return nil
+}
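
The new file package exposes a small allocate/lock/unlock/deallocate lifecycle. A minimal usage sketch, assuming an arbitrary scratch directory; the flow mirrors the unit test added below:

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock/file"
)

func main() {
	// CreateFileLock fails if the directory already exists; the path here
	// is only an example.
	locks, err := file.CreateFileLock("/tmp/libpod-file-locks")
	if err != nil {
		panic(err)
	}
	// Close removes the directory; as the comments above warn, it is meant
	// for tests and teardown only.
	defer locks.Close()

	id, err := locks.AllocateLock()
	if err != nil {
		panic(err)
	}
	fmt.Println("allocated lock", id)

	if err := locks.LockFileLock(id); err != nil {
		panic(err)
	}
	// ... critical section ...
	if err := locks.UnlockFileLock(id); err != nil {
		panic(err)
	}
	if err := locks.DeallocateLock(id); err != nil {
		panic(err)
	}
}
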
diff --git a/libpod/lock/file/file_lock_test.go b/libpod/lock/file/file_lock_test.go
new file mode 100644
index 000000000..6320d6b70
--- /dev/null
+++ b/libpod/lock/file/file_lock_test.go
@@ -0,0 +1,74 @@
+package file
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// Test that creating and destroying locks work
+func TestCreateAndDeallocate(t *testing.T) {
+ d, err := ioutil.TempDir("", "filelock")
+ assert.NoError(t, err)
+ defer os.RemoveAll(d)
+
+ l, err := OpenFileLock(filepath.Join(d, "locks"))
+ assert.Error(t, err)
+
+ l, err = CreateFileLock(filepath.Join(d, "locks"))
+ assert.NoError(t, err)
+
+ lock, err := l.AllocateLock()
+ assert.NoError(t, err)
+
+ err = l.AllocateGivenLock(lock)
+ assert.Error(t, err)
+
+ err = l.DeallocateLock(lock)
+ assert.NoError(t, err)
+
+ err = l.AllocateGivenLock(lock)
+ assert.NoError(t, err)
+
+ err = l.DeallocateAllLocks()
+ assert.NoError(t, err)
+
+ err = l.AllocateGivenLock(lock)
+ assert.NoError(t, err)
+
+ err = l.DeallocateAllLocks()
+ assert.NoError(t, err)
+}
+
+// Test that creating and destroying locks work
+func TestLockAndUnlock(t *testing.T) {
+ d, err := ioutil.TempDir("", "filelock")
+ assert.NoError(t, err)
+ defer os.RemoveAll(d)
+
+ l, err := CreateFileLock(filepath.Join(d, "locks"))
+ assert.NoError(t, err)
+
+ lock, err := l.AllocateLock()
+ assert.NoError(t, err)
+
+ err = l.LockFileLock(lock)
+ assert.NoError(t, err)
+
+ lslocks, err := exec.LookPath("lslocks")
+ if err == nil {
+ lockPath := l.getLockPath(lock)
+ out, err := exec.Command(lslocks, "--json", "-p", fmt.Sprintf("%d", os.Getpid())).CombinedOutput()
+ assert.NoError(t, err)
+
+ assert.Contains(t, string(out), lockPath)
+ }
+
+ err = l.UnlockFileLock(lock)
+ assert.NoError(t, err)
+}
diff --git a/libpod/lock/file_lock_manager.go b/libpod/lock/file_lock_manager.go
new file mode 100644
index 000000000..8a4d939d3
--- /dev/null
+++ b/libpod/lock/file_lock_manager.go
@@ -0,0 +1,110 @@
+package lock
+
+import (
+ "github.com/containers/libpod/libpod/lock/file"
+)
+
+// FileLockManager manages file-based locks.
+type FileLockManager struct {
+ locks *file.FileLocks
+}
+
+// NewFileLockManager makes a new FileLockManager at the specified directory.
+func NewFileLockManager(lockPath string) (Manager, error) {
+ locks, err := file.CreateFileLock(lockPath)
+ if err != nil {
+ return nil, err
+ }
+
+ manager := new(FileLockManager)
+ manager.locks = locks
+
+ return manager, nil
+}
+
+// OpenFileLockManager opens an existing FileLockManager at the specified directory.
+func OpenFileLockManager(path string) (Manager, error) {
+ locks, err := file.OpenFileLock(path)
+ if err != nil {
+ return nil, err
+ }
+
+ manager := new(FileLockManager)
+ manager.locks = locks
+
+ return manager, nil
+}
+
+// AllocateLock allocates a new lock from the manager.
+func (m *FileLockManager) AllocateLock() (Locker, error) {
+ semIndex, err := m.locks.AllocateLock()
+ if err != nil {
+ return nil, err
+ }
+
+ lock := new(FileLock)
+ lock.lockID = semIndex
+ lock.manager = m
+
+ return lock, nil
+}
+
+// AllocateAndRetrieveLock allocates the lock with the given ID and returns it.
+// If the lock is already allocated, error.
+func (m *FileLockManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
+ lock := new(FileLock)
+ lock.lockID = id
+ lock.manager = m
+
+ if err := m.locks.AllocateGivenLock(id); err != nil {
+ return nil, err
+ }
+
+ return lock, nil
+}
+
+// RetrieveLock retrieves a lock from the manager given its ID.
+func (m *FileLockManager) RetrieveLock(id uint32) (Locker, error) {
+ lock := new(FileLock)
+ lock.lockID = id
+ lock.manager = m
+
+ return lock, nil
+}
+
+// FreeAllLocks frees all locks in the manager.
+// This function is DANGEROUS. Please read the full comment in locks.go before
+// trying to use it.
+func (m *FileLockManager) FreeAllLocks() error {
+ return m.locks.DeallocateAllLocks()
+}
+
+// FileLock is an individual file-based lock.
+type FileLock struct {
+ lockID uint32
+ manager *FileLockManager
+}
+
+// ID returns the ID of the lock.
+func (l *FileLock) ID() uint32 {
+ return l.lockID
+}
+
+// Lock acquires the lock.
+func (l *FileLock) Lock() {
+ if err := l.manager.locks.LockFileLock(l.lockID); err != nil {
+ panic(err.Error())
+ }
+}
+
+// Unlock releases the lock.
+func (l *FileLock) Unlock() {
+ if err := l.manager.locks.UnlockFileLock(l.lockID); err != nil {
+ panic(err.Error())
+ }
+}
+
+// Free releases the lock, allowing it to be reused.
+func (l *FileLock) Free() error {
+ return l.manager.locks.DeallocateLock(l.lockID)
+}
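
Because FileLockManager satisfies the same lock.Manager interface as the SHM-backed manager, callers are insulated from the backend choice. A minimal sketch, with the lock directory path chosen arbitrarily:

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock"
)

func main() {
	// The directory must not exist yet; the path is illustrative.
	manager, err := lock.NewFileLockManager("/tmp/libpod-lock-dir")
	if err != nil {
		panic(err)
	}

	l, err := manager.AllocateLock()
	if err != nil {
		panic(err)
	}

	l.Lock()
	// ... work guarded by the per-container lock ...
	l.Unlock()

	// Free returns the lock for reuse by another container or pod.
	if err := l.Free(); err != nil {
		panic(err)
	}
	fmt.Println("released lock", l.ID())
}
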
diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c
index 047d3c417..fbb3f57cc 100644
--- a/libpod/lock/shm/shm_lock.c
+++ b/libpod/lock/shm/shm_lock.c
@@ -413,7 +413,7 @@ int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index) {
// Returns 0 on success, negative ERRNO values on failure
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
bitmap_t test_map;
- int bitmap_index, index_in_bitmap, ret_code, i;
+ int bitmap_index, index_in_bitmap, ret_code;
if (shm == NULL) {
return -1 * EINVAL;
@@ -500,7 +500,7 @@ int32_t deallocate_all_semaphores(shm_struct_t *shm) {
// subsequently realize they have been removed).
// Returns 0 on success, -1 on failure
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
- int bitmap_index, index_in_bitmap, ret_code;
+ int bitmap_index, index_in_bitmap;
if (shm == NULL) {
return -1 * EINVAL;
@@ -522,7 +522,7 @@ int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
// subsequently realize they have been removed).
// Returns 0 on success, -1 on failure
int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
- int bitmap_index, index_in_bitmap, ret_code;
+ int bitmap_index, index_in_bitmap;
if (shm == NULL) {
return -1 * EINVAL;
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index c21e9a221..322e92a8f 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -1,6 +1,9 @@
+// +build linux,cgo
+
package shm
// #cgo LDFLAGS: -lrt -lpthread
+// #cgo CFLAGS: -Wall -Werror
// #include <stdlib.h>
// #include "shm_lock.h"
// const uint32_t bitmap_size_c = BITMAP_SIZE;
@@ -19,7 +22,7 @@ var (
// BitmapSize is the size of the bitmap used when managing SHM locks.
// an SHM lock manager's max locks will be rounded up to a multiple of
// this number.
- BitmapSize uint32 = uint32(C.bitmap_size_c)
+ BitmapSize = uint32(C.bitmap_size_c)
)
// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
index 759f8178a..8796b43f4 100644
--- a/libpod/lock/shm/shm_lock.h
+++ b/libpod/lock/shm/shm_lock.h
@@ -32,9 +32,6 @@ typedef struct shm_struct {
lock_group_t locks[];
} shm_struct_t;
-static size_t compute_shm_size(uint32_t num_bitmaps);
-static int take_mutex(pthread_mutex_t *mutex);
-static int release_mutex(pthread_mutex_t *mutex);
shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code);
shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
int32_t close_lock_shm(shm_struct_t *shm);
diff --git a/libpod/lock/shm/shm_lock_nocgo.go b/libpod/lock/shm/shm_lock_nocgo.go
new file mode 100644
index 000000000..ea1488c90
--- /dev/null
+++ b/libpod/lock/shm/shm_lock_nocgo.go
@@ -0,0 +1,102 @@
+// +build linux,!cgo
+
+package shm
+
+import (
+ "github.com/sirupsen/logrus"
+)
+
+// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
+// segment.
+type SHMLocks struct {
+}
+
+// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
+// semaphores, and returns a struct that can be used to operate on those locks.
+// numLocks must not be 0, and may be rounded up to a multiple of the bitmap
+// size used by the underlying implementation.
+func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+ logrus.Error("locks are not supported without cgo")
+ return &SHMLocks{}, nil
+}
+
+// OpenSHMLock opens an existing shared-memory segment holding a given number of
+// POSIX semaphores. numLocks must match the number of locks the shared memory
+// segment was created with.
+func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+ logrus.Error("locks are not supported without cgo")
+ return &SHMLocks{}, nil
+}
+
+// GetMaxLocks returns the maximum number of locks in the SHM
+func (locks *SHMLocks) GetMaxLocks() uint32 {
+ logrus.Error("locks are not supported without cgo")
+ return 0
+}
+
+// Close closes an existing shared-memory segment.
+// The segment will be rendered unusable after closing.
+// WARNING: If you Close() while there are still locks locked, these locks may
+// fail to release, causing a program freeze.
+// Close() is only intended to be used while testing the locks.
+func (locks *SHMLocks) Close() error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
+// by a container or pod.
+// Returns the index of the semaphore that was allocated.
+// Allocations past the maximum number of locks given when the SHM segment was
+// created will result in an error, and no semaphore will be allocated.
+func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
+ logrus.Error("locks are not supported without cgo")
+ return 0, nil
+}
+
+// AllocateGivenSemaphore allocates the given semaphore from the shared-memory
+// segment for use by a container or pod.
+// If the semaphore is already in use or the index is invalid an error will be
+// returned.
+func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
+// reallocated to another container or pod.
+// The given semaphore must be already allocated, or an error will be returned.
+func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
+// other containers and pods.
+func (locks *SHMLocks) DeallocateAllSemaphores() error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// LockSemaphore locks the given semaphore.
+// If the semaphore is already locked, LockSemaphore will block until the lock
+// can be acquired.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) LockSemaphore(sem uint32) error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// UnlockSemaphore unlocks the given semaphore.
+// Unlocking a semaphore that is already unlocked will return EBUSY.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
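
shm_lock_nocgo.go pairs with shm_lock.go through mutually exclusive build tags, so a CGO_ENABLED=0 build still compiles and merely logs that SHM locks are unavailable. A sketch of the same pattern with two hypothetical files (the file names, package, and function are invented for illustration):

// feature_cgo.go
// +build linux,cgo

package feature

// Supported reports that the real, C-backed implementation is present.
func Supported() bool { return true }

// feature_nocgo.go
// +build linux,!cgo

package feature

// Supported is the stub compiled when cgo is disabled.
func Supported() bool { return false }
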
diff --git a/libpod/container_log.go b/libpod/logs/log.go
index 374e5a1fc..0b1703567 100644
--- a/libpod/container_log.go
+++ b/libpod/logs/log.go
@@ -1,31 +1,29 @@
-package libpod
+package logs
import (
"fmt"
"io/ioutil"
- "os"
"strings"
"sync"
"time"
"github.com/hpcloud/tail"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
const (
- // logTimeFormat is the time format used in the log.
+ // LogTimeFormat is the time format used in the log.
// It is a modified version of RFC3339Nano that guarantees trailing
// zeroes are not trimmed, taken from
// https://github.com/golang/go/issues/19635
- logTimeFormat = "2006-01-02T15:04:05.000000000Z07:00"
+ LogTimeFormat = "2006-01-02T15:04:05.000000000Z07:00"
- // partialLogType signifies a log line that exceeded the buffer
+ // PartialLogType signifies a log line that exceeded the buffer
// length and needed to spill into a new line
- partialLogType = "P"
+ PartialLogType = "P"
- // fullLogType signifies a log line is full
- fullLogType = "F"
+ // FullLogType signifies a log line is full
+ FullLogType = "F"
)
// LogOptions is the options you can use for logs
@@ -48,72 +46,8 @@ type LogLine struct {
CID string
}
-// Log is a runtime function that can read one or more container logs.
-func (r *Runtime) Log(containers []*Container, options *LogOptions, logChannel chan *LogLine) error {
- for _, ctr := range containers {
- if err := ctr.ReadLog(options, logChannel); err != nil {
- return err
- }
- }
- return nil
-}
-
-// ReadLog reads a containers log based on the input options and returns loglines over a channel
-func (c *Container) ReadLog(options *LogOptions, logChannel chan *LogLine) error {
- // TODO Skip sending logs until journald logs can be read
- // TODO make this not a magic string
- if c.LogDriver() == JournaldLogging {
- return c.readFromJournal(options, logChannel)
- }
- return c.readFromLogFile(options, logChannel)
-}
-
-func (c *Container) readFromLogFile(options *LogOptions, logChannel chan *LogLine) error {
- t, tailLog, err := getLogFile(c.LogPath(), options)
- if err != nil {
- // If the log file does not exist, this is not fatal.
- if os.IsNotExist(errors.Cause(err)) {
- return nil
- }
- return errors.Wrapf(err, "unable to read log file %s for %s ", c.ID(), c.LogPath())
- }
- options.WaitGroup.Add(1)
- if len(tailLog) > 0 {
- for _, nll := range tailLog {
- nll.CID = c.ID()
- if nll.Since(options.Since) {
- logChannel <- nll
- }
- }
- }
-
- go func() {
- var partial string
- for line := range t.Lines {
- nll, err := newLogLine(line.Text)
- if err != nil {
- logrus.Error(err)
- continue
- }
- if nll.Partial() {
- partial = partial + nll.Msg
- continue
- } else if !nll.Partial() && len(partial) > 1 {
- nll.Msg = partial
- partial = ""
- }
- nll.CID = c.ID()
- if nll.Since(options.Since) {
- logChannel <- nll
- }
- }
- options.WaitGroup.Done()
- }()
- return nil
-}
-
-// getLogFile returns an hp tail for a container given options
-func getLogFile(path string, options *LogOptions) (*tail.Tail, []*LogLine, error) {
+// GetLogFile returns an hpcloud/tail Tail for a container given options
+func GetLogFile(path string, options *LogOptions) (*tail.Tail, []*LogLine, error) {
var (
whence int
err error
@@ -154,7 +88,7 @@ func getTailLog(path string, tail int) ([]*LogLine, error) {
if len(splitContent[i]) == 0 {
continue
}
- nll, err := newLogLine(splitContent[i])
+ nll, err := NewLogLine(splitContent[i])
if err != nil {
return nil, err
}
@@ -191,7 +125,7 @@ func (l *LogLine) String(options *LogOptions) string {
out = fmt.Sprintf("%s ", cid)
}
if options.Timestamps {
- out = out + fmt.Sprintf("%s ", l.Time.Format(logTimeFormat))
+ out = out + fmt.Sprintf("%s ", l.Time.Format(LogTimeFormat))
}
return out + l.Msg
}
@@ -201,13 +135,13 @@ func (l *LogLine) Since(since time.Time) bool {
return l.Time.After(since)
}
-// newLogLine creates a logLine struct from a container log string
-func newLogLine(line string) (*LogLine, error) {
+// NewLogLine creates a LogLine struct from a container log string
+func NewLogLine(line string) (*LogLine, error) {
splitLine := strings.Split(line, " ")
if len(splitLine) < 4 {
return nil, errors.Errorf("'%s' is not a valid container log line", line)
}
- logTime, err := time.Parse(logTimeFormat, splitLine[0])
+ logTime, err := time.Parse(LogTimeFormat, splitLine[0])
if err != nil {
return nil, errors.Wrapf(err, "unable to convert time %s from container log", splitLine[0])
}
@@ -222,8 +156,5 @@ func newLogLine(line string) (*LogLine, error) {
// Partial returns a bool if the log line is a partial log type
func (l *LogLine) Partial() bool {
- if l.ParseLogType == partialLogType {
- return true
- }
- return false
+ return l.ParseLogType == PartialLogType
}
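
With the move to the logs package, the line parser is exported. A minimal sketch of reading one line, assuming the k8s-file layout conmon writes (timestamp, stream, partial/full flag, then the message); the sample line itself is invented:

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/logs"
)

func main() {
	// Assumed layout: timestamp in logs.LogTimeFormat, stream name,
	// "P"/"F" flag, then the message text.
	raw := "2019-06-01T12:00:00.000000000Z stdout F hello from the container"

	line, err := logs.NewLogLine(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(line.Msg, line.Partial())
}
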
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index b8a916de3..bef3f7739 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -16,8 +16,8 @@ import (
cnitypes "github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
+ "github.com/containers/libpod/pkg/errorhandling"
"github.com/containers/libpod/pkg/firewall"
- "github.com/containers/libpod/pkg/inspect"
"github.com/containers/libpod/pkg/netns"
"github.com/containers/libpod/pkg/rootless"
"github.com/cri-o/ocicni/pkg/ocicni"
@@ -29,21 +29,23 @@ import (
// Get an OCICNI network config
func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP) ocicni.PodNetwork {
+ defaultNetwork := r.netPlugin.GetDefaultNetworkName()
network := ocicni.PodNetwork{
- Name: name,
- Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces
- ID: id,
- NetNS: nsPath,
- PortMappings: ports,
- Networks: networks,
+ Name: name,
+ Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces
+ ID: id,
+ NetNS: nsPath,
+ Networks: networks,
+ RuntimeConfig: map[string]ocicni.RuntimeConfig{
+ defaultNetwork: {PortMappings: ports},
+ },
}
if staticIP != nil {
- defaultNetwork := r.netPlugin.GetDefaultNetworkName()
-
network.Networks = []string{defaultNetwork}
- network.NetworkConfig = make(map[string]ocicni.NetworkConfig)
- network.NetworkConfig[defaultNetwork] = ocicni.NetworkConfig{IP: staticIP.String()}
+ network.RuntimeConfig = map[string]ocicni.RuntimeConfig{
+ defaultNetwork: {IP: staticIP.String(), PortMappings: ports},
+ }
}
return network
@@ -149,8 +151,8 @@ func checkSlirpFlags(path string) (bool, bool, error) {
// Configure the network namespace for a rootless container
func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
- defer ctr.rootlessSlirpSyncR.Close()
- defer ctr.rootlessSlirpSyncW.Close()
+ defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncR)
+ defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncW)
path := r.config.NetworkCmdPath
@@ -167,11 +169,11 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
if err != nil {
return errors.Wrapf(err, "failed to open pipe")
}
- defer syncR.Close()
- defer syncW.Close()
+ defer errorhandling.CloseQuiet(syncR)
+ defer errorhandling.CloseQuiet(syncW)
havePortMapping := len(ctr.Config().PortMappings) > 0
- apiSocket := filepath.Join(r.ociRuntime.tmpDir, fmt.Sprintf("%s.net", ctr.config.ID))
+ apiSocket := filepath.Join(ctr.ociRuntime.tmpDir, fmt.Sprintf("%s.net", ctr.config.ID))
cmdArgs := []string{}
if havePortMapping {
@@ -199,7 +201,11 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
if err := cmd.Start(); err != nil {
return errors.Wrapf(err, "failed to start slirp4netns process")
}
- defer cmd.Process.Release()
+ defer func() {
+ if err := cmd.Process.Release(); err != nil {
+ logrus.Errorf("unable to release comman process: %q", err)
+ }
+ }()
b := make([]byte, 16)
for {
@@ -266,7 +272,11 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
if err != nil {
return errors.Wrapf(err, "cannot open connection to %s", apiSocket)
}
- defer conn.Close()
+ defer func() {
+ if err := conn.Close(); err != nil {
+ logrus.Errorf("unable to close connection: %q", err)
+ }
+ }()
hostIP := i.HostIP
if hostIP == "" {
hostIP = "0.0.0.0"
@@ -293,14 +303,14 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
return errors.Wrapf(err, "cannot shutdown the socket %s", apiSocket)
}
buf := make([]byte, 2048)
- len, err := conn.Read(buf)
+ readLength, err := conn.Read(buf)
if err != nil {
return errors.Wrapf(err, "cannot read from control socket %s", apiSocket)
}
// if there is no 'error' key in the received JSON data, then the operation was
// successful.
var y map[string]interface{}
- if err := json.Unmarshal(buf[0:len], &y); err != nil {
+ if err := json.Unmarshal(buf[0:readLength], &y); err != nil {
return errors.Wrapf(err, "error parsing error status from slirp4netns")
}
if e, found := y["error"]; found {
@@ -331,7 +341,9 @@ func (r *Runtime) setupNetNS(ctr *Container) (err error) {
if err != nil {
return errors.Wrapf(err, "cannot open %s", nsPath)
}
- mountPointFd.Close()
+ if err := mountPointFd.Close(); err != nil {
+ return err
+ }
if err := unix.Mount(nsProcess, nsPath, "none", unix.MS_BIND, ""); err != nil {
return errors.Wrapf(err, "cannot mount %s", nsPath)
@@ -351,12 +363,12 @@ func (r *Runtime) setupNetNS(ctr *Container) (err error) {
// Join an existing network namespace
func joinNetNS(path string) (ns.NetNS, error) {
- ns, err := ns.GetNS(path)
+ netNS, err := ns.GetNS(path)
if err != nil {
return nil, errors.Wrapf(err, "error retrieving network namespace at %s", path)
}
- return ns, nil
+ return netNS, nil
}
// Close a network namespace.
@@ -470,7 +482,7 @@ func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) {
return netStats, err
}
-func (c *Container) getContainerNetworkInfo(data *inspect.ContainerInspectData) *inspect.ContainerInspectData {
+func (c *Container) getContainerNetworkInfo(data *InspectContainerData) *InspectContainerData {
if c.state.NetNS != nil && len(c.state.NetworkStatus) > 0 {
// Report network settings from the first pod network
result := c.state.NetworkStatus[0]
diff --git a/libpod/networking_unsupported.go b/libpod/networking_unsupported.go
index 3a8ac4455..d9b3730aa 100644
--- a/libpod/networking_unsupported.go
+++ b/libpod/networking_unsupported.go
@@ -2,26 +2,24 @@
package libpod
-import (
- "github.com/containers/libpod/pkg/inspect"
-)
+import "github.com/containers/libpod/libpod/define"
func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
func (r *Runtime) setupNetNS(ctr *Container) (err error) {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
func (r *Runtime) teardownNetNS(ctr *Container) error {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
func (r *Runtime) createNetNS(ctr *Container) (err error) {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
-func (c *Container) getContainerNetworkInfo(data *inspect.ContainerInspectData) *inspect.ContainerInspectData {
+func (c *Container) getContainerNetworkInfo(data *InspectContainerData) *InspectContainerData {
return nil
}
diff --git a/libpod/oci.go b/libpod/oci.go
index 7138108c5..193e66aaf 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -11,6 +11,7 @@ import (
"strings"
"time"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/util"
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -58,32 +59,64 @@ type OCIRuntime struct {
logSizeMax int64
noPivot bool
reservePorts bool
+ supportsJSON bool
}
-// syncInfo is used to return data from monitor process to daemon
-type syncInfo struct {
- Pid int `json:"pid"`
- Message string `json:"message,omitempty"`
+// ociError is used to parse the OCI runtime JSON log. It is not part of the
+// OCI runtime specifications, it follows what runc does
+type ociError struct {
+ Level string `json:"level,omitempty"`
+ Time string `json:"time,omitempty"`
+ Msg string `json:"msg,omitempty"`
}
-// Make a new OCI runtime with provided options
-func newOCIRuntime(oruntime OCIRuntimePath, conmonPath string, conmonEnv []string, cgroupManager string, tmpDir string, logSizeMax int64, noPivotRoot bool, reservePorts bool) (*OCIRuntime, error) {
+// Make a new OCI runtime with provided options.
+// The first path that points to a valid executable will be used.
+func newOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *RuntimeConfig, supportsJSON bool) (*OCIRuntime, error) {
+ if name == "" {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "the OCI runtime must be provided a non-empty name")
+ }
+
runtime := new(OCIRuntime)
- runtime.name = oruntime.Name
- runtime.path = oruntime.Paths[0]
+ runtime.name = name
runtime.conmonPath = conmonPath
- runtime.conmonEnv = conmonEnv
- runtime.cgroupManager = cgroupManager
- runtime.tmpDir = tmpDir
- runtime.logSizeMax = logSizeMax
- runtime.noPivot = noPivotRoot
- runtime.reservePorts = reservePorts
+
+ runtime.conmonEnv = runtimeCfg.ConmonEnvVars
+ runtime.cgroupManager = runtimeCfg.CgroupManager
+ runtime.tmpDir = runtimeCfg.TmpDir
+ runtime.logSizeMax = runtimeCfg.MaxLogSize
+ runtime.noPivot = runtimeCfg.NoPivotRoot
+ runtime.reservePorts = runtimeCfg.EnablePortReservation
+
+ // TODO: probe OCI runtime for feature and enable automatically if
+ // available.
+ runtime.supportsJSON = supportsJSON
+
+ foundPath := false
+ for _, path := range paths {
+ stat, err := os.Stat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ continue
+ }
+ return nil, errors.Wrapf(err, "cannot stat %s", path)
+ }
+ if !stat.Mode().IsRegular() {
+ continue
+ }
+ foundPath = true
+ runtime.path = path
+ break
+ }
+ if !foundPath {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "no valid executable found for OCI runtime %s", name)
+ }
runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits")
runtime.socketsDir = filepath.Join(runtime.tmpDir, "socket")
- if cgroupManager != CgroupfsCgroupsManager && cgroupManager != SystemdCgroupsManager {
- return nil, errors.Wrapf(ErrInvalidArg, "invalid cgroup manager specified: %s", cgroupManager)
+ if runtime.cgroupManager != CgroupfsCgroupsManager && runtime.cgroupManager != SystemdCgroupsManager {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager specified: %s", runtime.cgroupManager)
}
// Create the exit files and attach sockets directories
@@ -130,7 +163,6 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
return nil, errors.Wrapf(err, "cannot get file for UDP socket")
}
files = append(files, f)
- break
case "tcp":
addr, err := net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort))
@@ -147,13 +179,11 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
return nil, errors.Wrapf(err, "cannot get file for TCP socket")
}
files = append(files, f)
- break
case "sctp":
if !notifySCTP {
notifySCTP = true
logrus.Warnf("port reservation for SCTP is not supported")
}
- break
default:
return nil, fmt.Errorf("unknown protocol %s", i.Protocol)
@@ -178,7 +208,7 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
// If not using the OCI runtime, we don't need to do most of this.
if !useRuntime {
// If the container's not running, nothing to do.
- if ctr.state.State != ContainerStateRunning && ctr.state.State != ContainerStatePaused {
+ if ctr.state.State != define.ContainerStateRunning && ctr.state.State != define.ContainerStatePaused {
return nil
}
@@ -194,7 +224,9 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
}
// Alright, it exists. Transition to Stopped state.
- ctr.state.State = ContainerStateStopped
+ ctr.state.State = define.ContainerStateStopped
+ ctr.state.PID = 0
+ ctr.state.ConmonPID = 0
// Read the exit file to get our stopped time and exit code.
return ctr.handleExitFile(exitFile, info)
@@ -207,6 +239,7 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
cmd := exec.Command(r.path, "state", ctr.ID())
cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
+
outPipe, err := cmd.StdoutPipe()
if err != nil {
return errors.Wrapf(err, "getting stdout pipe")
@@ -222,17 +255,23 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
return errors.Wrapf(err, "error getting container %s state", ctr.ID())
}
if strings.Contains(string(out), "does not exist") {
- ctr.removeConmonFiles()
+ if err := ctr.removeConmonFiles(); err != nil {
+ logrus.Debugf("unable to remove conmon files for container %s", ctr.ID())
+ }
ctr.state.ExitCode = -1
ctr.state.FinishedTime = time.Now()
- ctr.state.State = ContainerStateExited
+ ctr.state.State = define.ContainerStateExited
return nil
}
return errors.Wrapf(err, "error getting container %s state. stderr/out: %s", ctr.ID(), out)
}
- defer cmd.Wait()
+ defer func() {
+ _ = cmd.Wait()
+ }()
- errPipe.Close()
+ if err := errPipe.Close(); err != nil {
+ return err
+ }
out, err := ioutil.ReadAll(outPipe)
if err != nil {
return errors.Wrapf(err, "error reading stdout: %s", ctr.ID())
@@ -244,21 +283,21 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
switch state.Status {
case "created":
- ctr.state.State = ContainerStateCreated
+ ctr.state.State = define.ContainerStateCreated
case "paused":
- ctr.state.State = ContainerStatePaused
+ ctr.state.State = define.ContainerStatePaused
case "running":
- ctr.state.State = ContainerStateRunning
+ ctr.state.State = define.ContainerStateRunning
case "stopped":
- ctr.state.State = ContainerStateStopped
+ ctr.state.State = define.ContainerStateStopped
default:
- return errors.Wrapf(ErrInternal, "unrecognized status returned by runtime for container %s: %s",
+ return errors.Wrapf(define.ErrInternal, "unrecognized status returned by runtime for container %s: %s",
ctr.ID(), state.Status)
}
// Only grab exit status if we were not already stopped
// If we were, it should already be in the database
- if ctr.state.State == ContainerStateStopped && oldState != ContainerStateStopped {
+ if ctr.state.State == define.ContainerStateStopped && oldState != define.ContainerStateStopped {
var fi os.FileInfo
chWait := make(chan error)
defer close(chWait)
@@ -346,104 +385,11 @@ func (r *OCIRuntime) unpauseContainer(ctr *Container) error {
return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "resume", ctr.ID())
}
-// execContainer executes a command in a running container
-// TODO: Add --detach support
-// TODO: Convert to use conmon
-// TODO: add --pid-file and use that to generate exec session tracking
-func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty bool, cwd, user, sessionID string, streams *AttachStreams, preserveFDs int) (*exec.Cmd, error) {
- if len(cmd) == 0 {
- return nil, errors.Wrapf(ErrInvalidArg, "must provide a command to execute")
- }
-
- if sessionID == "" {
- return nil, errors.Wrapf(ErrEmptyID, "must provide a session ID for exec")
- }
-
- runtimeDir, err := util.GetRootlessRuntimeDir()
- if err != nil {
- return nil, err
- }
-
- args := []string{}
-
- // TODO - should we maintain separate logpaths for exec sessions?
- args = append(args, "exec")
-
- if cwd != "" {
- args = append(args, "--cwd", cwd)
- }
-
- args = append(args, "--pid-file", c.execPidPath(sessionID))
-
- if tty {
- args = append(args, "--tty")
- } else {
- args = append(args, "--tty=false")
- }
-
- if user != "" {
- args = append(args, "--user", user)
- }
-
- if preserveFDs > 0 {
- args = append(args, fmt.Sprintf("--preserve-fds=%d", preserveFDs))
- }
- if c.config.Spec.Process.NoNewPrivileges {
- args = append(args, "--no-new-privs")
- }
-
- for _, cap := range capAdd {
- args = append(args, "--cap", cap)
- }
-
- for _, envVar := range env {
- args = append(args, "--env", envVar)
- }
-
- // Append container ID, name and command
- args = append(args, c.ID())
- args = append(args, cmd...)
-
- logrus.Debugf("Starting runtime %s with following arguments: %v", r.path, args)
-
- execCmd := exec.Command(r.path, args...)
-
- if streams.AttachOutput {
- execCmd.Stdout = streams.OutputStream
- }
- if streams.AttachInput {
- execCmd.Stdin = streams.InputStream
- }
- if streams.AttachError {
- execCmd.Stderr = streams.ErrorStream
- }
-
- execCmd.Env = append(execCmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
-
- if preserveFDs > 0 {
- for fd := 3; fd < 3+preserveFDs; fd++ {
- execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)))
- }
- }
-
- if err := execCmd.Start(); err != nil {
- return nil, errors.Wrapf(err, "cannot start container %s", c.ID())
- }
-
- if preserveFDs > 0 {
- for fd := 3; fd < 3+preserveFDs; fd++ {
- // These fds were passed down to the runtime. Close them
- // and not interfere
- os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close()
- }
- }
-
- return execCmd, nil
-}
-
// checkpointContainer checkpoints the given container
func (r *OCIRuntime) checkpointContainer(ctr *Container, options ContainerCheckpointOptions) error {
- label.SetSocketLabel(ctr.ProcessLabel())
+ if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil {
+ return err
+ }
// imagePath is used by CRIU to store the actual checkpoint files
imagePath := ctr.CheckpointPath()
// workPath will be used to store dump.log and stats-dump
diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go
new file mode 100644
index 000000000..22afa7416
--- /dev/null
+++ b/libpod/oci_attach_linux.go
@@ -0,0 +1,260 @@
+//+build linux
+
+package libpod
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/errorhandling"
+ "github.com/containers/libpod/pkg/kubeutils"
+ "github.com/containers/libpod/utils"
+ "github.com/docker/docker/pkg/term"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+ "k8s.io/client-go/tools/remotecommand"
+)
+
+/* Sync with stdpipe_t in conmon.c */
+const (
+ AttachPipeStdin = 1
+ AttachPipeStdout = 2
+ AttachPipeStderr = 3
+)
+
+// Attach to the given container
+// Does not check if state is appropriate
+// started is only required if startContainer is true
+func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool) error {
+ if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput {
+ return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
+ }
+ if startContainer && started == nil {
+ return errors.Wrapf(define.ErrInternal, "started chan not passed when startContainer set")
+ }
+
+ detachKeys, err := processDetachKeys(keys)
+ if err != nil {
+ return err
+ }
+
+ logrus.Debugf("Attaching to container %s", c.ID())
+
+ registerResizeFunc(resize, c.bundlePath())
+
+ socketPath := buildSocketPath(c.AttachSocketPath())
+
+ conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
+ if err != nil {
+ return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
+ }
+ defer func() {
+ if err := conn.Close(); err != nil {
+ logrus.Errorf("unable to close socket: %q", err)
+ }
+ }()
+
+ // If starting was requested, start the container and notify when that's
+ // done.
+ if startContainer {
+ if err := c.start(); err != nil {
+ return err
+ }
+ started <- true
+ }
+
+ receiveStdoutError, stdinDone := setupStdioChannels(streams, conn, detachKeys)
+ return readStdio(streams, receiveStdoutError, stdinDone)
+}
+
+// Attach to the given container's exec session
+// attachFd and startFd must be open file descriptors
+// attachFd must be the output side of the pipe. attachFd is used for two things:
+// conmon will first send a nonce value across the pipe indicating it has set up its side of the console socket
+// this ensures attachToExec gets all of the output of the called process
+// conmon will then send the exit code of the exec process, or an error in the exec session
+// startFd must be the input side of the pipe.
+// conmon will wait to start the exec session until the parent process has set up the console socket.
+// Once attachToExec successfully attaches to the console socket, the child conmon process responsible for calling runtime exec
+// reads from the output side of startFd and learns that it may start the child process.
+// Thus, the order goes as follows:
+// 1. the conmon parent process sets up its console socket and sends on attachFd
+// 2. attachToExec attaches to the console socket after reading on attachFd
+// 3. the child waits on startFd for attachToExec to attach to said console socket
+// 4. attachToExec sends on startFd, signalling that it has attached to the socket and the child is ready to go
+// 5. the child receives on startFd and runs the runtime exec command
+// attachToExec is responsible for closing startFd and attachFd
+func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, sessionID string, startFd, attachFd *os.File) error {
+ if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput {
+ return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
+ }
+ if startFd == nil || attachFd == nil {
+ return errors.Wrapf(define.ErrInvalidArg, "start sync pipe and attach sync pipe must be defined for exec attach")
+ }
+
+ defer errorhandling.CloseQuiet(startFd)
+ defer errorhandling.CloseQuiet(attachFd)
+
+ detachKeys, err := processDetachKeys(keys)
+ if err != nil {
+ return err
+ }
+
+ logrus.Debugf("Attaching to container %s exec session %s", c.ID(), sessionID)
+
+ registerResizeFunc(resize, c.execBundlePath(sessionID))
+
+ // set up the socket path, such that it is the correct length and location for exec
+ socketPath := buildSocketPath(c.execAttachSocketPath(sessionID))
+
+ // 2: read from attachFd that the parent process has set up the console socket
+ if _, err := readConmonPipeData(attachFd, ""); err != nil {
+ return err
+ }
+ // 2: then attach
+ conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
+ if err != nil {
+ return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
+ }
+ defer func() {
+ if err := conn.Close(); err != nil {
+ logrus.Errorf("unable to close socket: %q", err)
+ }
+ }()
+
+ // start listening on stdio of the process
+ receiveStdoutError, stdinDone := setupStdioChannels(streams, conn, detachKeys)
+
+ // 4: send start message to child
+ if err := writeConmonPipeData(startFd); err != nil {
+ return err
+ }
+
+ return readStdio(streams, receiveStdoutError, stdinDone)
+}
+
+func processDetachKeys(keys string) ([]byte, error) {
+ // Check the validity of the provided keys first
+ if len(keys) == 0 {
+ keys = DefaultDetachKeys
+ }
+ detachKeys, err := term.ToBytes(keys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid detach keys")
+ }
+ return detachKeys, nil
+}
+
+func registerResizeFunc(resize <-chan remotecommand.TerminalSize, bundlePath string) {
+ kubeutils.HandleResizing(resize, func(size remotecommand.TerminalSize) {
+ controlPath := filepath.Join(bundlePath, "ctl")
+ controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY, 0)
+ if err != nil {
+ logrus.Debugf("Could not open ctl file: %v", err)
+ return
+ }
+ defer controlFile.Close()
+
+ logrus.Debugf("Received a resize event: %+v", size)
+ if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, size.Height, size.Width); err != nil {
+ logrus.Warnf("Failed to write to control file to resize terminal: %v", err)
+ }
+ })
+}
+
+func buildSocketPath(socketPath string) string {
+ maxUnixLength := unixPathLength()
+ if maxUnixLength < len(socketPath) {
+ socketPath = socketPath[0:maxUnixLength]
+ }
+
+ logrus.Debug("connecting to socket ", socketPath)
+ return socketPath
+}
+
+func setupStdioChannels(streams *AttachStreams, conn *net.UnixConn, detachKeys []byte) (chan error, chan error) {
+ receiveStdoutError := make(chan error)
+ go func() {
+ receiveStdoutError <- redirectResponseToOutputStreams(streams.OutputStream, streams.ErrorStream, streams.AttachOutput, streams.AttachError, conn)
+ }()
+
+ stdinDone := make(chan error)
+ go func() {
+ var err error
+ if streams.AttachInput {
+ _, err = utils.CopyDetachable(conn, streams.InputStream, detachKeys)
+ if connErr := conn.CloseWrite(); connErr != nil {
+ logrus.Errorf("unable to close conn: %q", connErr)
+ }
+ }
+ stdinDone <- err
+ }()
+
+ return receiveStdoutError, stdinDone
+}
+
+func redirectResponseToOutputStreams(outputStream, errorStream io.Writer, writeOutput, writeError bool, conn io.Reader) error {
+ var err error
+ buf := make([]byte, 8192+1) /* Sync with conmon STDIO_BUF_SIZE */
+ for {
+ nr, er := conn.Read(buf)
+ if nr > 0 {
+ var dst io.Writer
+ var doWrite bool
+ switch buf[0] {
+ case AttachPipeStdout:
+ dst = outputStream
+ doWrite = writeOutput
+ case AttachPipeStderr:
+ dst = errorStream
+ doWrite = writeError
+ default:
+ logrus.Infof("Received unexpected attach type %+d", buf[0])
+ }
+ if dst == nil {
+ return errors.New("output destination cannot be nil")
+ }
+
+ if doWrite {
+ nw, ew := dst.Write(buf[1:nr])
+ if ew != nil {
+ err = ew
+ break
+ }
+ if nr != nw+1 {
+ err = io.ErrShortWrite
+ break
+ }
+ }
+ }
+ if er == io.EOF {
+ break
+ }
+ if er != nil {
+ err = er
+ break
+ }
+ }
+ return err
+}
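// Sketch of the framing handled above (values are illustrative): conmon multiplexes
// the container's stdout and stderr over the attach socket as packets of at most
// 8192 payload bytes, each prefixed with one byte naming the stream:
//   [2]"hello from stdout\n"  -> copied to outputStream
//   [3]"oops on stderr\n"     -> copied to errorStream
// A packet with any other prefix is logged and ends the copy loop with an error.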
+
+func readStdio(streams *AttachStreams, receiveStdoutError, stdinDone chan error) error {
+ var err error
+ select {
+ case err = <-receiveStdoutError:
+ return err
+ case err = <-stdinDone:
+ if err == define.ErrDetach {
+ return err
+ }
+ if streams.AttachOutput || streams.AttachError {
+ return <-receiveStdoutError
+ }
+ }
+ return nil
+}
diff --git a/libpod/oci_attach_linux_cgo.go b/libpod/oci_attach_linux_cgo.go
new file mode 100644
index 000000000..d81243360
--- /dev/null
+++ b/libpod/oci_attach_linux_cgo.go
@@ -0,0 +1,11 @@
+//+build linux,cgo
+
+package libpod
+
+//#include <sys/un.h>
+// extern int unix_path_length(){struct sockaddr_un addr; return sizeof(addr.sun_path) - 1;}
+import "C"
+
+func unixPathLength() int {
+ return int(C.unix_path_length())
+}
diff --git a/libpod/oci_attach_linux_nocgo.go b/libpod/oci_attach_linux_nocgo.go
new file mode 100644
index 000000000..a514a555d
--- /dev/null
+++ b/libpod/oci_attach_linux_nocgo.go
@@ -0,0 +1,7 @@
+//+build linux,!cgo
+
+package libpod
+
+func unixPathLength() int {
+ return 107
+}
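// Both variants answer the same question: how long a path may be for a Unix socket.
// On Linux sockaddr_un.sun_path is 108 bytes, leaving 107 usable characters once the
// terminating NUL is reserved; the cgo variant computes this from the C struct while
// the nocgo variant hard-codes the same value.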
diff --git a/libpod/oci_attach_unsupported.go b/libpod/oci_attach_unsupported.go
new file mode 100644
index 000000000..987d2c973
--- /dev/null
+++ b/libpod/oci_attach_unsupported.go
@@ -0,0 +1,18 @@
+//+build !linux
+
+package libpod
+
+import (
+ "os"
+
+ "github.com/containers/libpod/libpod/define"
+ "k8s.io/client-go/tools/remotecommand"
+)
+
+func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool) error {
+ return define.ErrNotImplemented
+}
+
+func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, sessionID string, startFd *os.File, attachFd *os.File) error {
+ return define.ErrNotImplemented
+}
diff --git a/libpod/oci_internal_linux.go b/libpod/oci_internal_linux.go
new file mode 100644
index 000000000..0bcd021db
--- /dev/null
+++ b/libpod/oci_internal_linux.go
@@ -0,0 +1,496 @@
+// +build linux
+
+package libpod
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/cgroups"
+ "github.com/containers/libpod/pkg/errorhandling"
+ "github.com/containers/libpod/pkg/lookup"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/containers/libpod/utils"
+ "github.com/coreos/go-systemd/activation"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/selinux/go-selinux"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+// createOCIContainer generates this container's main conmon instance and prepares it for starting
+func (r *OCIRuntime) createOCIContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (err error) {
+ var stderrBuf bytes.Buffer
+
+ runtimeDir, err := util.GetRootlessRuntimeDir()
+ if err != nil {
+ return err
+ }
+
+ parentSyncPipe, childSyncPipe, err := newPipe()
+ if err != nil {
+ return errors.Wrapf(err, "error creating socket pair")
+ }
+ defer errorhandling.CloseQuiet(parentSyncPipe)
+
+ childStartPipe, parentStartPipe, err := newPipe()
+ if err != nil {
+ return errors.Wrapf(err, "error creating socket pair for start pipe")
+ }
+
+ defer errorhandling.CloseQuiet(parentStartPipe)
+
+ var ociLog string
+ if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
+ ociLog = filepath.Join(ctr.state.RunDir, "oci-log")
+ }
+ args := r.sharedConmonArgs(ctr, ctr.ID(), ctr.bundlePath(), filepath.Join(ctr.state.RunDir, "pidfile"), ctr.LogPath(), r.exitsDir, ociLog)
+
+ if ctr.config.Spec.Process.Terminal {
+ args = append(args, "-t")
+ } else if ctr.config.Stdin {
+ args = append(args, "-i")
+ }
+
+ if ctr.config.ConmonPidFile != "" {
+ args = append(args, "--conmon-pidfile", ctr.config.ConmonPidFile)
+ }
+
+ if r.noPivot {
+ args = append(args, "--no-pivot")
+ }
+
+ if len(ctr.config.ExitCommand) > 0 {
+ args = append(args, "--exit-command", ctr.config.ExitCommand[0])
+ for _, arg := range ctr.config.ExitCommand[1:] {
+ args = append(args, []string{"--exit-command-arg", arg}...)
+ }
+ }
+
+ if restoreOptions != nil {
+ args = append(args, "--restore", ctr.CheckpointPath())
+ if restoreOptions.TCPEstablished {
+ args = append(args, "--runtime-opt", "--tcp-established")
+ }
+ }
+
+ logrus.WithFields(logrus.Fields{
+ "args": args,
+ }).Debugf("running conmon: %s", r.conmonPath)
+
+ cmd := exec.Command(r.conmonPath, args...)
+ cmd.Dir = ctr.bundlePath()
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Setpgid: true,
+ }
+ // TODO this is probably a really bad idea for some uses
+ // Make this configurable
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ if ctr.config.Spec.Process.Terminal {
+ cmd.Stderr = &stderrBuf
+ }
+
+ // 0, 1 and 2 are stdin, stdout and stderr
+ conmonEnv, envFiles, err := r.configureConmonEnv(runtimeDir)
+ if err != nil {
+ return err
+ }
+
+ cmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", 3), fmt.Sprintf("_OCI_STARTPIPE=%d", 4))
+ cmd.Env = append(cmd.Env, conmonEnv...)
+ cmd.ExtraFiles = append(cmd.ExtraFiles, childSyncPipe, childStartPipe)
+ cmd.ExtraFiles = append(cmd.ExtraFiles, envFiles...)
+
+ if r.reservePorts && !ctr.config.NetMode.IsSlirp4netns() {
+ ports, err := bindPorts(ctr.config.PortMappings)
+ if err != nil {
+ return err
+ }
+
+ // Leak the ports we bound in the conmon process. These fds won't be used
+ // by the container and conmon will keep the ports busy so that another
+ // process cannot use them.
+ cmd.ExtraFiles = append(cmd.ExtraFiles, ports...)
+ }
+
+ if ctr.config.NetMode.IsSlirp4netns() {
+ ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
+ if err != nil {
+ return errors.Wrapf(err, "failed to create rootless network sync pipe")
+ }
+ // Leak one end in conmon, the other one will be leaked into slirp4netns
+ cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessSlirpSyncW)
+ }
+
+ err = startCommandGivenSelinux(cmd)
+ // regardless of whether we errored or not, we no longer need the child pipes
+ childSyncPipe.Close()
+ childStartPipe.Close()
+ if err != nil {
+ return err
+ }
+ if err := r.moveConmonToCgroupAndSignal(ctr, cmd, parentStartPipe, ctr.ID()); err != nil {
+ return err
+ }
+ /* Wait for initial setup and fork, and reap child */
+ err = cmd.Wait()
+ if err != nil {
+ return err
+ }
+
+ pid, err := readConmonPipeData(parentSyncPipe, ociLog)
+ if err != nil {
+ if err2 := r.deleteContainer(ctr); err2 != nil {
+ logrus.Errorf("Error removing container %s from runtime after creation failed", ctr.ID())
+ }
+ return err
+ }
+ ctr.state.PID = pid
+
+ conmonPID, err := readConmonPidFile(ctr.config.ConmonPidFile)
+ if err != nil {
+ logrus.Warnf("error reading conmon pid file for container %s: %s", ctr.ID(), err.Error())
+ } else if conmonPID > 0 {
+ // conmon not having a pid file is a valid state, so don't set it if we don't have it
+ logrus.Infof("Got Conmon PID as %d", conmonPID)
+ ctr.state.ConmonPID = conmonPID
+ }
+
+ return nil
+}
+
+// prepareProcessExec writes the process.json used in runc exec -p and returns it as an open *os.File.
+// The caller is responsible for closing the returned *os.File if needed.
+func prepareProcessExec(c *Container, cmd, env []string, tty bool, cwd, user, sessionID string) (*os.File, error) {
+ f, err := ioutil.TempFile(c.execBundlePath(sessionID), "exec-process-")
+ if err != nil {
+ return nil, err
+ }
+
+ pspec := c.config.Spec.Process
+ pspec.Args = cmd
+ // We need to default this to false else it will inherit terminal as true
+ // from the container.
+ pspec.Terminal = false
+ if tty {
+ pspec.Terminal = true
+ }
+ if len(env) > 0 {
+ pspec.Env = append(pspec.Env, env...)
+ }
+
+ if cwd != "" {
+ pspec.Cwd = cwd
+
+ }
+ // If user was set, look it up in the container to get a UID to use on
+ // the host
+ if user != "" {
+ execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, user, nil)
+ if err != nil {
+ return nil, err
+ }
+ sgids := make([]uint32, 0, len(execUser.Sgids))
+ for _, sgid := range execUser.Sgids {
+ sgids = append(sgids, uint32(sgid))
+ }
+ processUser := spec.User{
+ UID: uint32(execUser.Uid),
+ GID: uint32(execUser.Gid),
+ AdditionalGids: sgids,
+ }
+
+ pspec.User = processUser
+ }
+
+ processJSON, err := json.Marshal(pspec)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := ioutil.WriteFile(f.Name(), processJSON, 0644); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
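// A rough sketch (assumed values) of the process.json written above and handed to
// `runc exec --process`: the container's own process spec with the command, terminal
// flag, extra env, cwd and resolved user substituted in, e.g.
//   {"terminal":true,"user":{"uid":0,"gid":0},"args":["/bin/sh"],"cwd":"/","env":["TERM=xterm"]}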
+
+// configureConmonEnv gets the environment values to add to conmon's exec struct
+// TODO this may want to be less hardcoded/more configurable in the future
+func (r *OCIRuntime) configureConmonEnv(runtimeDir string) ([]string, []*os.File, error) {
+ env := make([]string, 0, 6)
+ env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
+ env = append(env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED")))
+ env = append(env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID")))
+ home, err := homeDir()
+ if err != nil {
+ return nil, nil, err
+ }
+ env = append(env, fmt.Sprintf("HOME=%s", home))
+
+ extraFiles := make([]*os.File, 0)
+ if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok {
+ env = append(env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify))
+ }
+ if listenfds, ok := os.LookupEnv("LISTEN_FDS"); ok {
+ env = append(env, fmt.Sprintf("LISTEN_FDS=%s", listenfds), "LISTEN_PID=1")
+ fds := activation.Files(false)
+ extraFiles = append(extraFiles, fds...)
+ }
+ return env, extraFiles, nil
+}
+
+// sharedConmonArgs takes common arguments for exec and create/restore and formats them for the conmon CLI
+func (r *OCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, pidPath, logPath, exitDir, ociLogPath string) []string {
+ // set the conmon API version to be able to use the correct sync struct keys
+ args := []string{"--api-version", "1"}
+ if r.cgroupManager == SystemdCgroupsManager {
+ args = append(args, "-s")
+ }
+ args = append(args, "-c", ctr.ID())
+ args = append(args, "-u", cuuid)
+ args = append(args, "-r", r.path)
+ args = append(args, "-b", bundlePath)
+ args = append(args, "-p", pidPath)
+
+ var logDriver string
+ switch ctr.LogDriver() {
+ case JournaldLogging:
+ logDriver = JournaldLogging
+ case JSONLogging:
+ fallthrough
+ default: //nolint-stylecheck
+ // No case here should happen except JSONLogging, but keep this here in case the options are extended
+ logrus.Errorf("%s logging specified but not supported. Choosing k8s-file logging instead", ctr.LogDriver())
+ fallthrough
+ case KubernetesLogging:
+ logDriver = fmt.Sprintf("%s:%s", KubernetesLogging, logPath)
+ }
+
+ args = append(args, "-l", logDriver)
+ args = append(args, "--exit-dir", exitDir)
+ args = append(args, "--socket-dir-path", r.socketsDir)
+ if r.logSizeMax >= 0 {
+ args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax))
+ }
+
+ logLevel := logrus.GetLevel()
+ args = append(args, "--log-level", logLevel.String())
+
+ if logLevel == logrus.DebugLevel {
+ logrus.Debugf("%s messages will be logged to syslog", r.conmonPath)
+ args = append(args, "--syslog")
+ }
+ if ociLogPath != "" {
+ args = append(args, "--runtime-arg", "--log-format=json", "--runtime-arg", "--log", fmt.Sprintf("--runtime-arg=%s", ociLogPath))
+ }
+ return args
+}
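// An illustrative, hypothetical invocation built from the flags above for a plain
// container create (paths and IDs are placeholders):
//   conmon --api-version 1 -c <ctrID> -u <ctrID> -r /usr/bin/runc -b <bundle> \
//     -p <rundir>/pidfile -l k8s-file:<log path> --exit-dir <tmpdir>/exits \
//     --socket-dir-path <tmpdir>/socket --log-level info
// The -s, --syslog, --log-size-max and --runtime-arg flags are only appended when the
// corresponding options are set.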
+
+// startCommandGivenSelinux starts the given command, setting the label of the
+// process so that SELinux doesn't block conmon communication, if SELinux is enabled
+func startCommandGivenSelinux(cmd *exec.Cmd) error {
+ if !selinux.GetEnabled() {
+ return cmd.Start()
+ }
+ // Set the label of the conmon process to be level :s0
+ // This will allow the container processes to talk to fifo-files
+ // passed into the container by conmon
+ var (
+ plabel string
+ con selinux.Context
+ err error
+ )
+ plabel, err = selinux.CurrentLabel()
+ if err != nil {
+ return errors.Wrapf(err, "Failed to get current SELinux label")
+ }
+
+ con, err = selinux.NewContext(plabel)
+ if err != nil {
+ return errors.Wrapf(err, "Failed to get new context from SELinux label")
+ }
+
+ runtime.LockOSThread()
+ if con["level"] != "s0" && con["level"] != "" {
+ con["level"] = "s0"
+ if err = label.SetProcessLabel(con.Get()); err != nil {
+ runtime.UnlockOSThread()
+ return err
+ }
+ }
+ err = cmd.Start()
+ // Ignore error returned from SetProcessLabel("") call,
+ // can't recover.
+ if labelErr := label.SetProcessLabel(""); labelErr != nil {
+ logrus.Errorf("unable to set process label: %q", err)
+ }
+ runtime.UnlockOSThread()
+ return err
+}
+
+// moveConmonToCgroupAndSignal gets a container's cgroupParent and moves the conmon process to that cgroup
+// it then signals for conmon to start by sending nonce data down the start fd
+func (r *OCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec.Cmd, startFd *os.File, uuid string) error {
+ cgroupParent := ctr.CgroupParent()
+ if os.Geteuid() == 0 {
+ if r.cgroupManager == SystemdCgroupsManager {
+ unitName := createUnitName("libpod-conmon", ctr.ID())
+
+ realCgroupParent := cgroupParent
+ splitParent := strings.Split(cgroupParent, "/")
+ if strings.HasSuffix(cgroupParent, ".slice") && len(splitParent) > 1 {
+ realCgroupParent = splitParent[len(splitParent)-1]
+ }
+
+ logrus.Infof("Running conmon under slice %s and unitName %s", realCgroupParent, unitName)
+ if err := utils.RunUnderSystemdScope(cmd.Process.Pid, realCgroupParent, unitName); err != nil {
+ logrus.Warnf("Failed to add conmon to systemd sandbox cgroup: %v", err)
+ }
+ } else {
+ cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon")
+ control, err := cgroups.New(cgroupPath, &spec.LinuxResources{})
+ if err != nil {
+ logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
+ } else {
+ // we still need to delete this cgroup once conmon exits;
+ // maybe we need a conmon monitor?
+ if err := control.AddPid(cmd.Process.Pid); err != nil {
+ logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
+ }
+ }
+ }
+ }
+
+ /* We set the cgroup, now the child can start creating children */
+ if err := writeConmonPipeData(startFd); err != nil {
+ return err
+ }
+ return nil
+}
+
+// newPipe creates a unix socket pair for communication
+func newPipe() (parent *os.File, child *os.File, err error) {
+ fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil
+}
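// Design note: SOCK_SEQPACKET (instead of the SOCK_STREAM pair used by the version
// this replaces) preserves message boundaries, so each write from conmon arrives as
// one discrete record on the parent end of the socketpair.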
+
+// readConmonPidFile attempts to read conmon's pid from its pid file
+func readConmonPidFile(pidFile string) (int, error) {
+ // Let's try reading the Conmon pid at the same time.
+ if pidFile != "" {
+ contents, err := ioutil.ReadFile(pidFile)
+ if err != nil {
+ return -1, err
+ }
+ // Convert it to an int
+ conmonPID, err := strconv.Atoi(string(contents))
+ if err != nil {
+ return -1, err
+ }
+ return conmonPID, nil
+ }
+ return 0, nil
+}
+
+// readConmonPipeData attempts to read a syncInfo struct from the pipe
+func readConmonPipeData(pipe *os.File, ociLog string) (int, error) {
+ // syncInfo is used to return data from monitor process to daemon
+ type syncInfo struct {
+ Data int `json:"data"`
+ Message string `json:"message,omitempty"`
+ }
+
+ // Wait to get container pid from conmon
+ type syncStruct struct {
+ si *syncInfo
+ err error
+ }
+ ch := make(chan syncStruct)
+ go func() {
+ var si *syncInfo
+ rdr := bufio.NewReader(pipe)
+ b, err := rdr.ReadBytes('\n')
+ if err != nil {
+ ch <- syncStruct{err: err}
+ return
+ }
+ if err := json.Unmarshal(b, &si); err != nil {
+ ch <- syncStruct{err: err}
+ return
+ }
+ ch <- syncStruct{si: si}
+ }()
+
+ data := -1
+ select {
+ case ss := <-ch:
+ if ss.err != nil {
+ return -1, errors.Wrapf(ss.err, "error reading container (probably exited) json message")
+ }
+ logrus.Debugf("Received: %d", ss.si.Data)
+ if ss.si.Data < 0 {
+ if ociLog != "" {
+ ociLogData, err := ioutil.ReadFile(ociLog)
+ if err == nil {
+ var ociErr ociError
+ if err := json.Unmarshal(ociLogData, &ociErr); err == nil {
+ return ss.si.Data, getOCIRuntimeError(ociErr.Msg)
+ }
+ }
+ }
+ // If we failed to parse the JSON errors, then print the output as it is
+ if ss.si.Message != "" {
+ return ss.si.Data, getOCIRuntimeError(ss.si.Message)
+ }
+ return ss.si.Data, errors.Wrapf(define.ErrInternal, "container create failed")
+ }
+ data = ss.si.Data
+ case <-time.After(ContainerCreateTimeout):
+ return -1, errors.Wrapf(define.ErrInternal, "container creation timeout")
+ }
+ return data, nil
+}
+
+func getOCIRuntimeError(runtimeMsg string) error {
+ if match, _ := regexp.MatchString(".*permission denied.*", runtimeMsg); match {
+ return errors.Wrapf(define.ErrOCIRuntimePermissionDenied, "%s", strings.Trim(runtimeMsg, "\n"))
+ }
+ if match, _ := regexp.MatchString(".*executable file not found in.*", runtimeMsg); match {
+ return errors.Wrapf(define.ErrOCIRuntimeNotFound, "%s", strings.Trim(runtimeMsg, "\n"))
+ }
+ return errors.Wrapf(define.ErrOCIRuntime, "%s", strings.Trim(runtimeMsg, "\n"))
+}
+
+// writeConmonPipeData writes nonce data to a pipe
+func writeConmonPipeData(pipe *os.File) error {
+ someData := []byte{0}
+ _, err := pipe.Write(someData)
+ return err
+}
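// Taken together, the two pipes form a small handshake: podman writes the single
// nonce byte above down the start pipe to let conmon proceed, and conmon replies on
// the sync pipe with one newline-terminated JSON message, e.g. (values assumed):
//   {"data": 26415}                        (success: the container or exec PID)
//   {"data": -1, "message": "exec failed"} (failure: message or the oci-log explains why)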
+
+// formatRuntimeOpts prepends opts passed to it with --runtime-opt for passing to conmon
+func formatRuntimeOpts(opts ...string) []string {
+ args := make([]string, 0, len(opts)*2)
+ for _, o := range opts {
+ args = append(args, "--runtime-opt", o)
+ }
+ return args
+}
diff --git a/libpod/oci_linux.go b/libpod/oci_linux.go
index 7c1c18052..45365203e 100644
--- a/libpod/oci_linux.go
+++ b/libpod/oci_linux.go
@@ -3,8 +3,6 @@
package libpod
import (
- "bufio"
- "bytes"
"fmt"
"os"
"os/exec"
@@ -14,63 +12,20 @@ import (
"syscall"
"time"
- "github.com/containerd/cgroups"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/errorhandling"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
"github.com/containers/libpod/utils"
pmount "github.com/containers/storage/pkg/mount"
- "github.com/coreos/go-systemd/activation"
- spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/opencontainers/selinux/go-selinux"
- "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
+ "k8s.io/client-go/tools/remotecommand"
)
const unknownPackage = "Unknown"
-func (r *OCIRuntime) moveConmonToCgroup(ctr *Container, cgroupParent string, cmd *exec.Cmd) error {
- if os.Geteuid() == 0 {
- if r.cgroupManager == SystemdCgroupsManager {
- unitName := createUnitName("libpod-conmon", ctr.ID())
-
- realCgroupParent := cgroupParent
- splitParent := strings.Split(cgroupParent, "/")
- if strings.HasSuffix(cgroupParent, ".slice") && len(splitParent) > 1 {
- realCgroupParent = splitParent[len(splitParent)-1]
- }
-
- logrus.Infof("Running conmon under slice %s and unitName %s", realCgroupParent, unitName)
- if err := utils.RunUnderSystemdScope(cmd.Process.Pid, realCgroupParent, unitName); err != nil {
- logrus.Warnf("Failed to add conmon to systemd sandbox cgroup: %v", err)
- }
- } else {
- cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon")
- control, err := cgroups.New(cgroups.V1, cgroups.StaticPath(cgroupPath), &spec.LinuxResources{})
- if err != nil {
- logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
- } else {
- // we need to remove this defer and delete the cgroup once conmon exits
- // maybe need a conmon monitor?
- if err := control.Add(cgroups.Process{Pid: cmd.Process.Pid}); err != nil {
- logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
- }
- }
- }
- }
- return nil
-}
-
-// newPipe creates a unix socket pair for communication
-func newPipe() (parent *os.File, child *os.File, err error) {
- fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
- if err != nil {
- return nil, nil, err
- }
- return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil
-}
-
// makeAccessible changes the path permission and each parent directory to have --x--x--x
func makeAccessible(path string, uid, gid int) error {
for ; path != "/"; path = filepath.Dir(path) {
@@ -85,7 +40,7 @@ func makeAccessible(path string, uid, gid int) error {
continue
}
if st.Mode()&0111 != 0111 {
- if err := os.Chmod(path, os.FileMode(st.Mode()|0111)); err != nil {
+ if err := os.Chmod(path, st.Mode()|0111); err != nil {
return err
}
}
@@ -96,7 +51,7 @@ func makeAccessible(path string, uid, gid int) error {
// CreateContainer creates a container in the OCI runtime
// TODO terminal support for container
// Presently just ignoring conmon opts related to it
-func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) {
+func (r *OCIRuntime) createContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (err error) {
if len(ctr.config.IDMappings.UIDMap) != 0 || len(ctr.config.IDMappings.GIDMap) != 0 {
for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.VolumePath} {
if err := makeAccessible(i, ctr.RootUID(), ctr.RootGID()); err != nil {
@@ -115,13 +70,17 @@ func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restor
if err != nil {
return err
}
- defer fd.Close()
+ defer errorhandling.CloseQuiet(fd)
// create a new mountns on the current thread
if err = unix.Unshare(unix.CLONE_NEWNS); err != nil {
return err
}
- defer unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS)
+ defer func() {
+ if err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS); err != nil {
+ logrus.Errorf("unable to clone new namespace: %q", err)
+ }
+ }()
// don't spread our mounts around. We are setting only /sys to be slave
// so that the cleanup process is still able to umount the storage and the
@@ -144,7 +103,7 @@ func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restor
return errors.Wrapf(err, "cannot unmount %s", m.Mountpoint)
}
}
- return r.createOCIContainer(ctr, cgroupParent, restoreOptions)
+ return r.createOCIContainer(ctr, restoreOptions)
}()
ch <- err
}()
@@ -152,7 +111,7 @@ func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restor
return err
}
}
- return r.createOCIContainer(ctr, cgroupParent, restoreOptions)
+ return r.createOCIContainer(ctr, restoreOptions)
}
func rpmVersion(path string) string {
@@ -187,246 +146,178 @@ func (r *OCIRuntime) conmonPackage() string {
return dpkgVersion(r.conmonPath)
}
-func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) {
- var stderrBuf bytes.Buffer
+// execContainer executes a command in a running container
+// TODO: Add --detach support
+// TODO: Convert to use conmon
+// TODO: add --pid-file and use that to generate exec session tracking
+func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty bool, cwd, user, sessionID string, streams *AttachStreams, preserveFDs int, resize chan remotecommand.TerminalSize, detachKeys string) (int, chan error, error) {
+ if len(cmd) == 0 {
+ return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
+ }
- runtimeDir, err := util.GetRootlessRuntimeDir()
- if err != nil {
- return err
+ if sessionID == "" {
+ return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
}
- parentPipe, childPipe, err := newPipe()
+ // create sync pipe to receive the pid
+ parentSyncPipe, childSyncPipe, err := newPipe()
if err != nil {
- return errors.Wrapf(err, "error creating socket pair")
+ return -1, nil, errors.Wrapf(err, "error creating socket pair")
}
+ defer errorhandling.CloseQuiet(parentSyncPipe)
+
+ // create start pipe to set the cgroup before running
+ // attachToExec is responsible for closing parentStartPipe
childStartPipe, parentStartPipe, err := newPipe()
if err != nil {
- return errors.Wrapf(err, "error creating socket pair for start pipe")
- }
-
- defer parentPipe.Close()
- defer parentStartPipe.Close()
-
- args := []string{}
- if r.cgroupManager == SystemdCgroupsManager {
- args = append(args, "-s")
- }
- args = append(args, "-c", ctr.ID())
- args = append(args, "-u", ctr.ID())
- args = append(args, "-n", ctr.Name())
- args = append(args, "-r", r.path)
- args = append(args, "-b", ctr.bundlePath())
- args = append(args, "-p", filepath.Join(ctr.state.RunDir, "pidfile"))
- args = append(args, "--exit-dir", r.exitsDir)
- if ctr.config.ConmonPidFile != "" {
- args = append(args, "--conmon-pidfile", ctr.config.ConmonPidFile)
- }
- if len(ctr.config.ExitCommand) > 0 {
- args = append(args, "--exit-command", ctr.config.ExitCommand[0])
- for _, arg := range ctr.config.ExitCommand[1:] {
- args = append(args, []string{"--exit-command-arg", arg}...)
- }
+ return -1, nil, errors.Wrapf(err, "error creating socket pair")
}
- args = append(args, "--socket-dir-path", r.socketsDir)
- if ctr.config.Spec.Process.Terminal {
- args = append(args, "-t")
- } else if ctr.config.Stdin {
- args = append(args, "-i")
+
+ // We want to make sure we close the parent{Start,Attach}Pipes if we fail
+ // but also don't want to close them after attach to exec is called
+ attachToExecCalled := false
+
+ defer func() {
+ if !attachToExecCalled {
+ errorhandling.CloseQuiet(parentStartPipe)
+ }
+ }()
+
+ // create the attach pipe to allow attach socket to be created before
+ // $RUNTIME exec starts running. This is to make sure we can capture all output
+ // from the process through that socket, rather than half reading the log, half attaching to the socket
+ // attachToExec is responsible for closing parentAttachPipe
+ parentAttachPipe, childAttachPipe, err := newPipe()
+ if err != nil {
+ return -1, nil, errors.Wrapf(err, "error creating socket pair")
}
- if r.logSizeMax >= 0 {
- args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax))
+
+ defer func() {
+ if !attachToExecCalled {
+ errorhandling.CloseQuiet(parentAttachPipe)
+ }
+ }()
+
+ childrenClosed := false
+ defer func() {
+ if !childrenClosed {
+ errorhandling.CloseQuiet(childSyncPipe)
+ errorhandling.CloseQuiet(childAttachPipe)
+ errorhandling.CloseQuiet(childStartPipe)
+ }
+ }()
+
+ runtimeDir, err := util.GetRootlessRuntimeDir()
+ if err != nil {
+ return -1, nil, err
}
- logDriver := KubernetesLogging
- if ctr.LogDriver() != "" {
- logDriver = ctr.LogDriver()
+ processFile, err := prepareProcessExec(c, cmd, env, tty, cwd, user, sessionID)
+ if err != nil {
+ return -1, nil, err
}
- args = append(args, "-l", fmt.Sprintf("%s:%s", logDriver, ctr.LogPath()))
- if r.noPivot {
- args = append(args, "--no-pivot")
+ var ociLog string
+ if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
+ ociLog = c.execOCILog(sessionID)
}
+ args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog)
- logLevel := logrus.GetLevel()
- args = append(args, "--log-level", logLevel.String())
+ if preserveFDs > 0 {
+ args = append(args, formatRuntimeOpts("--preserve-fds", string(preserveFDs))...)
+ }
- if logLevel == logrus.DebugLevel {
- logrus.Debugf("%s messages will be logged to syslog", r.conmonPath)
- args = append(args, "--syslog")
+ for _, capability := range capAdd {
+ args = append(args, formatRuntimeOpts("--cap", capability)...)
}
- if restoreOptions != nil {
- args = append(args, "--restore", ctr.CheckpointPath())
- if restoreOptions.TCPEstablished {
- args = append(args, "--restore-arg", "--tcp-established")
- }
+ if tty {
+ args = append(args, "-t")
}
+ // Tell conmon this is an exec session
+ args = append(args, "-e")
+ // TODO make this optional when we can detach
+ args = append(args, "--exec-attach")
+ args = append(args, "--exec-process-spec", processFile.Name())
+
logrus.WithFields(logrus.Fields{
"args": args,
}).Debugf("running conmon: %s", r.conmonPath)
+ execCmd := exec.Command(r.conmonPath, args...)
- cmd := exec.Command(r.conmonPath, args...)
- cmd.Dir = ctr.bundlePath()
- cmd.SysProcAttr = &syscall.SysProcAttr{
- Setpgid: true,
+ if streams.AttachInput {
+ execCmd.Stdin = streams.InputStream
+ }
+ if streams.AttachOutput {
+ execCmd.Stdout = streams.OutputStream
+ }
+ if streams.AttachError {
+ execCmd.Stderr = streams.ErrorStream
}
- // TODO this is probably a really bad idea for some uses
- // Make this configurable
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- if ctr.config.Spec.Process.Terminal {
- cmd.Stderr = &stderrBuf
- }
-
- cmd.ExtraFiles = append(cmd.ExtraFiles, childPipe, childStartPipe)
- // 0, 1 and 2 are stdin, stdout and stderr
- cmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", 3))
- cmd.Env = append(cmd.Env, fmt.Sprintf("_OCI_STARTPIPE=%d", 4))
- cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
- cmd.Env = append(cmd.Env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED")))
- cmd.Env = append(cmd.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID")))
- cmd.Env = append(cmd.Env, fmt.Sprintf("HOME=%s", os.Getenv("HOME")))
-
- if r.reservePorts && !ctr.config.NetMode.IsSlirp4netns() {
- ports, err := bindPorts(ctr.config.PortMappings)
- if err != nil {
- return err
- }
- // Leak the port we bound in the conmon process. These fd's won't be used
- // by the container and conmon will keep the ports busy so that another
- // process cannot use them.
- cmd.ExtraFiles = append(cmd.ExtraFiles, ports...)
+ conmonEnv, extraFiles, err := r.configureConmonEnv(runtimeDir)
+ if err != nil {
+ return -1, nil, err
}
- if ctr.config.NetMode.IsSlirp4netns() {
- ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
- if err != nil {
- return errors.Wrapf(err, "failed to create rootless network sync pipe")
- }
- // Leak one end in conmon, the other one will be leaked into slirp4netns
- cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessSlirpSyncW)
- }
-
- if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok {
- cmd.Env = append(cmd.Env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify))
- }
- if listenfds, ok := os.LookupEnv("LISTEN_FDS"); ok {
- cmd.Env = append(cmd.Env, fmt.Sprintf("LISTEN_FDS=%s", listenfds), "LISTEN_PID=1")
- fds := activation.Files(false)
- cmd.ExtraFiles = append(cmd.ExtraFiles, fds...)
- }
- if selinux.GetEnabled() {
- // Set the label of the conmon process to be level :s0
- // This will allow the container processes to talk to fifo-files
- // passed into the container by conmon
- var (
- plabel string
- con selinux.Context
- )
- plabel, err = selinux.CurrentLabel()
- if err != nil {
- childPipe.Close()
- return errors.Wrapf(err, "Failed to get current SELinux label")
- }
+ // we don't want to step on the fds the user asked us to preserve
+ // Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3
+ execCmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", preserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", preserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", preserveFDs+5))
+ execCmd.Env = append(execCmd.Env, conmonEnv...)
- con, err = selinux.NewContext(plabel)
- if err != nil {
- return errors.Wrapf(err, "Failed to get new context from SELinux label")
- }
+ execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe)
+ execCmd.ExtraFiles = append(execCmd.ExtraFiles, extraFiles...)
+ execCmd.Dir = c.execBundlePath(sessionID)
+ execCmd.SysProcAttr = &syscall.SysProcAttr{
+ Setpgid: true,
+ }
- runtime.LockOSThread()
- if con["level"] != "s0" && con["level"] != "" {
- con["level"] = "s0"
- if err = label.SetProcessLabel(con.Get()); err != nil {
- runtime.UnlockOSThread()
- return err
- }
+ if preserveFDs > 0 {
+ for fd := 3; fd < 3+preserveFDs; fd++ {
+ execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)))
}
- err = cmd.Start()
- // Ignore error returned from SetProcessLabel("") call,
- // can't recover.
- label.SetProcessLabel("")
- runtime.UnlockOSThread()
- } else {
- err = cmd.Start()
- }
- if err != nil {
- childPipe.Close()
- return err
}
- defer cmd.Wait()
- // We don't need childPipe on the parent side
- childPipe.Close()
- childStartPipe.Close()
+ err = startCommandGivenSelinux(execCmd)
- // Move conmon to specified cgroup
- if err := r.moveConmonToCgroup(ctr, cgroupParent, cmd); err != nil {
- return err
- }
+ // We don't need the child pipes on the parent side
+ errorhandling.CloseQuiet(childSyncPipe)
+ errorhandling.CloseQuiet(childAttachPipe)
+ errorhandling.CloseQuiet(childStartPipe)
+ childrenClosed = true
- /* We set the cgroup, now the child can start creating children */
- someData := []byte{0}
- _, err = parentStartPipe.Write(someData)
if err != nil {
- return err
+ return -1, nil, errors.Wrapf(err, "cannot start container %s", c.ID())
}
-
- /* Wait for initial setup and fork, and reap child */
- err = cmd.Wait()
- if err != nil {
- return err
+ if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe, sessionID); err != nil {
+ return -1, nil, err
}
- defer func() {
- if err != nil {
- if err2 := r.deleteContainer(ctr); err2 != nil {
- logrus.Errorf("Error removing container %s from runtime after creation failed", ctr.ID())
+ if preserveFDs > 0 {
+ for fd := 3; fd < 3+preserveFDs; fd++ {
+ // These fds were passed down to the runtime. Close them
+ // so that we do not interfere with them.
+ if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil {
+ logrus.Debugf("unable to close file fd-%d", fd)
}
}
- }()
-
- // Wait to get container pid from conmon
- type syncStruct struct {
- si *syncInfo
- err error
}
- ch := make(chan syncStruct)
+
+ // TODO Only create if !detach
+ // Attach to the container before starting it
+ attachChan := make(chan error)
go func() {
- var si *syncInfo
- rdr := bufio.NewReader(parentPipe)
- b, err := rdr.ReadBytes('\n')
- if err != nil {
- ch <- syncStruct{err: err}
- }
- if err := json.Unmarshal(b, &si); err != nil {
- ch <- syncStruct{err: err}
- return
- }
- ch <- syncStruct{si: si}
+ // attachToExec is responsible for closing pipes
+ attachChan <- c.attachToExec(streams, detachKeys, resize, sessionID, parentStartPipe, parentAttachPipe)
+ close(attachChan)
}()
+ attachToExecCalled = true
- select {
- case ss := <-ch:
- if ss.err != nil {
- return errors.Wrapf(ss.err, "error reading container (probably exited) json message")
- }
- logrus.Debugf("Received container pid: %d", ss.si.Pid)
- if ss.si.Pid == -1 {
- if ss.si.Message != "" {
- return errors.Wrapf(ErrInternal, "container create failed: %s", ss.si.Message)
- }
- return errors.Wrapf(ErrInternal, "container create failed")
- }
- ctr.state.PID = ss.si.Pid
- case <-time.After(ContainerCreateTimeout):
- return errors.Wrapf(ErrInternal, "container creation timeout")
- }
- return nil
+ pid, err := readConmonPipeData(parentSyncPipe, ociLog)
+
+ return pid, attachChan, err
}
// Wait for a container which has been sent a signal to stop
diff --git a/libpod/oci_unsupported.go b/libpod/oci_unsupported.go
index 12183faf3..4a65d4d1d 100644
--- a/libpod/oci_unsupported.go
+++ b/libpod/oci_unsupported.go
@@ -5,18 +5,21 @@ package libpod
import (
"os"
"os/exec"
+
+ "github.com/containers/libpod/libpod/define"
+ "k8s.io/client-go/tools/remotecommand"
)
func (r *OCIRuntime) moveConmonToCgroup(ctr *Container, cgroupParent string, cmd *exec.Cmd) error {
- return ErrOSNotSupported
+ return define.ErrOSNotSupported
}
func newPipe() (parent *os.File, child *os.File, err error) {
- return nil, nil, ErrNotImplemented
+ return nil, nil, define.ErrNotImplemented
}
-func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) {
- return ErrNotImplemented
+func (r *OCIRuntime) createContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (err error) {
+ return define.ErrNotImplemented
}
func (r *OCIRuntime) pathPackage() string {
@@ -28,13 +31,17 @@ func (r *OCIRuntime) conmonPackage() string {
}
func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) {
- return ErrOSNotSupported
+ return define.ErrOSNotSupported
}
func (r *OCIRuntime) execStopContainer(ctr *Container, timeout uint) error {
- return ErrOSNotSupported
+ return define.ErrOSNotSupported
}
func (r *OCIRuntime) stopContainer(ctr *Container, timeout uint) error {
- return ErrOSNotSupported
+ return define.ErrOSNotSupported
+}
+
+func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty bool, cwd, user, sessionID string, streams *AttachStreams, preserveFDs int, resize chan remotecommand.TerminalSize, detachKeys string) (int, chan error, error) {
+ return -1, nil, define.ErrOSNotSupported
}
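Aside: the oci_unsupported.go changes above keep every OS-specific entry point compilable on non-Linux platforms by returning sentinel errors that now live in the define package. A small sketch of that stub pattern follows; it is not libpod's actual layout, the error variable is inlined here, and the build tag is shown only as a comment (the era's `// +build !linux` form, or the newer `//go:build !linux`).

package main

import (
	"errors"
	"fmt"
)

// In libpod this sentinel lives in the define package; here it is inlined.
var errOSNotSupported = errors.New("operation not supported on this OS")

// The "unsupported" variant would sit in a file guarded by a `// +build !linux`
// tag and do nothing but return the sentinel with zero values.
func execContainerUnsupported() (int, chan error, error) {
	return -1, nil, errOSNotSupported
}

func main() {
	if _, _, err := execContainerUnsupported(); errors.Is(err, errOSNotSupported) {
		fmt.Println("caller can detect the stub and degrade gracefully:", err)
	}
}
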
diff --git a/libpod/options.go b/libpod/options.go
index 20aa51981..81d3aa64f 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -8,6 +8,7 @@ import (
"syscall"
"github.com/containers/image/manifest"
+ config2 "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/namespaces"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
@@ -19,7 +20,7 @@ import (
var (
nameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$")
- regexError = errors.Wrapf(ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*")
+ regexError = errors.Wrapf(config2.ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*")
)
// Runtime Creation Options
@@ -30,7 +31,7 @@ var (
func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
setField := false
@@ -104,7 +105,7 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
func WithDefaultTransport(defaultTransport string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.ImageDefaultTransport = defaultTransport
@@ -120,7 +121,7 @@ func WithDefaultTransport(defaultTransport string) RuntimeOption {
func WithSignaturePolicy(path string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.SignaturePolicyPath = path
@@ -136,11 +137,11 @@ func WithSignaturePolicy(path string) RuntimeOption {
func WithStateType(storeType RuntimeStateStore) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
if storeType == InvalidStateStore {
- return errors.Wrapf(ErrInvalidArg, "must provide a valid state store type")
+ return errors.Wrapf(config2.ErrInvalidArg, "must provide a valid state store type")
}
rt.config.StateType = storeType
@@ -153,11 +154,11 @@ func WithStateType(storeType RuntimeStateStore) RuntimeOption {
func WithOCIRuntime(runtime string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
if runtime == "" {
- return errors.Wrapf(ErrInvalidArg, "must provide a valid path")
+ return errors.Wrapf(config2.ErrInvalidArg, "must provide a valid path")
}
rt.config.OCIRuntime = runtime
@@ -172,11 +173,11 @@ func WithOCIRuntime(runtime string) RuntimeOption {
func WithConmonPath(path string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
if path == "" {
- return errors.Wrapf(ErrInvalidArg, "must provide a valid path")
+ return errors.Wrapf(config2.ErrInvalidArg, "must provide a valid path")
}
rt.config.ConmonPath = []string{path}
@@ -189,7 +190,7 @@ func WithConmonPath(path string) RuntimeOption {
func WithConmonEnv(environment []string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.ConmonEnvVars = make([]string, len(environment))
@@ -204,7 +205,7 @@ func WithConmonEnv(environment []string) RuntimeOption {
func WithNetworkCmdPath(path string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.NetworkCmdPath = path
@@ -219,11 +220,11 @@ func WithNetworkCmdPath(path string) RuntimeOption {
func WithCgroupManager(manager string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
if manager != CgroupfsCgroupsManager && manager != SystemdCgroupsManager {
- return errors.Wrapf(ErrInvalidArg, "CGroup manager must be one of %s and %s",
+ return errors.Wrapf(config2.ErrInvalidArg, "CGroup manager must be one of %s and %s",
CgroupfsCgroupsManager, SystemdCgroupsManager)
}
@@ -238,7 +239,7 @@ func WithCgroupManager(manager string) RuntimeOption {
func WithStaticDir(dir string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.StaticDir = dir
@@ -252,12 +253,12 @@ func WithStaticDir(dir string) RuntimeOption {
func WithHooksDir(hooksDirs ...string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
for _, hooksDir := range hooksDirs {
if hooksDir == "" {
- return errors.Wrap(ErrInvalidArg, "empty-string hook directories are not supported")
+ return errors.Wrap(config2.ErrInvalidArg, "empty-string hook directories are not supported")
}
}
@@ -273,11 +274,11 @@ func WithHooksDir(hooksDirs ...string) RuntimeOption {
func WithDefaultMountsFile(mountsFile string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
if mountsFile == "" {
- return ErrInvalidArg
+ return config2.ErrInvalidArg
}
rt.config.DefaultMountsFile = mountsFile
return nil
@@ -290,7 +291,7 @@ func WithDefaultMountsFile(mountsFile string) RuntimeOption {
func WithTmpDir(dir string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.TmpDir = dir
rt.configuredFrom.libpodTmpDirSet = true
@@ -299,12 +300,21 @@ func WithTmpDir(dir string) RuntimeOption {
}
}
+// WithNoStore sets a bool on the runtime that we do not need
+// any containers storage.
+func WithNoStore() RuntimeOption {
+ return func(rt *Runtime) error {
+ rt.noStore = true
+ return nil
+ }
+}
+
// WithMaxLogSize sets the maximum size of container logs.
// Positive sizes are limits in bytes, -1 is unlimited.
func WithMaxLogSize(limit int64) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.MaxLogSize = limit
@@ -315,10 +325,10 @@ func WithMaxLogSize(limit int64) RuntimeOption {
// WithNoPivotRoot sets the runtime to use MS_MOVE instead of PIVOT_ROOT when
// starting containers.
-func WithNoPivotRoot(noPivot bool) RuntimeOption {
+func WithNoPivotRoot() RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.NoPivotRoot = true
@@ -331,7 +341,7 @@ func WithNoPivotRoot(noPivot bool) RuntimeOption {
func WithCNIConfigDir(dir string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.CNIConfigDir = dir
@@ -344,7 +354,7 @@ func WithCNIConfigDir(dir string) RuntimeOption {
func WithCNIPluginDir(dir string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.CNIPluginDir = []string{dir}
@@ -364,7 +374,7 @@ func WithCNIPluginDir(dir string) RuntimeOption {
func WithNamespace(ns string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.Namespace = ns
@@ -380,7 +390,7 @@ func WithNamespace(ns string) RuntimeOption {
func WithVolumePath(volPath string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.VolumePath = volPath
@@ -398,7 +408,7 @@ func WithVolumePath(volPath string) RuntimeOption {
func WithDefaultInfraImage(img string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.InfraImage = img
@@ -412,7 +422,7 @@ func WithDefaultInfraImage(img string) RuntimeOption {
func WithDefaultInfraCommand(cmd string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.config.InfraCommand = cmd
@@ -428,7 +438,7 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption {
func WithRenumber() RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.doRenumber = true
@@ -443,7 +453,7 @@ func WithRenumber() RuntimeOption {
func WithMigrate() RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
- return ErrRuntimeFinalized
+ return config2.ErrRuntimeFinalized
}
rt.doMigrate = true
@@ -458,7 +468,7 @@ func WithMigrate() RuntimeOption {
func WithShmDir(dir string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.ShmDir = dir
@@ -470,7 +480,7 @@ func WithShmDir(dir string) CtrCreateOption {
func WithSystemd() CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.Systemd = true
@@ -482,7 +492,7 @@ func WithSystemd() CtrCreateOption {
func WithShmSize(size int64) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.ShmSize = size
@@ -494,7 +504,7 @@ func WithShmSize(size int64) CtrCreateOption {
func WithPrivileged(privileged bool) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.Privileged = privileged
@@ -506,7 +516,7 @@ func WithPrivileged(privileged bool) CtrCreateOption {
func WithSecLabels(labelOpts []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.LabelOpts = labelOpts
return nil
@@ -518,7 +528,7 @@ func WithSecLabels(labelOpts []string) CtrCreateOption {
func WithUser(user string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.User = user
@@ -534,14 +544,14 @@ func WithUser(user string) CtrCreateOption {
func WithRootFSFromImage(imageID string, imageName string, useImageVolumes bool) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if ctr.config.RootfsImageID != "" || ctr.config.RootfsImageName != "" {
- return errors.Wrapf(ErrInvalidArg, "container already configured with root filesystem")
+ return errors.Wrapf(config2.ErrInvalidArg, "container already configured with root filesystem")
}
if ctr.config.Rootfs != "" {
- return errors.Wrapf(ErrInvalidArg, "cannot set both an image ID and a rootfs for a container")
+ return errors.Wrapf(config2.ErrInvalidArg, "cannot set both an image ID and a rootfs for a container")
}
ctr.config.RootfsImageID = imageID
@@ -556,7 +566,7 @@ func WithRootFSFromImage(imageID string, imageName string, useImageVolumes bool)
func WithStdin() CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.Stdin = true
@@ -572,11 +582,11 @@ func WithStdin() CtrCreateOption {
func (r *Runtime) WithPod(pod *Pod) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if pod == nil {
- return ErrInvalidArg
+ return config2.ErrInvalidArg
}
ctr.config.Pod = pod.ID()
@@ -589,7 +599,7 @@ func (r *Runtime) WithPod(pod *Pod) CtrCreateOption {
func WithLabels(labels map[string]string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.Labels = make(map[string]string)
@@ -605,7 +615,7 @@ func WithLabels(labels map[string]string) CtrCreateOption {
func WithName(name string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
// Check the name against a regex
@@ -623,13 +633,13 @@ func WithName(name string) CtrCreateOption {
func WithStopSignal(signal syscall.Signal) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if signal == 0 {
- return errors.Wrapf(ErrInvalidArg, "stop signal cannot be 0")
+ return errors.Wrapf(config2.ErrInvalidArg, "stop signal cannot be 0")
} else if signal > 64 {
- return errors.Wrapf(ErrInvalidArg, "stop signal cannot be greater than 64 (SIGRTMAX)")
+ return errors.Wrapf(config2.ErrInvalidArg, "stop signal cannot be greater than 64 (SIGRTMAX)")
}
ctr.config.StopSignal = uint(signal)
@@ -643,7 +653,7 @@ func WithStopSignal(signal syscall.Signal) CtrCreateOption {
func WithStopTimeout(timeout uint) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.StopTimeout = timeout
@@ -656,7 +666,7 @@ func WithStopTimeout(timeout uint) CtrCreateOption {
func WithIDMappings(idmappings storage.IDMappingOptions) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.IDMappings = idmappings
@@ -668,7 +678,7 @@ func WithIDMappings(idmappings storage.IDMappingOptions) CtrCreateOption {
func WithExitCommand(exitCommand []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.ExitCommand = append(exitCommand, ctr.ID())
@@ -681,7 +691,7 @@ func WithExitCommand(exitCommand []string) CtrCreateOption {
func WithUTSNSFromPod(p *Pod) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if err := validPodNSOption(p, ctr.config.Pod); err != nil {
@@ -705,19 +715,19 @@ func WithUTSNSFromPod(p *Pod) CtrCreateOption {
func WithIPCNSFrom(nsCtr *Container) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if !nsCtr.valid {
- return ErrCtrRemoved
+ return config2.ErrCtrRemoved
}
if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(ErrInvalidArg, "must specify another container")
+ return errors.Wrapf(config2.ErrInvalidArg, "must specify another container")
}
if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
+ return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
}
ctr.config.IPCNsCtr = nsCtr.ID()
@@ -733,19 +743,19 @@ func WithIPCNSFrom(nsCtr *Container) CtrCreateOption {
func WithMountNSFrom(nsCtr *Container) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if !nsCtr.valid {
- return ErrCtrRemoved
+ return config2.ErrCtrRemoved
}
if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(ErrInvalidArg, "must specify another container")
+ return errors.Wrapf(config2.ErrInvalidArg, "must specify another container")
}
if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
+ return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
}
ctr.config.MountNsCtr = nsCtr.ID()
@@ -761,23 +771,23 @@ func WithMountNSFrom(nsCtr *Container) CtrCreateOption {
func WithNetNSFrom(nsCtr *Container) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if !nsCtr.valid {
- return ErrCtrRemoved
+ return config2.ErrCtrRemoved
}
if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(ErrInvalidArg, "must specify another container")
+ return errors.Wrapf(config2.ErrInvalidArg, "must specify another container")
}
if ctr.config.CreateNetNS {
- return errors.Wrapf(ErrInvalidArg, "cannot join another container's net ns as we are making a new net ns")
+ return errors.Wrapf(config2.ErrInvalidArg, "cannot join another container's net ns as we are making a new net ns")
}
if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
+ return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
}
ctr.config.NetNsCtr = nsCtr.ID()
@@ -793,19 +803,19 @@ func WithNetNSFrom(nsCtr *Container) CtrCreateOption {
func WithPIDNSFrom(nsCtr *Container) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if !nsCtr.valid {
- return ErrCtrRemoved
+ return config2.ErrCtrRemoved
}
if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(ErrInvalidArg, "must specify another container")
+ return errors.Wrapf(config2.ErrInvalidArg, "must specify another container")
}
if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
+ return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
}
ctr.config.PIDNsCtr = nsCtr.ID()
@@ -821,22 +831,23 @@ func WithPIDNSFrom(nsCtr *Container) CtrCreateOption {
func WithUserNSFrom(nsCtr *Container) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if !nsCtr.valid {
- return ErrCtrRemoved
+ return config2.ErrCtrRemoved
}
if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(ErrInvalidArg, "must specify another container")
+ return errors.Wrapf(config2.ErrInvalidArg, "must specify another container")
}
if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
+ return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
}
ctr.config.UserNsCtr = nsCtr.ID()
+ ctr.config.IDMappings = nsCtr.config.IDMappings
return nil
}
@@ -849,19 +860,19 @@ func WithUserNSFrom(nsCtr *Container) CtrCreateOption {
func WithUTSNSFrom(nsCtr *Container) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if !nsCtr.valid {
- return ErrCtrRemoved
+ return config2.ErrCtrRemoved
}
if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(ErrInvalidArg, "must specify another container")
+ return errors.Wrapf(config2.ErrInvalidArg, "must specify another container")
}
if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
+ return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
}
ctr.config.UTSNsCtr = nsCtr.ID()
@@ -877,19 +888,19 @@ func WithUTSNSFrom(nsCtr *Container) CtrCreateOption {
func WithCgroupNSFrom(nsCtr *Container) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if !nsCtr.valid {
- return ErrCtrRemoved
+ return config2.ErrCtrRemoved
}
if nsCtr.ID() == ctr.ID() {
- return errors.Wrapf(ErrInvalidArg, "must specify another container")
+ return errors.Wrapf(config2.ErrInvalidArg, "must specify another container")
}
if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
- return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
+ return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
}
ctr.config.CgroupNsCtr = nsCtr.ID()
@@ -903,22 +914,22 @@ func WithCgroupNSFrom(nsCtr *Container) CtrCreateOption {
func WithDependencyCtrs(ctrs []*Container) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
deps := make([]string, 0, len(ctrs))
for _, dep := range ctrs {
if !dep.valid {
- return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", dep.ID())
+ return errors.Wrapf(config2.ErrCtrRemoved, "container %s is not valid", dep.ID())
}
if dep.ID() == ctr.ID() {
- return errors.Wrapf(ErrInvalidArg, "must specify another container")
+ return errors.Wrapf(config2.ErrInvalidArg, "must specify another container")
}
if ctr.config.Pod != "" && dep.config.Pod != ctr.config.Pod {
- return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, dep.ID())
+ return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, dep.ID())
}
deps = append(deps, dep.ID())
@@ -937,11 +948,11 @@ func WithDependencyCtrs(ctrs []*Container) CtrCreateOption {
func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netmode string, networks []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if ctr.config.NetNsCtr != "" {
- return errors.Wrapf(ErrInvalidArg, "container is already set to join another container's net ns, cannot create a new net ns")
+ return errors.Wrapf(config2.ErrInvalidArg, "container is already set to join another container's net ns, cannot create a new net ns")
}
ctr.config.PostConfigureNetNS = postConfigureNetNS
@@ -962,15 +973,15 @@ func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netmo
func WithStaticIP(ip net.IP) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if !ctr.config.CreateNetNS {
- return errors.Wrapf(ErrInvalidArg, "cannot set a static IP if the container is not creating a network namespace")
+ return errors.Wrapf(config2.ErrInvalidArg, "cannot set a static IP if the container is not creating a network namespace")
}
if len(ctr.config.Networks) != 0 {
- return errors.Wrapf(ErrInvalidArg, "cannot set a static IP if joining additional CNI networks")
+ return errors.Wrapf(config2.ErrInvalidArg, "cannot set a static IP if joining additional CNI networks")
}
ctr.config.StaticIP = ip
@@ -983,15 +994,15 @@ func WithStaticIP(ip net.IP) CtrCreateOption {
func WithLogDriver(driver string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
switch driver {
case "":
- return errors.Wrapf(ErrInvalidArg, "log driver must be set")
+ return errors.Wrapf(config2.ErrInvalidArg, "log driver must be set")
case JournaldLogging, KubernetesLogging, JSONLogging:
break
default:
- return errors.Wrapf(ErrInvalidArg, "invalid log driver")
+ return errors.Wrapf(config2.ErrInvalidArg, "invalid log driver")
}
ctr.config.LogDriver = driver
@@ -1004,10 +1015,10 @@ func WithLogDriver(driver string) CtrCreateOption {
func WithLogPath(path string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if path == "" {
- return errors.Wrapf(ErrInvalidArg, "log path must be set")
+ return errors.Wrapf(config2.ErrInvalidArg, "log path must be set")
}
ctr.config.LogPath = path
@@ -1020,11 +1031,11 @@ func WithLogPath(path string) CtrCreateOption {
func WithCgroupParent(parent string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if parent == "" {
- return errors.Wrapf(ErrInvalidArg, "cgroup parent cannot be empty")
+ return errors.Wrapf(config2.ErrInvalidArg, "cgroup parent cannot be empty")
}
ctr.config.CgroupParent = parent
@@ -1037,10 +1048,10 @@ func WithCgroupParent(parent string) CtrCreateOption {
func WithDNSSearch(searchDomains []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if ctr.config.UseImageResolvConf {
- return errors.Wrapf(ErrInvalidArg, "cannot add DNS search domains if container will not create /etc/resolv.conf")
+ return errors.Wrapf(config2.ErrInvalidArg, "cannot add DNS search domains if container will not create /etc/resolv.conf")
}
ctr.config.DNSSearch = searchDomains
return nil
@@ -1051,16 +1062,16 @@ func WithDNSSearch(searchDomains []string) CtrCreateOption {
func WithDNS(dnsServers []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if ctr.config.UseImageResolvConf {
- return errors.Wrapf(ErrInvalidArg, "cannot add DNS servers if container will not create /etc/resolv.conf")
+ return errors.Wrapf(config2.ErrInvalidArg, "cannot add DNS servers if container will not create /etc/resolv.conf")
}
var dns []net.IP
for _, i := range dnsServers {
result := net.ParseIP(i)
if result == nil {
- return errors.Wrapf(ErrInvalidArg, "invalid IP address %s", i)
+ return errors.Wrapf(config2.ErrInvalidArg, "invalid IP address %s", i)
}
dns = append(dns, result)
}
@@ -1073,10 +1084,10 @@ func WithDNS(dnsServers []string) CtrCreateOption {
func WithDNSOption(dnsOptions []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if ctr.config.UseImageResolvConf {
- return errors.Wrapf(ErrInvalidArg, "cannot add DNS options if container will not create /etc/resolv.conf")
+ return errors.Wrapf(config2.ErrInvalidArg, "cannot add DNS options if container will not create /etc/resolv.conf")
}
ctr.config.DNSOption = dnsOptions
return nil
@@ -1087,11 +1098,11 @@ func WithDNSOption(dnsOptions []string) CtrCreateOption {
func WithHosts(hosts []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if ctr.config.UseImageHosts {
- return errors.Wrapf(ErrInvalidArg, "cannot add hosts if container will not create /etc/hosts")
+ return errors.Wrapf(config2.ErrInvalidArg, "cannot add hosts if container will not create /etc/hosts")
}
ctr.config.HostAdd = hosts
@@ -1104,7 +1115,7 @@ func WithHosts(hosts []string) CtrCreateOption {
func WithConmonPidFile(path string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.ConmonPidFile = path
return nil
@@ -1116,7 +1127,7 @@ func WithConmonPidFile(path string) CtrCreateOption {
func WithGroups(groups []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.Groups = groups
return nil
@@ -1127,23 +1138,22 @@ func WithGroups(groups []string) CtrCreateOption {
// These are not added to the container's spec, but will instead be used during
// commit to populate the volumes of the new image, and to trigger some OCI
// hooks that are only added if volume mounts are present.
+// Furthermore, they are used in the output of inspect, to filter volumes -
+// only volumes included in this list will be included in the output.
// Unless explicitly set, committed images will have no volumes.
// The given volumes slice must not be nil.
func WithUserVolumes(volumes []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if volumes == nil {
- return ErrInvalidArg
+ return config2.ErrInvalidArg
}
ctr.config.UserVolumes = make([]string, 0, len(volumes))
- for _, vol := range volumes {
- ctr.config.UserVolumes = append(ctr.config.UserVolumes, vol)
- }
-
+ ctr.config.UserVolumes = append(ctr.config.UserVolumes, volumes...)
return nil
}
}
@@ -1156,14 +1166,11 @@ func WithUserVolumes(volumes []string) CtrCreateOption {
func WithEntrypoint(entrypoint []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.Entrypoint = make([]string, 0, len(entrypoint))
- for _, str := range entrypoint {
- ctr.config.Entrypoint = append(ctr.config.Entrypoint, str)
- }
-
+ ctr.config.Entrypoint = append(ctr.config.Entrypoint, entrypoint...)
return nil
}
}
@@ -1176,14 +1183,11 @@ func WithEntrypoint(entrypoint []string) CtrCreateOption {
func WithCommand(command []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.Command = make([]string, 0, len(command))
- for _, str := range command {
- ctr.config.Command = append(ctr.config.Command, str)
- }
-
+ ctr.config.Command = append(ctr.config.Command, command...)
return nil
}
}
@@ -1193,13 +1197,13 @@ func WithCommand(command []string) CtrCreateOption {
func WithRootFS(rootfs string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if _, err := os.Stat(rootfs); err != nil {
return errors.Wrapf(err, "error checking path %q", rootfs)
}
if ctr.config.RootfsImageID != "" {
- return errors.Wrapf(ErrInvalidArg, "cannot set both an image ID and a rootfs for a container")
+ return errors.Wrapf(config2.ErrInvalidArg, "cannot set both an image ID and a rootfs for a container")
}
ctr.config.Rootfs = rootfs
return nil
@@ -1213,7 +1217,7 @@ func WithRootFS(rootfs string) CtrCreateOption {
func WithCtrNamespace(ns string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.Namespace = ns
@@ -1227,13 +1231,13 @@ func WithCtrNamespace(ns string) CtrCreateOption {
func WithUseImageResolvConf() CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if len(ctr.config.DNSServer) != 0 ||
len(ctr.config.DNSSearch) != 0 ||
len(ctr.config.DNSOption) != 0 {
- return errors.Wrapf(ErrInvalidArg, "not creating resolv.conf conflicts with DNS options")
+ return errors.Wrapf(config2.ErrInvalidArg, "not creating resolv.conf conflicts with DNS options")
}
ctr.config.UseImageResolvConf = true
@@ -1247,11 +1251,11 @@ func WithUseImageResolvConf() CtrCreateOption {
func WithUseImageHosts() CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
if len(ctr.config.HostAdd) != 0 {
- return errors.Wrapf(ErrInvalidArg, "not creating /etc/hosts conflicts with adding to the hosts file")
+ return errors.Wrapf(config2.ErrInvalidArg, "not creating /etc/hosts conflicts with adding to the hosts file")
}
ctr.config.UseImageHosts = true
@@ -1266,14 +1270,14 @@ func WithUseImageHosts() CtrCreateOption {
func WithRestartPolicy(policy string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
switch policy {
case RestartPolicyNone, RestartPolicyNo, RestartPolicyOnFailure, RestartPolicyAlways:
ctr.config.RestartPolicy = policy
default:
- return errors.Wrapf(ErrInvalidArg, "%q is not a valid restart policy", policy)
+ return errors.Wrapf(config2.ErrInvalidArg, "%q is not a valid restart policy", policy)
}
return nil
@@ -1286,7 +1290,7 @@ func WithRestartPolicy(policy string) CtrCreateOption {
func WithRestartRetries(tries uint) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.RestartRetries = tries
@@ -1300,7 +1304,7 @@ func WithRestartRetries(tries uint) CtrCreateOption {
func withIsInfra() CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.IsInfra = true
@@ -1313,7 +1317,7 @@ func withIsInfra() CtrCreateOption {
func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
destinations := make(map[string]bool)
@@ -1323,7 +1327,7 @@ func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption {
// If they don't we will automatically create them.
if _, ok := destinations[vol.Dest]; ok {
- return errors.Wrapf(ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
+ return errors.Wrapf(config2.ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
}
destinations[vol.Dest] = true
@@ -1344,7 +1348,7 @@ func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption {
func WithVolumeName(name string) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
- return ErrVolumeFinalized
+ return config2.ErrVolumeFinalized
}
// Check the name against a regex
@@ -1361,7 +1365,7 @@ func WithVolumeName(name string) VolumeCreateOption {
func WithVolumeLabels(labels map[string]string) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
- return ErrVolumeFinalized
+ return config2.ErrVolumeFinalized
}
volume.config.Labels = make(map[string]string)
@@ -1377,7 +1381,7 @@ func WithVolumeLabels(labels map[string]string) VolumeCreateOption {
func WithVolumeDriver(driver string) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
- return ErrVolumeFinalized
+ return config2.ErrVolumeFinalized
}
volume.config.Driver = driver
@@ -1390,7 +1394,7 @@ func WithVolumeDriver(driver string) VolumeCreateOption {
func WithVolumeOptions(options map[string]string) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
- return ErrVolumeFinalized
+ return config2.ErrVolumeFinalized
}
volume.config.Options = make(map[string]string)
@@ -1406,7 +1410,7 @@ func WithVolumeOptions(options map[string]string) VolumeCreateOption {
func WithVolumeUID(uid int) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
- return ErrVolumeFinalized
+ return config2.ErrVolumeFinalized
}
volume.config.UID = uid
@@ -1419,7 +1423,7 @@ func WithVolumeUID(uid int) VolumeCreateOption {
func WithVolumeGID(gid int) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
- return ErrVolumeFinalized
+ return config2.ErrVolumeFinalized
}
volume.config.GID = gid
@@ -1435,7 +1439,7 @@ func WithVolumeGID(gid int) VolumeCreateOption {
func withSetCtrSpecific() VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
- return ErrVolumeFinalized
+ return config2.ErrVolumeFinalized
}
volume.config.IsCtrSpecific = true
@@ -1450,7 +1454,7 @@ func withSetCtrSpecific() VolumeCreateOption {
func WithPodName(name string) PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
- return ErrPodFinalized
+ return config2.ErrPodFinalized
}
// Check the name against a regex
@@ -1468,7 +1472,7 @@ func WithPodName(name string) PodCreateOption {
func WithPodLabels(labels map[string]string) PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
- return ErrPodFinalized
+ return config2.ErrPodFinalized
}
pod.config.Labels = make(map[string]string)
@@ -1484,7 +1488,7 @@ func WithPodLabels(labels map[string]string) PodCreateOption {
func WithPodCgroupParent(path string) PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
- return ErrPodFinalized
+ return config2.ErrPodFinalized
}
pod.config.CgroupParent = path
@@ -1500,7 +1504,7 @@ func WithPodCgroupParent(path string) PodCreateOption {
func WithPodCgroups() PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
- return ErrPodFinalized
+ return config2.ErrPodFinalized
}
pod.config.UsePodCgroup = true
@@ -1517,7 +1521,7 @@ func WithPodCgroups() PodCreateOption {
func WithPodNamespace(ns string) PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
- return ErrPodFinalized
+ return config2.ErrPodFinalized
}
pod.config.Namespace = ns
@@ -1533,7 +1537,7 @@ func WithPodNamespace(ns string) PodCreateOption {
func WithPodIPC() PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
- return ErrPodFinalized
+ return config2.ErrPodFinalized
}
pod.config.UsePodIPC = true
@@ -1549,7 +1553,7 @@ func WithPodIPC() PodCreateOption {
func WithPodNet() PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
- return ErrPodFinalized
+ return config2.ErrPodFinalized
}
pod.config.UsePodNet = true
@@ -1567,7 +1571,7 @@ func WithPodNet() PodCreateOption {
func WithPodMount() PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
- return ErrPodFinalized
+ return config2.ErrPodFinalized
}
pod.config.UsePodMount = true
@@ -1585,7 +1589,7 @@ func WithPodMount() PodCreateOption {
func WithPodUser() PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
- return ErrPodFinalized
+ return config2.ErrPodFinalized
}
pod.config.UsePodUser = true
@@ -1601,7 +1605,7 @@ func WithPodUser() PodCreateOption {
func WithPodPID() PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
- return ErrPodFinalized
+ return config2.ErrPodFinalized
}
pod.config.UsePodPID = true
@@ -1617,7 +1621,7 @@ func WithPodPID() PodCreateOption {
func WithPodUTS() PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
- return ErrPodFinalized
+ return config2.ErrPodFinalized
}
pod.config.UsePodUTS = true
@@ -1630,7 +1634,7 @@ func WithPodUTS() PodCreateOption {
func WithInfraContainer() PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
- return ErrPodFinalized
+ return config2.ErrPodFinalized
}
pod.config.InfraContainer.HasInfraContainer = true
@@ -1643,7 +1647,7 @@ func WithInfraContainer() PodCreateOption {
func WithInfraContainerPorts(bindings []ocicni.PortMapping) PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
- return ErrPodFinalized
+ return config2.ErrPodFinalized
}
pod.config.InfraContainer.PortBindings = bindings
return nil
@@ -1654,7 +1658,7 @@ func WithInfraContainerPorts(bindings []ocicni.PortMapping) PodCreateOption {
func WithHealthCheck(healthCheck *manifest.Schema2HealthConfig) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
- return ErrCtrFinalized
+ return config2.ErrCtrFinalized
}
ctr.config.HealthCheckConfig = healthCheck
return nil
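Aside: nearly every hunk in options.go above is the same mechanical change — the With* functional options now return finalized/invalid-argument errors from the define package (imported as config2). A compact, hypothetical sketch of that option shape is shown below; the types, the sentinel, and newContainer are invented for the example and are not the real libpod definitions.

package main

import (
	"errors"
	"fmt"
)

var errCtrFinalized = errors.New("container has already been finalized")

type container struct {
	valid bool // set once the container is committed to state
	name  string
}

type ctrCreateOption func(*container) error

// withName refuses to mutate an already-finalized container, mirroring the
// guard at the top of every With* option in the diff.
func withName(name string) ctrCreateOption {
	return func(c *container) error {
		if c.valid {
			return errCtrFinalized
		}
		if name == "" {
			return errors.New("name must not be empty")
		}
		c.name = name
		return nil
	}
}

func newContainer(opts ...ctrCreateOption) (*container, error) {
	c := &container{}
	for _, opt := range opts {
		if err := opt(c); err != nil {
			return nil, err
		}
	}
	c.valid = true // further options are now rejected
	return c, nil
}

func main() {
	c, err := newContainer(withName("demo"))
	fmt.Println(c, err)
	fmt.Println(withName("late")(c)) // rejected: container is finalized
}
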
diff --git a/libpod/pod.go b/libpod/pod.go
index 4ce697402..60626bfd7 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -3,6 +3,7 @@ package libpod
import (
"time"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/lock"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/pkg/errors"
@@ -18,7 +19,6 @@ import (
// assume their callers handled this requirement. Generally speaking, if a
// function takes the pod lock and accesses any part of state, it should
// updatePod() immediately after locking.
-// ffjson: skip
// Pod represents a group of containers that may share namespaces
type Pod struct {
config *PodConfig
@@ -30,7 +30,6 @@ type Pod struct {
}
// PodConfig represents a pod's static configuration
-// easyjson:json
type PodConfig struct {
ID string `json:"id"`
Name string `json:"name"`
@@ -66,7 +65,6 @@ type PodConfig struct {
}
// podState represents a pod's state
-// easyjson:json
type podState struct {
// CgroupPath is the path to the pod's CGroup
CgroupPath string `json:"cgroupPath"`
@@ -77,7 +75,6 @@ type podState struct {
// PodInspect represents the data we want to display for
// podman pod inspect
-// easyjson:json
type PodInspect struct {
Config *PodConfig
State *PodInspectState
@@ -85,14 +82,12 @@ type PodInspect struct {
}
// PodInspectState contains inspect data on the pod's state
-// easyjson:json
type PodInspectState struct {
CgroupPath string `json:"cgroupPath"`
InfraContainerID string `json:"infraContainerID"`
}
// PodContainerInfo keeps information on a container in a pod
-// easyjson:json
type PodContainerInfo struct {
ID string `json:"id"`
State string `json:"state"`
@@ -196,7 +191,7 @@ func (p *Pod) CgroupPath() (string, error) {
// HasContainer checks if a container is present in the pod
func (p *Pod) HasContainer(id string) (bool, error) {
if !p.valid {
- return false, ErrPodRemoved
+ return false, define.ErrPodRemoved
}
return p.runtime.state.PodHasContainer(p, id)
@@ -208,7 +203,7 @@ func (p *Pod) AllContainersByID() ([]string, error) {
defer p.lock.Unlock()
if !p.valid {
- return nil, ErrPodRemoved
+ return nil, define.ErrPodRemoved
}
return p.runtime.state.PodContainersByID(p)
@@ -217,7 +212,7 @@ func (p *Pod) AllContainersByID() ([]string, error) {
// AllContainers retrieves the containers in the pod
func (p *Pod) AllContainers() ([]*Container, error) {
if !p.valid {
- return nil, ErrPodRemoved
+ return nil, define.ErrPodRemoved
}
p.lock.Lock()
defer p.lock.Unlock()
@@ -286,7 +281,7 @@ func (p *Pod) GetPodStats(previousContainerStats map[string]*ContainerStats) (ma
newStats, err := c.GetContainerStats(prevStat)
// If the container wasn't running, don't include it
// but also suppress the error
- if err != nil && errors.Cause(err) != ErrCtrStateInvalid {
+ if err != nil && errors.Cause(err) != define.ErrCtrStateInvalid {
return nil, err
}
if err == nil {
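Aside: GetPodStats above suppresses failures only when errors.Cause(err) equals the state-invalid sentinel. The short sketch below shows why the unwrap matters with github.com/pkg/errors: wrapped errors no longer compare equal to the sentinel directly. The sentinel name and containerStats helper are invented for the example.

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errCtrStateInvalid = errors.New("container state improper")

func containerStats() error {
	// Deeper layers wrap the sentinel with context before returning it.
	return errors.Wrap(errCtrStateInvalid, "container abc123 is not running")
}

func main() {
	err := containerStats()
	// Direct comparison fails; unwrapping with Cause recovers the sentinel.
	fmt.Println(err == errCtrStateInvalid)               // false
	fmt.Println(errors.Cause(err) == errCtrStateInvalid) // true
}
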
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index 9ed5c88eb..c7b0353bd 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -3,6 +3,7 @@ package libpod
import (
"context"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -27,7 +28,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
defer p.lock.Unlock()
if !p.valid {
- return nil, ErrPodRemoved
+ return nil, define.ErrPodRemoved
}
allCtrs, err := p.runtime.state.PodContainers(p)
@@ -47,7 +48,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
// If there are no containers without dependencies, we can't start
// Error out
if len(graph.noDepNodes) == 0 {
- return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
}
// Traverse the graph beginning at nodes with no dependencies
@@ -56,7 +57,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error starting some containers")
+ return ctrErrors, errors.Wrapf(define.ErrCtrExists, "error starting some containers")
}
defer p.newPodEvent(events.Start)
return nil, nil
@@ -88,7 +89,7 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
defer p.lock.Unlock()
if !p.valid {
- return nil, ErrPodRemoved
+ return nil, define.ErrPodRemoved
}
allCtrs, err := p.runtime.state.PodContainers(p)
@@ -112,7 +113,7 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
}
// Ignore containers that are not running
- if ctr.state.State != ContainerStateRunning {
+ if ctr.state.State != define.ContainerStateRunning {
ctr.lock.Unlock()
continue
}
@@ -136,7 +137,7 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error stopping some containers")
+ return ctrErrors, errors.Wrapf(define.ErrCtrExists, "error stopping some containers")
}
defer p.newPodEvent(events.Stop)
return nil, nil
@@ -159,7 +160,7 @@ func (p *Pod) Pause() (map[string]error, error) {
defer p.lock.Unlock()
if !p.valid {
- return nil, ErrPodRemoved
+ return nil, define.ErrPodRemoved
}
allCtrs, err := p.runtime.state.PodContainers(p)
@@ -180,7 +181,7 @@ func (p *Pod) Pause() (map[string]error, error) {
}
// Ignore containers that are not running
- if ctr.state.State != ContainerStateRunning {
+ if ctr.state.State != define.ContainerStateRunning {
ctr.lock.Unlock()
continue
}
@@ -195,7 +196,7 @@ func (p *Pod) Pause() (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error pausing some containers")
+ return ctrErrors, errors.Wrapf(define.ErrCtrExists, "error pausing some containers")
}
defer p.newPodEvent(events.Pause)
return nil, nil
@@ -218,7 +219,7 @@ func (p *Pod) Unpause() (map[string]error, error) {
defer p.lock.Unlock()
if !p.valid {
- return nil, ErrPodRemoved
+ return nil, define.ErrPodRemoved
}
allCtrs, err := p.runtime.state.PodContainers(p)
@@ -239,7 +240,7 @@ func (p *Pod) Unpause() (map[string]error, error) {
}
// Ignore containers that are not paused
- if ctr.state.State != ContainerStatePaused {
+ if ctr.state.State != define.ContainerStatePaused {
ctr.lock.Unlock()
continue
}
@@ -254,7 +255,7 @@ func (p *Pod) Unpause() (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error unpausing some containers")
+ return ctrErrors, errors.Wrapf(define.ErrCtrExists, "error unpausing some containers")
}
defer p.newPodEvent(events.Unpause)
@@ -279,7 +280,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
defer p.lock.Unlock()
if !p.valid {
- return nil, ErrPodRemoved
+ return nil, define.ErrPodRemoved
}
allCtrs, err := p.runtime.state.PodContainers(p)
@@ -299,7 +300,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
// If there are no containers without dependencies, we can't start
// Error out
if len(graph.noDepNodes) == 0 {
- return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
+ return nil, errors.Wrapf(define.ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
}
// Traverse the graph beginning at nodes with no dependencies
@@ -308,7 +309,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error stopping some containers")
+ return ctrErrors, errors.Wrapf(define.ErrCtrExists, "error stopping some containers")
}
p.newPodEvent(events.Stop)
p.newPodEvent(events.Start)
@@ -331,7 +332,7 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) {
defer p.lock.Unlock()
if !p.valid {
- return nil, ErrPodRemoved
+ return nil, define.ErrPodRemoved
}
allCtrs, err := p.runtime.state.PodContainers(p)
@@ -352,12 +353,12 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) {
}
// Ignore containers that are not running
- if ctr.state.State != ContainerStateRunning {
+ if ctr.state.State != define.ContainerStateRunning {
ctr.lock.Unlock()
continue
}
- if err := ctr.runtime.ociRuntime.killContainer(ctr, signal); err != nil {
+ if err := ctr.ociRuntime.killContainer(ctr, signal); err != nil {
ctr.lock.Unlock()
ctrErrors[ctr.ID()] = err
continue
@@ -374,7 +375,7 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) {
}
if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error killing some containers")
+ return ctrErrors, errors.Wrapf(define.ErrCtrExists, "error killing some containers")
}
defer p.newPodEvent(events.Kill)
return nil, nil
@@ -382,12 +383,12 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) {
// Status gets the status of all containers in the pod
// Returns a map of Container ID to Container Status
-func (p *Pod) Status() (map[string]ContainerStatus, error) {
+func (p *Pod) Status() (map[string]define.ContainerStatus, error) {
p.lock.Lock()
defer p.lock.Unlock()
if !p.valid {
- return nil, ErrPodRemoved
+ return nil, define.ErrPodRemoved
}
allCtrs, err := p.runtime.state.PodContainers(p)
@@ -402,7 +403,7 @@ func (p *Pod) Status() (map[string]ContainerStatus, error) {
}
// Now that all containers are locked, get their status
- status := make(map[string]ContainerStatus, len(allCtrs))
+ status := make(map[string]define.ContainerStatus, len(allCtrs))
for _, ctr := range allCtrs {
if err := ctr.syncContainer(); err != nil {
return nil, err
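Aside: the pod_api.go changes keep the existing aggregation scheme — Start, Stop, Pause, Kill and friends operate on every container, collect per-container failures into a map keyed by container ID, and return a single wrapping error when the map is non-empty. A hedged, self-contained sketch of that pattern follows; applyToAll, the IDs, and the sentinel are invented here and only stand in for the real pod methods.

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errSomeContainers = errors.New("some containers failed")

// applyToAll runs op on every ID, continuing past failures so one bad
// container cannot block the rest of the pod.
func applyToAll(ids []string, op func(string) error) (map[string]error, error) {
	ctrErrors := make(map[string]error)
	for _, id := range ids {
		if err := op(id); err != nil {
			ctrErrors[id] = err
			continue
		}
	}
	if len(ctrErrors) > 0 {
		return ctrErrors, errors.Wrap(errSomeContainers, "error stopping some containers")
	}
	return nil, nil
}

func main() {
	failures, err := applyToAll([]string{"a", "b"}, func(id string) error {
		if id == "b" {
			return fmt.Errorf("container %s is not running", id)
		}
		return nil
	})
	fmt.Println(failures, err)
}
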
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 1fcb5b1a6..7aacda482 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -5,6 +5,7 @@ import (
"path/filepath"
"time"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/storage/pkg/stringid"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -52,13 +53,13 @@ func (p *Pod) refresh() error {
}
if !p.valid {
- return ErrPodRemoved
+ return define.ErrPodRemoved
}
// Retrieve the pod's lock
lock, err := p.runtime.lockManager.AllocateAndRetrieveLock(p.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error retrieving lock for pod %s", p.ID())
+ return errors.Wrapf(err, "error retrieving lock %d for pod %s", p.config.LockID, p.ID())
}
p.lock = lock
@@ -76,7 +77,7 @@ func (p *Pod) refresh() error {
logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath)
default:
- return errors.Wrapf(ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.CgroupManager)
+ return errors.Wrapf(define.ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.CgroupManager)
}
}
diff --git a/libpod/pod_top_linux.go b/libpod/pod_top_linux.go
index e08e5e83a..80221c3a9 100644
--- a/libpod/pod_top_linux.go
+++ b/libpod/pod_top_linux.go
@@ -6,6 +6,7 @@ import (
"strconv"
"strings"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/psgo"
)
@@ -34,7 +35,7 @@ func (p *Pod) GetPodPidInformation(descriptors []string) ([]string, error) {
c.lock.Unlock()
return nil, err
}
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
pid := strconv.Itoa(c.state.PID)
pids = append(pids, pid)
}
diff --git a/libpod/pod_top_unsupported.go b/libpod/pod_top_unsupported.go
index d44470523..9a3333275 100644
--- a/libpod/pod_top_unsupported.go
+++ b/libpod/pod_top_unsupported.go
@@ -2,7 +2,9 @@
package libpod
+import "github.com/containers/libpod/libpod/define"
+
// GetPodPidInformation is exclusive to linux
func (p *Pod) GetPodPidInformation(descriptors []string) ([]string, error) {
- return nil, ErrNotImplemented
+ return nil, define.ErrNotImplemented
}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 1f8dd98b4..28958e932 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -5,13 +5,17 @@ import (
"fmt"
"io/ioutil"
"os"
+ "os/user"
"path/filepath"
+ "strings"
"sync"
"syscall"
+ "time"
"github.com/BurntSushi/toml"
is "github.com/containers/image/storage"
"github.com/containers/image/types"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/libpod/lock"
@@ -42,11 +46,20 @@ const (
SQLiteStateStore RuntimeStateStore = iota
// BoltDBStateStore is a state backed by a BoltDB database
BoltDBStateStore RuntimeStateStore = iota
+)
+
+var (
+ // InstallPrefix is the prefix where podman will be installed.
+ // It can be overridden at build time.
+ installPrefix = "/usr/local"
+ // EtcDir is the sysconfdir where podman should look for system config files.
+ // It can be overridden at build time.
+ etcDir = "/etc"
// SeccompDefaultPath defines the default seccomp path
- SeccompDefaultPath = "/usr/share/containers/seccomp.json"
+ SeccompDefaultPath = installPrefix + "/share/containers/seccomp.json"
// SeccompOverridePath if this exists it overrides the default seccomp path
- SeccompOverridePath = "/etc/crio/seccomp.json"
+ SeccompOverridePath = etcDir + "/crio/seccomp.json"
// ConfigPath is the path to the libpod configuration file
// This file is loaded to replace the builtin default config before
@@ -54,24 +67,24 @@ const (
// If it is not present, the builtin default config is used instead
// This path can be overridden when the runtime is created by using
// NewRuntimeFromConfig() instead of NewRuntime()
- ConfigPath = "/usr/share/containers/libpod.conf"
+ ConfigPath = installPrefix + "/share/containers/libpod.conf"
// OverrideConfigPath is the path to an override for the default libpod
// configuration file. If OverrideConfigPath exists, it will be used in
// place of the configuration file pointed to by ConfigPath.
- OverrideConfigPath = "/etc/containers/libpod.conf"
+ OverrideConfigPath = etcDir + "/containers/libpod.conf"
// DefaultInfraImage to use for infra container
- DefaultInfraImage = "k8s.gcr.io/pause:3.1"
- // DefaultInfraCommand to be run in an infra container
- DefaultInfraCommand = "/pause"
- // DefaultInitPath is the default path to the container-init binary
- DefaultInitPath = "/usr/libexec/podman/catatonit"
+ // DefaultInfraCommand to be run in an infra container
// DefaultSHMLockPath is the default path for SHM locks
DefaultSHMLockPath = "/libpod_lock"
// DefaultRootlessSHMLockPath is the default path for rootless SHM locks
DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
+
+ // DefaultDetachKeys is the default keys sequence for detaching a
+ // container
+ DefaultDetachKeys = "ctrl-p,ctrl-q"
)
// A RuntimeOption is a functional option which alters the Runtime created by
@@ -82,18 +95,18 @@ type RuntimeOption func(*Runtime) error
type Runtime struct {
config *RuntimeConfig
- state State
- store storage.Store
- storageService *storageService
- imageContext *types.SystemContext
- ociRuntime *OCIRuntime
- netPlugin ocicni.CNIPlugin
- ociRuntimePath OCIRuntimePath
- conmonPath string
- imageRuntime *image.Runtime
- firewallBackend firewall.FirewallBackend
- lockManager lock.Manager
- configuredFrom *runtimeConfiguredFrom
+ state State
+ store storage.Store
+ storageService *storageService
+ imageContext *types.SystemContext
+ defaultOCIRuntime *OCIRuntime
+ ociRuntimes map[string]*OCIRuntime
+ netPlugin ocicni.CNIPlugin
+ conmonPath string
+ imageRuntime *image.Runtime
+ firewallBackend firewall.FirewallBackend
+ lockManager lock.Manager
+ configuredFrom *runtimeConfiguredFrom
// doRenumber indicates that the runtime should perform a lock renumber
// during initialization.
@@ -112,14 +125,9 @@ type Runtime struct {
	// mechanism to read and write event logs
eventer events.Eventer
-}
-// OCIRuntimePath contains information about an OCI runtime.
-type OCIRuntimePath struct {
- // Name of the runtime to refer to by the --runtime flag
- Name string `toml:"name"`
- // Paths to check for this executable
- Paths []string `toml:"paths"`
+ // noStore indicates whether we need to interact with a store or not
+ noStore bool
}
// RuntimeConfig contains configuration options used to set up the runtime
@@ -151,6 +159,8 @@ type RuntimeConfig struct {
OCIRuntime string `toml:"runtime"`
// OCIRuntimes are the set of configured OCI runtimes (default is runc)
OCIRuntimes map[string][]string `toml:"runtimes"`
+ // RuntimeSupportsJSON is the list of the OCI runtimes that support --format=json
+ RuntimeSupportsJSON []string `toml:"runtime_supports_json"`
// RuntimePath is the path to OCI runtime binary for launching
// containers.
// The first path pointing to a valid file will be used
@@ -229,10 +239,15 @@ type RuntimeConfig struct {
// pods.
NumLocks uint32 `toml:"num_locks,omitempty"`
+ // LockType is the type of locking to use.
+ LockType string `toml:"lock_type,omitempty"`
+
// EventsLogger determines where events should be logged
EventsLogger string `toml:"events_logger"`
// EventsLogFilePath is where the events log is stored.
- EventsLogFilePath string `toml:-"events_logfile_path"`
+ EventsLogFilePath string `toml:"-events_logfile_path"`
+	// DetachKeys is the sequence of keys used to detach a container
+ DetachKeys string `toml:"detach_keys"`
}
// runtimeConfiguredFrom is a struct used during early runtime init to help
@@ -250,6 +265,7 @@ type runtimeConfiguredFrom struct {
volPathSet bool
conmonPath bool
conmonEnvVars bool
+ initPath bool
ociRuntimes bool
runtimePath bool
cniPluginDir bool
@@ -281,33 +297,74 @@ func defaultRuntimeConfig() (RuntimeConfig, error) {
},
ConmonPath: []string{
"/usr/libexec/podman/conmon",
- "/usr/libexec/crio/conmon",
"/usr/local/lib/podman/conmon",
- "/usr/local/libexec/crio/conmon",
"/usr/bin/conmon",
"/usr/sbin/conmon",
- "/usr/lib/crio/bin/conmon",
+ "/usr/local/bin/conmon",
+ "/usr/local/sbin/conmon",
},
ConmonEnvVars: []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
},
- InitPath: DefaultInitPath,
+ InitPath: define.DefaultInitPath,
CgroupManager: SystemdCgroupsManager,
StaticDir: filepath.Join(storeOpts.GraphRoot, "libpod"),
TmpDir: "",
MaxLogSize: -1,
NoPivotRoot: false,
- CNIConfigDir: "/etc/cni/net.d/",
+ CNIConfigDir: etcDir + "/cni/net.d/",
CNIPluginDir: []string{"/usr/libexec/cni", "/usr/lib/cni", "/usr/local/lib/cni", "/opt/cni/bin"},
- InfraCommand: DefaultInfraCommand,
- InfraImage: DefaultInfraImage,
+ InfraCommand: define.DefaultInfraCommand,
+ InfraImage: define.DefaultInfraImage,
EnablePortReservation: true,
EnableLabeling: true,
NumLocks: 2048,
EventsLogger: events.DefaultEventerType.String(),
+ DetachKeys: DefaultDetachKeys,
+ LockType: "shm",
}, nil
}
+// SetXdgRuntimeDir ensures the XDG_RUNTIME_DIR env variable is set.
+// containers/image uses XDG_RUNTIME_DIR to locate the auth file.
+// It internally calls EnableLinger() so that the user's processes are not
+// killed once the session is terminated. EnableLinger() also attempts to
+// get the runtime directory when XDG_RUNTIME_DIR is not specified.
+func SetXdgRuntimeDir() error {
+ if !rootless.IsRootless() {
+ return nil
+ }
+
+ runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
+
+ runtimeDirLinger, err := rootless.EnableLinger()
+ if err != nil {
+ return errors.Wrapf(err, "error enabling user session")
+ }
+ if runtimeDir == "" && runtimeDirLinger != "" {
+ if _, err := os.Stat(runtimeDirLinger); err != nil && os.IsNotExist(err) {
+ chWait := make(chan error)
+ defer close(chWait)
+ if _, err := WaitForFile(runtimeDirLinger, chWait, time.Second*10); err != nil {
+ return errors.Wrapf(err, "waiting for directory '%s'", runtimeDirLinger)
+ }
+ }
+ runtimeDir = runtimeDirLinger
+ }
+
+ if runtimeDir == "" {
+ var err error
+ runtimeDir, err = util.GetRootlessRuntimeDir()
+ if err != nil {
+ return err
+ }
+ }
+ if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
+ return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
+ }
+ return nil
+}
+
func getDefaultTmpDir() (string, error) {
if !rootless.IsRootless() {
return "/var/run/libpod", nil
@@ -330,25 +387,6 @@ func getDefaultTmpDir() (string, error) {
return filepath.Join(libpodRuntimeDir, "tmp"), nil
}
-// SetXdgRuntimeDir ensures the XDG_RUNTIME_DIR env variable is set
-// containers/image uses XDG_RUNTIME_DIR to locate the auth file.
-func SetXdgRuntimeDir(val string) error {
- if !rootless.IsRootless() {
- return nil
- }
- if val == "" {
- var err error
- val, err = util.GetRootlessRuntimeDir()
- if err != nil {
- return err
- }
- }
- if err := os.Setenv("XDG_RUNTIME_DIR", val); err != nil {
- return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
- }
- return nil
-}
-
// NewRuntime creates a new container runtime
// Options can be passed to override the default configuration for the runtime
func NewRuntime(ctx context.Context, options ...RuntimeOption) (runtime *Runtime, err error) {
@@ -367,6 +405,73 @@ func NewRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
return newRuntimeFromConfig(ctx, userConfigPath, options...)
}
+func homeDir() (string, error) {
+ home := os.Getenv("HOME")
+ if home == "" {
+ usr, err := user.LookupId(fmt.Sprintf("%d", rootless.GetRootlessUID()))
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to resolve HOME directory")
+ }
+ home = usr.HomeDir
+ }
+ return home, nil
+}
+
+func getRootlessConfigPath() (string, error) {
+ home, err := homeDir()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(home, ".config/containers/libpod.conf"), nil
+}
+
+func getConfigPath() (string, error) {
+ if rootless.IsRootless() {
+ path, err := getRootlessConfigPath()
+ if err != nil {
+ return "", err
+ }
+ if _, err := os.Stat(path); err == nil {
+ return path, nil
+ }
+ return "", err
+ }
+ if _, err := os.Stat(OverrideConfigPath); err == nil {
+ // Use the override configuration path
+ return OverrideConfigPath, nil
+ }
+ if _, err := os.Stat(ConfigPath); err == nil {
+ return ConfigPath, nil
+ }
+ return "", nil
+}
+
+// DefaultRuntimeConfig reads the default config path and returns the RuntimeConfig
+func DefaultRuntimeConfig() (*RuntimeConfig, error) {
+ configPath, err := getConfigPath()
+ if err != nil {
+ return nil, err
+ }
+
+ contents, err := ioutil.ReadFile(configPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading configuration file %s", configPath)
+ }
+
+ // Decode the file into a fresh RuntimeConfig and return it; unlike
+ // newRuntimeFromConfig, a single decode is enough here since we only
+ // need the values, not which of them were explicitly set.
+ tmpConfig := new(RuntimeConfig)
+ if _, err := toml.Decode(string(contents), tmpConfig); err != nil {
+ return nil, errors.Wrapf(err, "error decoding configuration file %s",
+ configPath)
+ }
+ return tmpConfig, nil
+}
+
func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ...RuntimeOption) (runtime *Runtime, err error) {
runtime = new(Runtime)
runtime.config = new(RuntimeConfig)
@@ -395,31 +500,21 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
runtime.config.StaticDir = filepath.Join(storageConf.GraphRoot, "libpod")
runtime.config.VolumePath = filepath.Join(storageConf.GraphRoot, "volumes")
- configPath := ConfigPath
- foundConfig := true
- rootlessConfigPath := ""
+ configPath, err := getConfigPath()
+ if err != nil {
+ return nil, err
+ }
if rootless.IsRootless() {
- home := os.Getenv("HOME")
+ home, err := homeDir()
+ if err != nil {
+ return nil, err
+ }
if runtime.config.SignaturePolicyPath == "" {
newPath := filepath.Join(home, ".config/containers/policy.json")
if _, err := os.Stat(newPath); err == nil {
runtime.config.SignaturePolicyPath = newPath
}
}
-
- rootlessConfigPath = filepath.Join(home, ".config/containers/libpod.conf")
-
- runtimeDir, err := util.GetRootlessRuntimeDir()
- if err != nil {
- return nil, err
- }
-
- // containers/image uses XDG_RUNTIME_DIR to locate the auth file.
- // So make sure the env variable is set.
- if err := SetXdgRuntimeDir(runtimeDir); err != nil {
- return nil, errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
- }
-
}
if userConfigPath != "" {
@@ -429,21 +524,10 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
// when it doesn't exist
return nil, errors.Wrapf(err, "cannot stat %s", configPath)
}
- } else if rootless.IsRootless() {
- configPath = rootlessConfigPath
- if _, err := os.Stat(configPath); err != nil {
- foundConfig = false
- }
- } else if _, err := os.Stat(OverrideConfigPath); err == nil {
- // Use the override configuration path
- configPath = OverrideConfigPath
- } else if _, err := os.Stat(ConfigPath); err != nil {
- // Both stat checks failed, no config found
- foundConfig = false
}
// If we have a valid configuration file, load it in
- if foundConfig {
+ if configPath != "" {
contents, err := ioutil.ReadFile(configPath)
if err != nil {
return nil, errors.Wrapf(err, "error reading configuration file %s", configPath)
@@ -475,6 +559,9 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
if tmpConfig.ConmonEnvVars != nil {
runtime.configuredFrom.conmonEnvVars = true
}
+ if tmpConfig.InitPath != "" {
+ runtime.configuredFrom.initPath = true
+ }
if tmpConfig.OCIRuntimes != nil {
runtime.configuredFrom.ociRuntimes = true
}
@@ -512,6 +599,9 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
if !runtime.configuredFrom.conmonEnvVars {
runtime.config.ConmonEnvVars = tmpConfig.ConmonEnvVars
}
+ if !runtime.configuredFrom.initPath {
+ runtime.config.InitPath = tmpConfig.InitPath
+ }
if !runtime.configuredFrom.ociRuntimes {
runtime.config.OCIRuntimes = tmpConfig.OCIRuntimes
}
@@ -534,7 +624,13 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
return nil, errors.Wrapf(err, "error configuring runtime")
}
}
- if rootlessConfigPath != "" {
+
+ if rootless.IsRootless() && configPath == "" {
+ configPath, err := getRootlessConfigPath()
+ if err != nil {
+ return nil, err
+ }
+
// storage.conf
storageConfFile, err := storage.DefaultConfigFile(rootless.IsRootless())
if err != nil {
@@ -546,17 +642,21 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
}
}
- if !foundConfig {
- os.MkdirAll(filepath.Dir(rootlessConfigPath), 0755)
- file, err := os.OpenFile(rootlessConfigPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if configPath != "" {
+ if err := os.MkdirAll(filepath.Dir(configPath), 0755); err != nil {
+ return nil, err
+ }
+ file, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
if err != nil && !os.IsExist(err) {
- return nil, errors.Wrapf(err, "cannot open file %s", rootlessConfigPath)
+ return nil, errors.Wrapf(err, "cannot open file %s", configPath)
}
if err == nil {
defer file.Close()
enc := toml.NewEncoder(file)
if err := enc.Encode(runtime.config); err != nil {
- os.Remove(rootlessConfigPath)
+ if removeErr := os.Remove(configPath); removeErr != nil {
+ logrus.Debugf("unable to remove %s: %q", configPath, removeErr)
+ }
}
}
}
@@ -567,66 +667,65 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
return runtime, nil
}
-// Make a new runtime based on the given configuration
-// Sets up containers/storage, state store, OCI runtime
-func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
- // Backward compatibility for `runtime_path`
- if runtime.config.RuntimePath != nil {
- // Don't print twice in rootless mode.
- if os.Geteuid() == 0 {
- logrus.Warningf("The configuration is using `runtime_path`, which is deprecated and will be removed in future. Please use `runtimes` and `runtime`")
- logrus.Warningf("If you are using both `runtime_path` and `runtime`, the configuration from `runtime_path` is used")
- }
+func getLockManager(runtime *Runtime) (lock.Manager, error) {
+ var err error
+ var manager lock.Manager
- // Transform `runtime_path` into `runtimes` and `runtime`.
- name := filepath.Base(runtime.config.RuntimePath[0])
- runtime.config.OCIRuntime = name
- runtime.config.OCIRuntimes = map[string][]string{name: runtime.config.RuntimePath}
- }
-
- // Find a working OCI runtime binary
- foundRuntime := false
- // If runtime is an absolute path, then use it as it is.
- if runtime.config.OCIRuntime != "" && runtime.config.OCIRuntime[0] == '/' {
- foundRuntime = true
- runtime.ociRuntimePath = OCIRuntimePath{Name: filepath.Base(runtime.config.OCIRuntime), Paths: []string{runtime.config.OCIRuntime}}
- stat, err := os.Stat(runtime.config.OCIRuntime)
+ switch runtime.config.LockType {
+ case "file":
+ lockPath := filepath.Join(runtime.config.TmpDir, "locks")
+ manager, err = lock.OpenFileLockManager(lockPath)
if err != nil {
- if os.IsNotExist(err) {
- return errors.Wrapf(err, "the specified OCI runtime %s does not exist", runtime.config.OCIRuntime)
+ if os.IsNotExist(errors.Cause(err)) {
+ manager, err = lock.NewFileLockManager(lockPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to get new file lock manager")
+ }
+ } else {
+ return nil, err
}
- return errors.Wrapf(err, "cannot stat the OCI runtime path %s", runtime.config.OCIRuntime)
}
- if !stat.Mode().IsRegular() {
- return fmt.Errorf("the specified OCI runtime %s is not a valid file", runtime.config.OCIRuntime)
+
+ case "", "shm":
+ lockPath := DefaultSHMLockPath
+ if rootless.IsRootless() {
+ lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
}
- } else {
- // If not, look it up in the configuration.
- paths := runtime.config.OCIRuntimes[runtime.config.OCIRuntime]
- if paths != nil {
- for _, path := range paths {
- stat, err := os.Stat(path)
+ // Set up the lock manager
+ manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ if os.IsNotExist(errors.Cause(err)) {
+ manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
if err != nil {
- if os.IsNotExist(err) {
- continue
- }
- return errors.Wrapf(err, "cannot stat %s", path)
+ return nil, errors.Wrapf(err, "failed to get new shm lock manager")
}
- if !stat.Mode().IsRegular() {
- continue
+ } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
+ logrus.Debugf("Number of locks does not match - removing old locks")
+
+ // ERANGE indicates a lock numbering mismatch.
+ // Since we're renumbering, this is not fatal.
+ // Remove the earlier set of locks and recreate.
+ if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
+ return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
}
- foundRuntime = true
- runtime.ociRuntimePath = OCIRuntimePath{Name: runtime.config.OCIRuntime, Paths: []string{path}}
- break
+
+ manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
}
}
+ default:
+ return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.LockType)
}
- if !foundRuntime {
- return errors.Wrapf(ErrInvalidArg,
- "could not find a working binary (configured options: %v)",
- runtime.config.OCIRuntimes)
- }
+ return manager, nil
+}
+// Make a new runtime based on the given configuration
+// Sets up containers/storage, state store, OCI runtime
+func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
// Find a working conmon binary
foundConmon := false
for _, path := range runtime.config.ConmonPath {
@@ -642,7 +741,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
break
}
if !foundConmon {
- return errors.Wrapf(ErrInvalidArg,
+ return errors.Wrapf(define.ErrInvalidArg,
"could not find a working conmon binary (configured options: %v)",
runtime.config.ConmonPath)
}
@@ -665,7 +764,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
}
runtime.state = state
case SQLiteStateStore:
- return errors.Wrapf(ErrInvalidArg, "SQLite state is currently disabled")
+ return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled")
case BoltDBStateStore:
dbPath := filepath.Join(runtime.config.StaticDir, "bolt_state.db")
@@ -675,7 +774,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
}
runtime.state = state
default:
- return errors.Wrapf(ErrInvalidArg, "unrecognized state type passed")
+ return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed")
}
// Grab config from the database so we can reset some defaults
@@ -752,39 +851,23 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
var store storage.Store
if os.Geteuid() != 0 {
logrus.Debug("Not configuring container store")
+ } else if runtime.noStore {
+ logrus.Debug("No store required. Not opening container store.")
} else {
- store, err = storage.GetStore(runtime.config.StorageConfig)
- if err != nil {
+ if err := runtime.configureStore(); err != nil {
return err
}
-
- defer func() {
- if err != nil && store != nil {
- // Don't forcibly shut down
- // We could be opening a store in use by another libpod
- _, err2 := store.Shutdown(false)
- if err2 != nil {
- logrus.Errorf("Error removing store for partially-created runtime: %s", err2)
- }
- }
- }()
}
-
- runtime.store = store
- is.Transport.SetStore(store)
-
- // Set up image runtime and store in runtime
- ir := image.NewImageRuntimeFromStore(runtime.store)
-
- runtime.imageRuntime = ir
-
- // Setting signaturepolicypath
- ir.SignaturePolicyPath = runtime.config.SignaturePolicyPath
-
- // Set logfile path for events
- ir.EventsLogFilePath = runtime.config.EventsLogFilePath
- // Set logger type
- ir.EventsLogger = runtime.config.EventsLogger
+ defer func() {
+ if err != nil && store != nil {
+ // Don't forcibly shut down
+ // We could be opening a store in use by another libpod
+ _, err2 := store.Shutdown(false)
+ if err2 != nil {
+ logrus.Errorf("Error removing store for partially-created runtime: %s", err2)
+ }
+ }
+ }()
// Setup the eventer
eventer, err := runtime.newEventer()
@@ -792,15 +875,9 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
return err
}
runtime.eventer = eventer
- ir.Eventer = eventer
-
- // Set up a storage service for creating container root filesystems from
- // images
- storageService, err := getStorageService(runtime.store)
- if err != nil {
- return err
+ if runtime.imageRuntime != nil {
+ runtime.imageRuntime.Eventer = eventer
}
- runtime.storageService = storageService
// Set up containers/image
runtime.imageContext = &types.SystemContext{
@@ -823,16 +900,107 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
}
}
- // Make an OCI runtime to perform container operations
- ociRuntime, err := newOCIRuntime(runtime.ociRuntimePath,
- runtime.conmonPath, runtime.config.ConmonEnvVars,
- runtime.config.CgroupManager, runtime.config.TmpDir,
- runtime.config.MaxLogSize, runtime.config.NoPivotRoot,
- runtime.config.EnablePortReservation)
- if err != nil {
- return err
+ // Get us at least one working OCI runtime.
+ runtime.ociRuntimes = make(map[string]*OCIRuntime)
+
+ // Is the old runtime_path defined?
+ if runtime.config.RuntimePath != nil {
+ // Don't print twice in rootless mode.
+ if os.Geteuid() == 0 {
+ logrus.Warningf("The configuration is using `runtime_path`, which is deprecated and will be removed in future. Please use `runtimes` and `runtime`")
+ logrus.Warningf("If you are using both `runtime_path` and `runtime`, the configuration from `runtime_path` is used")
+ }
+
+ if len(runtime.config.RuntimePath) == 0 {
+ return errors.Wrapf(define.ErrInvalidArg, "empty runtime path array passed")
+ }
+
+ name := filepath.Base(runtime.config.RuntimePath[0])
+
+ supportsJSON := false
+ for _, r := range runtime.config.RuntimeSupportsJSON {
+ if r == name {
+ supportsJSON = true
+ break
+ }
+ }
+
+ ociRuntime, err := newOCIRuntime(name, runtime.config.RuntimePath, runtime.conmonPath, runtime.config, supportsJSON)
+ if err != nil {
+ return err
+ }
+
+ runtime.ociRuntimes[name] = ociRuntime
+ runtime.defaultOCIRuntime = ociRuntime
+ }
+
+ // Initialize remaining OCI runtimes
+ for name, paths := range runtime.config.OCIRuntimes {
+ if len(paths) == 0 {
+ return errors.Wrapf(define.ErrInvalidArg, "must provide at least 1 path to OCI runtime %s", name)
+ }
+
+ supportsJSON := false
+ for _, r := range runtime.config.RuntimeSupportsJSON {
+ if r == name {
+ supportsJSON = true
+ break
+ }
+ }
+
+ ociRuntime, err := newOCIRuntime(name, paths, runtime.conmonPath, runtime.config, supportsJSON)
+ if err != nil {
+ // Don't fatally error.
+ // This will allow us to ship configs including optional
+ // runtimes that might not be installed (crun, kata).
+ // Only a warnf so default configs don't spew errors.
+ logrus.Warnf("Error initializing configured OCI runtime %s: %v", name, err)
+ continue
+ }
+
+ runtime.ociRuntimes[name] = ociRuntime
+ }
+
+ // Do we have a default OCI runtime?
+ if runtime.config.OCIRuntime != "" {
+ // If the string starts with / it's a path to a runtime
+ // executable.
+ if strings.HasPrefix(runtime.config.OCIRuntime, "/") {
+ name := filepath.Base(runtime.config.OCIRuntime)
+
+ supportsJSON := false
+ for _, r := range runtime.config.RuntimeSupportsJSON {
+ if r == name {
+ supportsJSON = true
+ break
+ }
+ }
+
+ ociRuntime, err := newOCIRuntime(name, []string{runtime.config.OCIRuntime}, runtime.conmonPath, runtime.config, supportsJSON)
+ if err != nil {
+ return err
+ }
+
+ runtime.ociRuntimes[name] = ociRuntime
+ runtime.defaultOCIRuntime = ociRuntime
+ } else {
+ ociRuntime, ok := runtime.ociRuntimes[runtime.config.OCIRuntime]
+ if !ok {
+ return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.OCIRuntime)
+ }
+ runtime.defaultOCIRuntime = ociRuntime
+ }
+ }
+
+ // Do we have at least one valid OCI runtime?
+ if len(runtime.ociRuntimes) == 0 {
+ return errors.Wrapf(define.ErrInvalidArg, "no OCI runtime has been configured")
+ }
+
+ // Do we have a default runtime?
+ if runtime.defaultOCIRuntime == nil {
+ return errors.Wrapf(define.ErrInvalidArg, "no default OCI runtime was configured")
}
- runtime.ociRuntime = ociRuntime
// Make the per-boot files directory if it does not exist
if err := os.MkdirAll(runtime.config.TmpDir, 0755); err != nil {
@@ -915,37 +1083,10 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
}
}
- lockPath := DefaultSHMLockPath
- if rootless.IsRootless() {
- lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
- }
- // Set up the lock manager
- manager, err := lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
+ runtime.lockManager, err = getLockManager(runtime)
if err != nil {
- if os.IsNotExist(errors.Cause(err)) {
- manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
- if err != nil {
- return errors.Wrapf(err, "failed to get new shm lock manager")
- }
- } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
- logrus.Debugf("Number of locks does not match - removing old locks")
-
- // ERANGE indicates a lock numbering mismatch.
- // Since we're renumbering, this is not fatal.
- // Remove the earlier set of locks and recreate.
- if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
- return errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
- }
-
- manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
- if err != nil {
- return err
- }
- } else {
- return err
- }
+ return err
}
- runtime.lockManager = manager
// If we're renumbering locks, do it now.
// It breaks out of normal runtime init, and will not return a valid
@@ -959,6 +1100,13 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
// If we need to refresh the state, do it now - things are guaranteed to
// be set up by now.
if doRefresh {
+ // Ensure we have a store before refresh occurs
+ if runtime.store == nil {
+ if err := runtime.configureStore(); err != nil {
+ return err
+ }
+ }
+
if err2 := runtime.refresh(runtimeAliveFile); err2 != nil {
return err2
}
@@ -983,7 +1131,7 @@ func (r *Runtime) GetConfig() (*RuntimeConfig, error) {
defer r.lock.RUnlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, define.ErrRuntimeStopped
}
config := new(RuntimeConfig)
@@ -996,6 +1144,13 @@ func (r *Runtime) GetConfig() (*RuntimeConfig, error) {
return config, nil
}
+// DeferredShutdown shuts down the runtime without exposing any
+// errors. This is only meant to be used when the runtime is being
+// shut down within a defer statement; otherwise use Shutdown.
+func (r *Runtime) DeferredShutdown(force bool) {
+ _ = r.Shutdown(force)
+}
+
// Shutdown shuts down the runtime and associated containers and storage
// If force is true, containers and mounted storage will be shut down before
// cleaning up; if force is false, an error will be returned if there are
@@ -1005,7 +1160,7 @@ func (r *Runtime) Shutdown(force bool) error {
defer r.lock.Unlock()
if !r.valid {
- return ErrRuntimeStopped
+ return define.ErrRuntimeStopped
}
r.valid = false
@@ -1017,7 +1172,7 @@ func (r *Runtime) Shutdown(force bool) error {
logrus.Errorf("Error retrieving containers from database: %v", err)
} else {
for _, ctr := range ctrs {
- if err := ctr.StopWithTimeout(CtrRemoveTimeout); err != nil {
+ if err := ctr.StopWithTimeout(define.CtrRemoveTimeout); err != nil {
logrus.Errorf("Error stopping container %s: %v", ctr.ID(), err)
}
}
@@ -1025,6 +1180,8 @@ func (r *Runtime) Shutdown(force bool) error {
}
var lastError error
+ // If no store was requested, it can be nil and there is no need to
+ // attempt to shut it down
if r.store != nil {
if _, err := r.store.Shutdown(force); err != nil {
lastError = errors.Wrapf(err, "Error shutting down container storage")
@@ -1092,21 +1249,21 @@ func (r *Runtime) refresh(alivePath string) error {
}
// Info returns the store and host information
-func (r *Runtime) Info() ([]InfoData, error) {
- info := []InfoData{}
+func (r *Runtime) Info() ([]define.InfoData, error) {
+ info := []define.InfoData{}
// get host information
hostInfo, err := r.hostInfo()
if err != nil {
return nil, errors.Wrapf(err, "error getting host info")
}
- info = append(info, InfoData{Type: "host", Data: hostInfo})
+ info = append(info, define.InfoData{Type: "host", Data: hostInfo})
// get store information
storeInfo, err := r.storeInfo()
if err != nil {
return nil, errors.Wrapf(err, "error getting store info")
}
- info = append(info, InfoData{Type: "store", Data: storeInfo})
+ info = append(info, define.InfoData{Type: "store", Data: storeInfo})
reg, err := sysreg.GetRegistries()
if err != nil {
@@ -1126,7 +1283,7 @@ func (r *Runtime) Info() ([]InfoData, error) {
return nil, errors.Wrapf(err, "error getting registries")
}
registries["blocked"] = breg
- info = append(info, InfoData{Type: "registries", Data: registries})
+ info = append(info, define.InfoData{Type: "registries", Data: registries})
return info, nil
}
@@ -1138,7 +1295,7 @@ func (r *Runtime) generateName() (string, error) {
if _, err := r.state.LookupContainer(name); err == nil {
continue
} else {
- if errors.Cause(err) != ErrNoSuchCtr {
+ if errors.Cause(err) != define.ErrNoSuchCtr {
return "", err
}
}
@@ -1146,7 +1303,7 @@ func (r *Runtime) generateName() (string, error) {
if _, err := r.state.LookupPod(name); err == nil {
continue
} else {
- if errors.Cause(err) != ErrNoSuchPod {
+ if errors.Cause(err) != define.ErrNoSuchPod {
return "", err
}
}
@@ -1155,7 +1312,37 @@ func (r *Runtime) generateName() (string, error) {
// The code should never reach here.
}
-// ImageRuntime returns the imageruntime for image resolution
+// Configure store and image runtime
+func (r *Runtime) configureStore() error {
+ store, err := storage.GetStore(r.config.StorageConfig)
+ if err != nil {
+ return err
+ }
+
+ r.store = store
+ is.Transport.SetStore(store)
+
+ // Set up a storage service for creating container root filesystems from
+ // images
+ storageService, err := getStorageService(r.store)
+ if err != nil {
+ return err
+ }
+ r.storageService = storageService
+
+ ir := image.NewImageRuntimeFromStore(r.store)
+ ir.SignaturePolicyPath = r.config.SignaturePolicyPath
+ ir.EventsLogFilePath = r.config.EventsLogFilePath
+ ir.EventsLogger = r.config.EventsLogger
+
+ r.imageRuntime = ir
+
+ return nil
+}
+
+// ImageRuntime returns the imageruntime for image operations.
+// If WithNoStore() was used, no image runtime will be available, and this
+// function will return nil.
func (r *Runtime) ImageRuntime() *image.Runtime {
return r.imageRuntime
}
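As a reading aid for the refactored runtime lifecycle above, a minimal, hedged usage sketch follows. It only exercises calls that appear in this patch (NewRuntime, GetConfig, DeferredShutdown); the import path github.com/containers/libpod/libpod and the option-free construction are assumptions, not part of the diff, and extra RuntimeOption values such as WithNoStore() could equally be passed.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containers/libpod/libpod"
)

func main() {
	ctx := context.Background()

	// Construct a runtime with the defaults; RuntimeOption values such as
	// WithNoStore() could be passed here to skip opening c/storage.
	rt, err := libpod.NewRuntime(ctx)
	if err != nil {
		log.Fatalf("creating runtime: %v", err)
	}
	// DeferredShutdown deliberately swallows the error, which is what a
	// defer wants; call Shutdown directly when the error matters.
	defer rt.DeferredShutdown(false)

	cfg, err := rt.GetConfig()
	if err != nil {
		log.Fatalf("reading runtime config: %v", err)
	}
	fmt.Println("default OCI runtime:", cfg.OCIRuntime)
	fmt.Println("detach keys:", cfg.DetachKeys)
}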
diff --git a/libpod/runtime_cstorage.go b/libpod/runtime_cstorage.go
new file mode 100644
index 000000000..586db5a1e
--- /dev/null
+++ b/libpod/runtime_cstorage.go
@@ -0,0 +1,119 @@
+package libpod
+
+import (
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/storage"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// StorageContainer represents a container present in c/storage, whether or
+// not it is also known to libpod.
+type StorageContainer struct {
+ ID string
+ Names []string
+ PresentInLibpod bool
+}
+
+// ListStorageContainers lists all containers visible to c/storage.
+func (r *Runtime) ListStorageContainers() ([]*StorageContainer, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ finalCtrs := []*StorageContainer{}
+
+ ctrs, err := r.store.Containers()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, ctr := range ctrs {
+ storageCtr := new(StorageContainer)
+ storageCtr.ID = ctr.ID
+ storageCtr.Names = ctr.Names
+
+ // Look up if container is in state
+ hasCtr, err := r.state.HasContainer(ctr.ID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error looking up container %s in state", ctr.ID)
+ }
+
+ storageCtr.PresentInLibpod = hasCtr
+
+ finalCtrs = append(finalCtrs, storageCtr)
+ }
+
+ return finalCtrs, nil
+}
+
+// RemoveStorageContainer removes a container from c/storage.
+// The container WILL NOT be removed if it exists in libpod.
+// Accepts ID or full name of container.
+// If force is set, the container will be unmounted first to ensure removal.
+func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ targetID, err := r.store.Lookup(idOrName)
+ if err != nil {
+ if err == storage.ErrLayerUnknown {
+ return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID or name %q found", idOrName)
+ }
+ return errors.Wrapf(err, "error looking up container %q", idOrName)
+ }
+
+ // Lookup returns an ID but it's not guaranteed to be a container ID.
+ // So we can still error here.
+ ctr, err := r.store.Container(targetID)
+ if err != nil {
+ if err == storage.ErrContainerUnknown {
+ return errors.Wrapf(define.ErrNoSuchCtr, "%q does not refer to a container", idOrName)
+ }
+ return errors.Wrapf(err, "error retrieving container %q", idOrName)
+ }
+
+ // Error out if the container exists in libpod
+ exists, err := r.state.HasContainer(ctr.ID)
+ if err != nil {
+ return err
+ }
+ if exists {
+ return errors.Wrapf(define.ErrCtrExists, "refusing to remove %q as it exists in libpod as container %s", idOrName, ctr.ID)
+ }
+
+ if !force {
+ timesMounted, err := r.store.Mounted(ctr.ID)
+ if err != nil {
+ if err == storage.ErrContainerUnknown {
+ // Container was removed from under us.
+ // It's gone, so don't bother erroring.
+ logrus.Warnf("Storage for container %s already removed", ctr.ID)
+ return nil
+ }
+ return errors.Wrapf(err, "error looking up container %q mounts", idOrName)
+ }
+ if timesMounted > 0 {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "container %q is mounted and cannot be removed without using force", idOrName)
+ }
+ } else {
+ if _, err := r.store.Unmount(ctr.ID, true); err != nil {
+ if err == storage.ErrContainerUnknown {
+ // Container again gone, no error
+ logrus.Warnf("Storage for container %s already removed", ctr.ID)
+ return nil
+ }
+ return errors.Wrapf(err, "error unmounting container %q", idOrName)
+ }
+ }
+
+ if err := r.store.DeleteContainer(ctr.ID); err != nil {
+ if err == storage.ErrContainerUnknown {
+ // Container again gone, no error
+ logrus.Warnf("Storage for container %s already removed", ctr.ID)
+ return nil
+ }
+ return errors.Wrapf(err, "error removing storage for container %q", idOrName)
+ }
+
+ return nil
+}
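A hedged usage sketch for the new c/storage helpers added above: it lists the containers visible to c/storage and removes only those not tracked by libpod. The import path and the choice not to force-unmount are assumptions for illustration, not behavior mandated by the patch.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containers/libpod/libpod"
)

func main() {
	rt, err := libpod.NewRuntime(context.Background())
	if err != nil {
		log.Fatalf("creating runtime: %v", err)
	}
	defer rt.DeferredShutdown(false)

	ctrs, err := rt.ListStorageContainers()
	if err != nil {
		log.Fatalf("listing storage containers: %v", err)
	}
	for _, c := range ctrs {
		if c.PresentInLibpod {
			// Tracked by libpod; RemoveStorageContainer would refuse it anyway.
			continue
		}
		fmt.Printf("removing orphaned storage container %s %v\n", c.ID, c.Names)
		// force=false: the call fails if the container is still mounted.
		if err := rt.RemoveStorageContainer(c.ID, false); err != nil {
			log.Printf("could not remove %s: %v", c.ID, err)
		}
	}
}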
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 0c8d3edab..e57ab4634 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -8,21 +8,17 @@ import (
"strings"
"time"
+ config2 "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
- "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/rootless"
- "github.com/containers/storage"
"github.com/containers/storage/pkg/stringid"
spec "github.com/opencontainers/runtime-spec/specs-go"
- opentracing "github.com/opentracing/opentracing-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
-// CtrRemoveTimeout is the default number of seconds to wait after stopping a container
-// before sending the kill signal
-const CtrRemoveTimeout = 10
-
// Contains the public Runtime API for containers
// A CtrCreateOption is a functional option which alters the Container created
@@ -34,30 +30,57 @@ type CtrCreateOption func(*Container) error
// A true return will include the container, a false return will exclude it.
type ContainerFilter func(*Container) bool
-// NewContainer creates a new container from a given OCI config
+// NewContainer creates a new container from a given OCI config.
func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (c *Container, err error) {
r.lock.Lock()
defer r.lock.Unlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, config2.ErrRuntimeStopped
}
return r.newContainer(ctx, rSpec, options...)
}
-func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (c *Container, err error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "newContainer")
- span.SetTag("type", "runtime")
- defer span.Finish()
+// RestoreContainer re-creates a container from an imported checkpoint
+func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config *ContainerConfig) (c *Container, err error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ if !r.valid {
+ return nil, config2.ErrRuntimeStopped
+ }
- if rSpec == nil {
- return nil, errors.Wrapf(ErrInvalidArg, "must provide a valid runtime spec to create container")
+ ctr, err := r.initContainerVariables(rSpec, config)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error initializing container variables")
}
+ return r.setupContainer(ctx, ctr)
+}
+func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (c *Container, err error) {
+ if rSpec == nil {
+ return nil, errors.Wrapf(config2.ErrInvalidArg, "must provide a valid runtime spec to create container")
+ }
ctr := new(Container)
ctr.config = new(ContainerConfig)
ctr.state = new(ContainerState)
- ctr.config.ID = stringid.GenerateNonCryptoID()
+ if config == nil {
+ ctr.config.ID = stringid.GenerateNonCryptoID()
+ ctr.config.ShmSize = DefaultShmSize
+ } else {
+ // This is a restore from an imported checkpoint
+ ctr.restoreFromCheckpoint = true
+ if err := JSONDeepCopy(config, ctr.config); err != nil {
+ return nil, errors.Wrapf(err, "error copying container config for restore")
+ }
+ // If the ID is empty, a new name for the restored container was requested
+ if ctr.config.ID == "" {
+ ctr.config.ID = stringid.GenerateNonCryptoID()
+ // Fixup ExitCommand with new ID
+ ctr.config.ExitCommand[len(ctr.config.ExitCommand)-1] = ctr.config.ID
+ }
+ // Reset the log path to point to the default
+ ctr.config.LogPath = ""
+ }
ctr.config.Spec = new(spec.Spec)
if err := JSONDeepCopy(rSpec, ctr.config.Spec); err != nil {
@@ -65,13 +88,11 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
ctr.config.CreatedTime = time.Now()
- ctr.config.ShmSize = DefaultShmSize
-
ctr.state.BindMounts = make(map[string]string)
- ctr.config.StopTimeout = CtrRemoveTimeout
+ ctr.config.StopTimeout = config2.CtrRemoveTimeout
- ctr.config.OCIRuntime = r.config.OCIRuntime
+ ctr.config.OCIRuntime = r.defaultOCIRuntime.name
// Set namespace based on current runtime namespace
// Do so before options run so they can override it
@@ -80,12 +101,29 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
ctr.runtime = r
+
+ return ctr, nil
+}
+
+func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (c *Container, err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "newContainer")
+ span.SetTag("type", "runtime")
+ defer span.Finish()
+
+ ctr, err := r.initContainerVariables(rSpec, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error initializing container variables")
+ }
+
for _, option := range options {
if err := option(ctr); err != nil {
return nil, errors.Wrapf(err, "error running container create option")
}
}
+ return r.setupContainer(ctx, ctr)
+}
+func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Container, err error) {
// Allocate a lock for the container
lock, err := r.lockManager.AllocateLock()
if err != nil {
@@ -95,10 +133,28 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
ctr.config.LockID = ctr.lock.ID()
logrus.Debugf("Allocated lock %d for container %s", ctr.lock.ID(), ctr.ID())
+ defer func() {
+ if err != nil {
+ if err2 := ctr.lock.Free(); err2 != nil {
+ logrus.Errorf("Error freeing lock for container after creation failed: %v", err2)
+ }
+ }
+ }()
+
ctr.valid = true
- ctr.state.State = ContainerStateConfigured
+ ctr.state.State = config2.ContainerStateConfigured
ctr.runtime = r
+ if ctr.config.OCIRuntime == "" {
+ ctr.ociRuntime = r.defaultOCIRuntime
+ } else {
+ ociRuntime, ok := r.ociRuntimes[ctr.config.OCIRuntime]
+ if !ok {
+ return nil, errors.Wrapf(config2.ErrInvalidArg, "requested OCI runtime %s is not available", ctr.config.OCIRuntime)
+ }
+ ctr.ociRuntime = ociRuntime
+ }
+
var pod *Pod
if ctr.config.Pod != "" {
// Get the pod from state
@@ -127,14 +183,14 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID())
}
if podCgroup == "" {
- return nil, errors.Wrapf(ErrInternal, "pod %s cgroup is not set", pod.ID())
+ return nil, errors.Wrapf(config2.ErrInternal, "pod %s cgroup is not set", pod.ID())
}
ctr.config.CgroupParent = podCgroup
} else {
ctr.config.CgroupParent = CgroupfsDefaultCgroupParent
}
} else if strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+ return nil, errors.Wrapf(config2.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
}
case SystemdCgroupsManager:
if ctr.config.CgroupParent == "" {
@@ -144,14 +200,29 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID())
}
ctr.config.CgroupParent = podCgroup
+ } else if rootless.IsRootless() {
+ ctr.config.CgroupParent = SystemdDefaultRootlessCgroupParent
} else {
ctr.config.CgroupParent = SystemdDefaultCgroupParent
}
} else if len(ctr.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
+ return nil, errors.Wrapf(config2.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
}
default:
- return nil, errors.Wrapf(ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager)
+ return nil, errors.Wrapf(config2.ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager)
+ }
+
+ if ctr.restoreFromCheckpoint {
+ // Remove information about bind mount
+ // for new container from imported checkpoint
+ g := generate.Generator{Config: ctr.config.Spec}
+ g.RemoveMount("/dev/shm")
+ ctr.config.ShmDir = ""
+ g.RemoveMount("/etc/resolv.conf")
+ g.RemoveMount("/etc/hostname")
+ g.RemoveMount("/etc/hosts")
+ g.RemoveMount("/run/.containerenv")
+ g.RemoveMount("/run/secrets")
}
// Set up storage for the container
@@ -166,7 +237,7 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
}()
- if rootless.IsRootless() && ctr.config.ConmonPidFile == "" {
+ if ctr.config.ConmonPidFile == "" {
ctr.config.ConmonPidFile = filepath.Join(ctr.state.RunDir, "conmon.pid")
}
@@ -178,7 +249,7 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
if err == nil {
// The volume exists, we're good
continue
- } else if errors.Cause(err) != ErrNoSuchVolume {
+ } else if errors.Cause(err) != config2.ErrNoSuchVolume {
return nil, errors.Wrapf(err, "error retrieving named volume %s for new container", vol.Name)
}
@@ -292,7 +363,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
if !r.valid {
- return ErrRuntimeStopped
+ return config2.ErrRuntimeStopped
}
// Update the container to get current state
@@ -308,8 +379,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
- if c.state.State == ContainerStatePaused {
- if err := c.runtime.ociRuntime.killContainer(c, 9); err != nil {
+ if c.state.State == config2.ContainerStatePaused {
+ if err := c.ociRuntime.killContainer(c, 9); err != nil {
return err
}
if err := c.unpause(); err != nil {
@@ -322,8 +393,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
// Check that the container's in a good state to be removed
- if c.state.State == ContainerStateRunning {
- if err := r.ociRuntime.stopContainer(c, c.StopTimeout()); err != nil {
+ if c.state.State == config2.ContainerStateRunning {
+ if err := c.ociRuntime.stopContainer(c, c.StopTimeout()); err != nil {
return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID())
}
@@ -335,7 +406,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
// Check that all of our exec sessions have finished
if len(c.state.ExecSessions) != 0 {
- if err := r.ociRuntime.execStopContainer(c, c.StopTimeout()); err != nil {
+ if err := c.ociRuntime.execStopContainer(c, c.StopTimeout()); err != nil {
return err
}
}
@@ -350,7 +421,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
if len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
- return errors.Wrapf(ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
+ return errors.Wrapf(config2.ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
}
}
@@ -361,20 +432,12 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
// from the state elsewhere
if !removePod {
if err := r.state.RemoveContainerFromPod(pod, c); err != nil {
- if cleanupErr == nil {
- cleanupErr = err
- } else {
- logrus.Errorf("removing container from pod: %v", err)
- }
+ cleanupErr = err
}
}
} else {
if err := r.state.RemoveContainer(c); err != nil {
- if cleanupErr == nil {
- cleanupErr = err
- } else {
- logrus.Errorf("removing container: %v", err)
- }
+ cleanupErr = err
}
}
@@ -402,8 +465,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
// Delete the container.
// Not needed in Configured and Exited states, where the container
// doesn't exist in the runtime
- if c.state.State != ContainerStateConfigured &&
- c.state.State != ContainerStateExited {
+ if c.state.State != config2.ContainerStateConfigured &&
+ c.state.State != config2.ContainerStateExited {
if err := c.delete(ctx); err != nil {
if cleanupErr == nil {
cleanupErr = err
@@ -433,7 +496,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
if !volume.IsCtrSpecific() {
continue
}
- if err := runtime.removeVolume(ctx, volume, false); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
+ if err := runtime.removeVolume(ctx, volume, false); err != nil && err != config2.ErrNoSuchVolume && err != config2.ErrVolumeBeingUsed {
logrus.Errorf("cleanup volume (%s): %v", v, err)
}
}
@@ -448,7 +511,7 @@ func (r *Runtime) GetContainer(id string) (*Container, error) {
defer r.lock.RUnlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, config2.ErrRuntimeStopped
}
return r.state.Container(id)
@@ -460,7 +523,7 @@ func (r *Runtime) HasContainer(id string) (bool, error) {
defer r.lock.RUnlock()
if !r.valid {
- return false, ErrRuntimeStopped
+ return false, config2.ErrRuntimeStopped
}
return r.state.HasContainer(id)
@@ -473,7 +536,7 @@ func (r *Runtime) LookupContainer(idOrName string) (*Container, error) {
defer r.lock.RUnlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, config2.ErrRuntimeStopped
}
return r.state.LookupContainer(idOrName)
}
@@ -487,7 +550,7 @@ func (r *Runtime) GetContainers(filters ...ContainerFilter) ([]*Container, error
defer r.lock.RUnlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, config2.ErrRuntimeStopped
}
ctrs, err := r.state.AllContainers()
@@ -520,7 +583,7 @@ func (r *Runtime) GetAllContainers() ([]*Container, error) {
func (r *Runtime) GetRunningContainers() ([]*Container, error) {
running := func(c *Container) bool {
state, _ := c.State()
- return state == ContainerStateRunning
+ return state == config2.ContainerStateRunning
}
return r.GetContainers(running)
}
@@ -548,7 +611,7 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
return nil, errors.Wrapf(err, "unable to find latest container")
}
if len(ctrs) == 0 {
- return nil, ErrNoSuchCtr
+ return nil, config2.ErrNoSuchCtr
}
for containerIndex, ctr := range ctrs {
createdTime := ctr.config.CreatedTime
@@ -559,16 +622,3 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
}
return ctrs[lastCreatedIndex], nil
}
-
-// RemoveContainersFromStorage attempt to remove containers from storage that do not exist in libpod database
-func (r *Runtime) RemoveContainersFromStorage(ctrs []string) {
- for _, i := range ctrs {
- // if the container does not exist in database, attempt to remove it from storage
- if _, err := r.LookupContainer(i); err != nil && errors.Cause(err) == image.ErrNoSuchCtr {
- r.storageService.UnmountContainerImage(i, true)
- if err := r.storageService.DeleteContainer(i); err != nil && errors.Cause(err) != storage.ErrContainerUnknown {
- logrus.Errorf("Failed to remove container %q from storage: %s", i, err)
- }
- }
- }
-}
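To make the reworked creation path above easier to follow, a minimal sketch of driving NewContainer with a spec built via the opencontainers runtime-tools generate package (the same package this patch now imports for scrubbing restored checkpoints). The rootfs and image setup a real container needs is omitted, so treat this purely as an API illustration; the generate.New signature is assumed from the vendored runtime-tools.

package main

import (
	"context"
	"log"

	"github.com/containers/libpod/libpod"
	"github.com/opencontainers/runtime-tools/generate"
)

func main() {
	ctx := context.Background()

	rt, err := libpod.NewRuntime(ctx)
	if err != nil {
		log.Fatalf("creating runtime: %v", err)
	}
	defer rt.DeferredShutdown(false)

	// Build a bare-bones OCI spec.
	g, err := generate.New("linux")
	if err != nil {
		log.Fatalf("creating spec generator: %v", err)
	}
	g.SetProcessArgs([]string{"/bin/sh", "-c", "echo hello"})

	// With no CtrCreateOption values, setupContainer wires the container to
	// the runtime's default OCI runtime (r.defaultOCIRuntime) as shown above.
	ctr, err := rt.NewContainer(ctx, g.Config)
	if err != nil {
		log.Fatalf("creating container: %v", err)
	}
	log.Printf("created container %s", ctr.ID())
}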
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index 7cc7de270..4055734eb 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -10,6 +10,7 @@ import (
"os"
"github.com/containers/buildah/imagebuildah"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/util"
"github.com/containers/storage"
@@ -31,7 +32,7 @@ func (r *Runtime) RemoveImage(ctx context.Context, img *image.Image, force bool)
defer r.lock.Unlock()
if !r.valid {
- return "", ErrRuntimeStopped
+ return "", define.ErrRuntimeStopped
}
// Get all containers, filter to only those using the image, and remove those containers
diff --git a/libpod/runtime_migrate.go b/libpod/runtime_migrate.go
index e32e6edf6..c363991e6 100644
--- a/libpod/runtime_migrate.go
+++ b/libpod/runtime_migrate.go
@@ -5,6 +5,7 @@ package libpod
import (
"context"
"fmt"
+ "github.com/containers/libpod/pkg/util"
"io/ioutil"
"os"
"path/filepath"
@@ -12,7 +13,6 @@ import (
"syscall"
"github.com/containers/libpod/pkg/rootless"
- "github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -37,7 +37,9 @@ func stopPauseProcess() error {
if err := os.Remove(pausePidPath); err != nil {
return errors.Wrapf(err, "cannot delete pause pid file %s", pausePidPath)
}
- syscall.Kill(pausePid, syscall.SIGKILL)
+ if err := syscall.Kill(pausePid, syscall.SIGKILL); err != nil {
+ return err
+ }
}
return nil
}
diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go
index b3dd7dabd..66f9b10c9 100644
--- a/libpod/runtime_pod.go
+++ b/libpod/runtime_pod.go
@@ -4,6 +4,7 @@ import (
"context"
"time"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
)
@@ -30,7 +31,7 @@ func (r *Runtime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool)
defer r.lock.Unlock()
if !r.valid {
- return ErrRuntimeStopped
+ return define.ErrRuntimeStopped
}
if !p.valid {
@@ -53,7 +54,7 @@ func (r *Runtime) GetPod(id string) (*Pod, error) {
defer r.lock.RUnlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, define.ErrRuntimeStopped
}
return r.state.Pod(id)
@@ -65,7 +66,7 @@ func (r *Runtime) HasPod(id string) (bool, error) {
defer r.lock.RUnlock()
if !r.valid {
- return false, ErrRuntimeStopped
+ return false, define.ErrRuntimeStopped
}
return r.state.HasPod(id)
@@ -78,7 +79,7 @@ func (r *Runtime) LookupPod(idOrName string) (*Pod, error) {
defer r.lock.RUnlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, define.ErrRuntimeStopped
}
return r.state.LookupPod(idOrName)
@@ -93,7 +94,7 @@ func (r *Runtime) Pods(filters ...PodFilter) ([]*Pod, error) {
defer r.lock.RUnlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, define.ErrRuntimeStopped
}
pods, err := r.state.AllPods()
@@ -122,7 +123,7 @@ func (r *Runtime) GetAllPods() ([]*Pod, error) {
defer r.lock.RUnlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, define.ErrRuntimeStopped
}
return r.state.AllPods()
@@ -137,7 +138,7 @@ func (r *Runtime) GetLatestPod() (*Pod, error) {
return nil, errors.Wrapf(err, "unable to get all pods")
}
if len(pods) == 0 {
- return nil, ErrNoSuchPod
+ return nil, define.ErrNoSuchPod
}
for podIndex, pod := range pods {
createdTime := pod.config.CreatedTime
@@ -159,7 +160,7 @@ func (r *Runtime) GetRunningPods() ([]*Pod, error) {
defer r.lock.RUnlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, define.ErrRuntimeStopped
}
containers, err := r.GetRunningContainers()
if err != nil {
@@ -171,7 +172,7 @@ func (r *Runtime) GetRunningPods() ([]*Pod, error) {
pods = append(pods, c.PodID())
pod, err := r.GetPod(c.PodID())
if err != nil {
- if errors.Cause(err) == ErrPodRemoved || errors.Cause(err) == ErrNoSuchPod {
+ if errors.Cause(err) == define.ErrPodRemoved || errors.Cause(err) == define.ErrNoSuchPod {
continue
}
return nil, err
diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go
index 0a5f78cf8..da35b7f93 100644
--- a/libpod/runtime_pod_infra_linux.go
+++ b/libpod/runtime_pod_infra_linux.go
@@ -6,6 +6,7 @@ import (
"context"
"strings"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/rootless"
"github.com/opencontainers/image-spec/specs-go/v1"
@@ -104,7 +105,7 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, imgID
// containers in the pod.
func (r *Runtime) createInfraContainer(ctx context.Context, p *Pod) (*Container, error) {
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, define.ErrRuntimeStopped
}
newImage, err := r.ImageRuntime().New(ctx, r.config.InfraImage, "", "", nil, nil, image.SigningOptions{}, false, nil)
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 124d0daf8..f38e6e7c1 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -9,20 +9,22 @@ import (
"path/filepath"
"strings"
- "github.com/containerd/cgroups"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
+ "github.com/containers/libpod/pkg/cgroups"
+ "github.com/containers/libpod/pkg/rootless"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// NewPod makes a new, empty pod
-func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod, error) {
+func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Pod, Err error) {
r.lock.Lock()
defer r.lock.Unlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, define.ErrRuntimeStopped
}
pod, err := newPod(r)
@@ -58,6 +60,14 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
pod.lock = lock
pod.config.LockID = pod.lock.ID()
+ defer func() {
+ if Err != nil {
+ if err := pod.lock.Free(); err != nil {
+ logrus.Errorf("Error freeing pod lock after failed creation: %v", err)
+ }
+ }
+ }()
+
pod.valid = true
// Check CGroup parent sanity, and set it if it was not set
@@ -66,7 +76,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
if pod.config.CgroupParent == "" {
pod.config.CgroupParent = CgroupfsDefaultCgroupParent
} else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+ return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
}
// If we are set to use pod cgroups, set the cgroup parent that
// all containers in the pod will share
@@ -77,9 +87,13 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
}
case SystemdCgroupsManager:
if pod.config.CgroupParent == "" {
- pod.config.CgroupParent = SystemdDefaultCgroupParent
+ if rootless.IsRootless() {
+ pod.config.CgroupParent = SystemdDefaultRootlessCgroupParent
+ } else {
+ pod.config.CgroupParent = SystemdDefaultCgroupParent
+ }
} else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
+ return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
}
// If we are set to use pod cgroups, set the cgroup parent that
// all containers in the pod will share
@@ -91,7 +105,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
pod.state.CgroupPath = cgroupPath
}
default:
- return nil, errors.Wrapf(ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager)
+ return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager)
}
if pod.config.UsePodCgroup {
@@ -107,15 +121,17 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
if err := r.state.AddPod(pod); err != nil {
return nil, errors.Wrapf(err, "error adding pod to state")
}
+ defer func() {
+ if Err != nil {
+ if err := r.removePod(ctx, pod, true, true); err != nil {
+ logrus.Errorf("Error removing pod after pause container creation failure: %v", err)
+ }
+ }
+ }()
if pod.HasInfraContainer() {
ctr, err := r.createInfraContainer(ctx, pod)
if err != nil {
- // Tear down pod, as it is assumed a the pod will contain
- // a pause container, and it does not.
- if err2 := r.removePod(ctx, pod, true, true); err2 != nil {
- logrus.Errorf("Error removing pod after pause container creation failure: %v", err2)
- }
return nil, errors.Wrapf(err, "error adding Infra Container")
}
pod.state.InfraContainerID = ctr.ID()
@@ -146,7 +162,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
force = true
}
if !removeCtrs && numCtrs > 0 {
- return errors.Wrapf(ErrCtrExists, "pod %s contains containers and cannot be removed", p.ID())
+ return errors.Wrapf(define.ErrCtrExists, "pod %s contains containers and cannot be removed", p.ID())
}
// Go through and lock all containers so we can operate on them all at
@@ -182,15 +198,10 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
// would prevent removing the CGroups.
if p.runtime.config.CgroupManager == CgroupfsCgroupsManager {
// Get the conmon CGroup
- v1CGroups := GetV1CGroups(getExcludedCGroups())
conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
- conmonCgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(conmonCgroupPath))
+ conmonCgroup, err := cgroups.Load(conmonCgroupPath)
if err != nil && err != cgroups.ErrCgroupDeleted {
- if removalErr == nil {
- removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup %s", p.ID(), conmonCgroupPath)
- } else {
- logrus.Errorf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
- }
+ removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup %s", p.ID(), conmonCgroupPath)
}
// New resource limits
@@ -249,9 +260,8 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
// Make sure the conmon cgroup is deleted first
// Since the pod is almost gone, don't bother failing
// hard - instead, just log errors.
- v1CGroups := GetV1CGroups(getExcludedCGroups())
conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
- conmonCgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(conmonCgroupPath))
+ conmonCgroup, err := cgroups.Load(conmonCgroupPath)
if err != nil && err != cgroups.ErrCgroupDeleted {
if removalErr == nil {
removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup", p.ID())
@@ -268,7 +278,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
}
}
}
- cgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(p.state.CgroupPath))
+ cgroup, err := cgroups.Load(p.state.CgroupPath)
if err != nil && err != cgroups.ErrCgroupDeleted {
if removalErr == nil {
removalErr = errors.Wrapf(err, "error retrieving pod %s cgroup", p.ID())
@@ -290,7 +300,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
// keep going so we make sure to evict the pod before
// ending up with an inconsistent state.
if removalErr == nil {
- removalErr = errors.Wrapf(ErrInternal, "unrecognized cgroup manager %s when removing pod %s cgroups", p.runtime.config.CgroupManager, p.ID())
+ removalErr = errors.Wrapf(define.ErrInternal, "unrecognized cgroup manager %s when removing pod %s cgroups", p.runtime.config.CgroupManager, p.ID())
} else {
logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.CgroupManager, p.ID())
}
diff --git a/libpod/runtime_pod_unsupported.go b/libpod/runtime_pod_unsupported.go
index d2629d5ab..5f0811822 100644
--- a/libpod/runtime_pod_unsupported.go
+++ b/libpod/runtime_pod_unsupported.go
@@ -4,13 +4,15 @@ package libpod
import (
"context"
+
+ "github.com/containers/libpod/libpod/define"
)
// NewPod makes a new, empty pod
func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod, error) {
- return nil, ErrOSNotSupported
+ return nil, define.ErrOSNotSupported
}
func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) error {
- return ErrOSNotSupported
+ return define.ErrOSNotSupported
}
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index 68c6c107e..d05db936b 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -2,8 +2,8 @@ package libpod
import (
"context"
- "strings"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -26,7 +26,7 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error
defer r.lock.Unlock()
if !r.valid {
- return ErrRuntimeStopped
+ return define.ErrRuntimeStopped
}
if !v.valid {
@@ -71,30 +71,21 @@ func (r *Runtime) RemoveVolumes(ctx context.Context, volumes []string, all, forc
return deletedVols, nil
}
-// GetVolume retrieves a volume by its name
+// GetVolume retrieves a volume given its full name.
func (r *Runtime) GetVolume(name string) (*Volume, error) {
r.lock.RLock()
defer r.lock.RUnlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, define.ErrRuntimeStopped
}
vol, err := r.state.Volume(name)
- if err == nil {
- return vol, err
- }
-
- vols, err := r.GetAllVolumes()
if err != nil {
return nil, err
}
- for _, v := range vols {
- if strings.HasPrefix(v.Name(), name) {
- return v, nil
- }
- }
- return nil, errors.Errorf("unable to find volume %s", name)
+
+ return vol, nil
}
// HasVolume checks to see if a volume with the given name exists
@@ -103,7 +94,7 @@ func (r *Runtime) HasVolume(name string) (bool, error) {
defer r.lock.RUnlock()
if !r.valid {
- return false, ErrRuntimeStopped
+ return false, define.ErrRuntimeStopped
}
return r.state.HasVolume(name)
@@ -118,7 +109,7 @@ func (r *Runtime) Volumes(filters ...VolumeFilter) ([]*Volume, error) {
defer r.lock.RUnlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, define.ErrRuntimeStopped
}
vols, err := r.state.AllVolumes()
@@ -147,7 +138,7 @@ func (r *Runtime) GetAllVolumes() ([]*Volume, error) {
defer r.lock.RUnlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, define.ErrRuntimeStopped
}
return r.state.AllVolumes()
@@ -167,7 +158,7 @@ func (r *Runtime) PruneVolumes(ctx context.Context) ([]string, []error) {
for _, vol := range vols {
if err := r.RemoveVolume(ctx, vol, false); err != nil {
- if errors.Cause(err) != ErrVolumeBeingUsed && errors.Cause(err) != ErrVolumeRemoved {
+ if errors.Cause(err) != define.ErrVolumeBeingUsed && errors.Cause(err) != define.ErrVolumeRemoved {
pruneErrors = append(pruneErrors, err)
}
continue
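
GetVolume above no longer falls back to prefix matching against every volume; the name must match exactly, and whatever error the state returns is passed straight through. A small stand-in sketch of the new behaviour, using a plain map instead of the libpod state:

package main

import "fmt"

// volumes is a stand-in for the libpod state; lookups are exact-name only.
var volumes = map[string]string{
	"dbdata": "/volumes/dbdata/_data",
}

func getVolume(name string) (string, error) {
	path, ok := volumes[name]
	if !ok {
		return "", fmt.Errorf("no volume with name %s found", name)
	}
	return path, nil
}

func main() {
	if _, err := getVolume("db"); err != nil {
		fmt.Println("partial names no longer resolve:", err)
	}
	path, _ := getVolume("dbdata")
	fmt.Println("exact name resolves to", path)
}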
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index a326ed0e0..ac6fd02c3 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -8,6 +8,7 @@ import (
"path/filepath"
"strings"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/storage/pkg/stringid"
"github.com/pkg/errors"
@@ -20,7 +21,7 @@ func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption)
defer r.lock.Unlock()
if !r.valid {
- return nil, ErrRuntimeStopped
+ return nil, define.ErrRuntimeStopped
}
return r.newVolume(ctx, options...)
}
@@ -86,7 +87,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
if ok, _ := r.state.HasVolume(v.Name()); !ok {
return nil
}
- return ErrVolumeRemoved
+ return define.ErrVolumeRemoved
}
deps, err := r.state.VolumeInUse(v)
@@ -96,7 +97,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
if len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
if !force {
- return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
+ return errors.Wrapf(define.ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
}
// We need to remove all containers using the volume
@@ -105,7 +106,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
if err != nil {
// If the container's removed, no point in
// erroring.
- if errors.Cause(err) == ErrNoSuchCtr || errors.Cause(err) == ErrCtrRemoved {
+ if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrRemoved {
continue
}
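
removeVolume above keeps the force semantics: with containers still using the volume it refuses unless force is set, and when it does remove the dependent containers it tolerates the ones that are already gone. A stand-in sketch of that flow (the container removal and the sentinels are illustrative, not the libpod types):

package main

import (
	"fmt"
	"strings"

	"github.com/pkg/errors"
)

var (
	errVolumeBeingUsed = errors.New("volume is being used") // stand-in sentinel
	errNoSuchCtr       = errors.New("no such container")    // stand-in sentinel
)

// removeCtr stands in for container removal; "gone" simulates a container
// that disappeared between the dependency check and the removal.
func removeCtr(id string) error {
	if id == "gone" {
		return errNoSuchCtr
	}
	return nil
}

func removeVolume(name string, deps []string, force bool) error {
	if len(deps) != 0 {
		if !force {
			return errors.Wrapf(errVolumeBeingUsed,
				"volume %s is being used by the following container(s): %s",
				name, strings.Join(deps, ", "))
		}
		for _, id := range deps {
			if err := removeCtr(id); err != nil {
				// a container that is already gone is not an error here
				if errors.Cause(err) == errNoSuchCtr {
					continue
				}
				return err
			}
		}
	}
	return nil // removal of the volume itself elided
}

func main() {
	fmt.Println(removeVolume("dbdata", []string{"ctr1", "gone"}, false)) // refuses
	fmt.Println(removeVolume("dbdata", []string{"ctr1", "gone"}, true))  // <nil>
}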
diff --git a/libpod/runtime_volume_unsupported.go b/libpod/runtime_volume_unsupported.go
index 5fe487114..1cbf2699a 100644
--- a/libpod/runtime_volume_unsupported.go
+++ b/libpod/runtime_volume_unsupported.go
@@ -4,16 +4,18 @@ package libpod
import (
"context"
+
+ "github.com/containers/libpod/libpod/define"
)
func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
- return nil, ErrNotImplemented
+ return nil, define.ErrNotImplemented
}
func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
- return nil, ErrNotImplemented
+ return nil, define.ErrNotImplemented
}
diff --git a/libpod/state_test.go b/libpod/state_test.go
index be68a2d69..26a1dee7d 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -8,6 +8,7 @@ import (
"testing"
"time"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/lock"
"github.com/containers/storage"
"github.com/stretchr/testify/assert"
@@ -700,7 +701,7 @@ func TestSaveAndUpdateContainer(t *testing.T) {
retrievedCtr, err := state.Container(testCtr.ID())
require.NoError(t, err)
- retrievedCtr.state.State = ContainerStateStopped
+ retrievedCtr.state.State = define.ContainerStateStopped
retrievedCtr.state.ExitCode = 127
retrievedCtr.state.FinishedTime = time.Now()
@@ -729,7 +730,7 @@ func TestSaveAndUpdateContainerSameNamespaceSucceeds(t *testing.T) {
retrievedCtr, err := state.Container(testCtr.ID())
assert.NoError(t, err)
- retrievedCtr.state.State = ContainerStateStopped
+ retrievedCtr.state.State = define.ContainerStateStopped
retrievedCtr.state.ExitCode = 127
retrievedCtr.state.FinishedTime = time.Now()
diff --git a/libpod/stats.go b/libpod/stats.go
index c58a46135..8101fbbbd 100644
--- a/libpod/stats.go
+++ b/libpod/stats.go
@@ -3,11 +3,13 @@
package libpod
import (
+ "runtime"
"strings"
"syscall"
"time"
- "github.com/containerd/cgroups"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/cgroups"
"github.com/pkg/errors"
)
@@ -25,30 +27,25 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
}
}
- if c.state.State != ContainerStateRunning {
- return stats, ErrCtrStateInvalid
+ if c.state.State != define.ContainerStateRunning {
+ return stats, define.ErrCtrStateInvalid
}
cgroupPath, err := c.CGroupPath()
if err != nil {
return nil, err
}
- v1CGroups := GetV1CGroups(getExcludedCGroups())
- cgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(cgroupPath))
+ cgroup, err := cgroups.Load(cgroupPath)
if err != nil {
return stats, errors.Wrapf(err, "unable to load cgroup at %s", cgroupPath)
}
// Ubuntu does not have swap memory in cgroups because swap is often not enabled.
- cgroupStats, err := cgroup.Stat(cgroups.IgnoreNotExist)
+ cgroupStats, err := cgroup.Stat()
if err != nil {
return stats, errors.Wrapf(err, "unable to obtain cgroup stats")
}
conState := c.state.State
- if err != nil {
- return stats, errors.Wrapf(err, "unable to determine container state")
- }
-
netStats, err := getContainerNetIO(c)
if err != nil {
return nil, err
@@ -61,7 +58,7 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
stats.MemLimit = getMemLimit(cgroupStats.Memory.Usage.Limit)
stats.MemPerc = (float64(stats.MemUsage) / float64(stats.MemLimit)) * 100
stats.PIDs = 0
- if conState == ContainerStateRunning {
+ if conState == define.ContainerStateRunning {
stats.PIDs = cgroupStats.Pids.Current
}
stats.BlockInput, stats.BlockOutput = calculateBlockIO(cgroupStats)
@@ -89,7 +86,7 @@ func getMemLimit(cgroupLimit uint64) uint64 {
return cgroupLimit
}
- physicalLimit := uint64(si.Totalram)
+ physicalLimit := si.Totalram
if cgroupLimit > physicalLimit {
return physicalLimit
}
@@ -105,7 +102,11 @@ func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, previousSystem uin
if systemDelta > 0.0 && cpuDelta > 0.0 {
// gets a ratio of container cpu usage total, multiplies it by the number of cores (4 cores running
// at 100% utilization should be 400% utilization), and multiplies that by 100 to get a percentage
- cpuPercent = (cpuDelta / systemDelta) * float64(len(stats.CPU.Usage.PerCPU)) * 100
+ nCPUS := len(stats.CPU.Usage.PerCPU)
+ if nCPUS == 0 {
+ nCPUS = runtime.NumCPU()
+ }
+ cpuPercent = (cpuDelta / systemDelta) * float64(nCPUS) * 100
}
return cpuPercent
}
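
The CPU-percentage change above guards against cgroup stats that report no per-CPU usage (e.g. when per-CPU accounting is unavailable) by falling back to the number of host CPUs. A minimal sketch of the calculation, with simplified stand-in arguments in place of the cgroup metrics types:

package main

import (
	"fmt"
	"runtime"
)

// cpuPercent mirrors the shape of calculateCPUPercent: the usage values are
// cumulative counters, perCPU is the per-core usage slice from the cgroup.
func cpuPercent(cpuTotal, prevCPU, systemNow, prevSystem uint64, perCPU []uint64) float64 {
	cpuDelta := float64(cpuTotal - prevCPU)
	systemDelta := float64(systemNow - prevSystem)
	if systemDelta <= 0.0 || cpuDelta <= 0.0 {
		return 0
	}
	nCPUs := len(perCPU)
	if nCPUs == 0 {
		// the cgroup reported no per-CPU usage; assume all host CPUs
		nCPUs = runtime.NumCPU()
	}
	// ratio of container CPU time to system CPU time, scaled so one fully
	// busy core reads as 100%
	return (cpuDelta / systemDelta) * float64(nCPUs) * 100
}

func main() {
	// container counter advanced by 0.5e9 while the system counter advanced
	// by 1e9, with 4 CPUs reported: 200%
	fmt.Printf("%.1f%%\n", cpuPercent(1500000000, 1000000000, 3000000000, 2000000000, make([]uint64, 4)))
}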
diff --git a/libpod/stats_unsupported.go b/libpod/stats_unsupported.go
index 7117413eb..ec19a89a1 100644
--- a/libpod/stats_unsupported.go
+++ b/libpod/stats_unsupported.go
@@ -2,7 +2,9 @@
package libpod
+import "github.com/containers/libpod/libpod/define"
+
// GetContainerStats gets the running stats for a given container
func (c *Container) GetContainerStats(previousStats *ContainerStats) (*ContainerStats, error) {
- return nil, ErrOSNotSupported
+ return nil, define.ErrOSNotSupported
}
diff --git a/libpod/storage.go b/libpod/storage.go
index 1a7b13da6..0814672be 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -6,9 +6,10 @@ import (
istorage "github.com/containers/image/storage"
"github.com/containers/image/types"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/storage"
"github.com/opencontainers/image-spec/specs-go/v1"
- opentracing "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -71,7 +72,7 @@ func (r *storageService) CreateContainerStorage(ctx context.Context, systemConte
if imageName != "" {
var ref types.ImageReference
if containerName == "" {
- return ContainerInfo{}, ErrEmptyID
+ return ContainerInfo{}, define.ErrEmptyID
}
// Check if we have the specified image.
ref, err := istorage.Transport.ParseStoreReference(r.store, imageID)
@@ -175,7 +176,7 @@ func (r *storageService) CreateContainerStorage(ctx context.Context, systemConte
func (r *storageService) DeleteContainer(idOrName string) error {
if idOrName == "" {
- return ErrEmptyID
+ return define.ErrEmptyID
}
container, err := r.store.Container(idOrName)
if err != nil {
@@ -214,7 +215,7 @@ func (r *storageService) MountContainerImage(idOrName string) (string, error) {
container, err := r.store.Container(idOrName)
if err != nil {
if errors.Cause(err) == storage.ErrContainerUnknown {
- return "", ErrNoSuchCtr
+ return "", define.ErrNoSuchCtr
}
return "", err
}
@@ -233,7 +234,7 @@ func (r *storageService) MountContainerImage(idOrName string) (string, error) {
func (r *storageService) UnmountContainerImage(idOrName string, force bool) (bool, error) {
if idOrName == "" {
- return false, ErrEmptyID
+ return false, define.ErrEmptyID
}
container, err := r.store.Container(idOrName)
if err != nil {
@@ -260,7 +261,7 @@ func (r *storageService) UnmountContainerImage(idOrName string, force bool) (boo
func (r *storageService) MountedContainerImage(idOrName string) (int, error) {
if idOrName == "" {
- return 0, ErrEmptyID
+ return 0, define.ErrEmptyID
}
container, err := r.store.Container(idOrName)
if err != nil {
@@ -277,7 +278,7 @@ func (r *storageService) GetMountpoint(id string) (string, error) {
container, err := r.store.Container(id)
if err != nil {
if errors.Cause(err) == storage.ErrContainerUnknown {
- return "", ErrNoSuchCtr
+ return "", define.ErrNoSuchCtr
}
return "", err
}
@@ -293,7 +294,7 @@ func (r *storageService) GetWorkDir(id string) (string, error) {
container, err := r.store.Container(id)
if err != nil {
if errors.Cause(err) == storage.ErrContainerUnknown {
- return "", ErrNoSuchCtr
+ return "", define.ErrNoSuchCtr
}
return "", err
}
@@ -304,7 +305,7 @@ func (r *storageService) GetRunDir(id string) (string, error) {
container, err := r.store.Container(id)
if err != nil {
if errors.Cause(err) == storage.ErrContainerUnknown {
- return "", ErrNoSuchCtr
+ return "", define.ErrNoSuchCtr
}
return "", err
}
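
The storage wrappers above consistently map the storage layer's "container unknown" error to libpod's define.ErrNoSuchCtr, so callers only have to check one sentinel. A minimal sketch of that translation pattern with stand-in sentinels (github.com/pkg/errors is the same error package the code above uses):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var (
	errContainerUnknown = errors.New("container not known") // storage-layer sentinel
	errNoSuchCtr        = errors.New("no such container")   // libpod-level sentinel
)

// lookup stands in for r.store.Container and always fails here.
func lookup(id string) error {
	return errors.Wrapf(errContainerUnknown, "looking up container %s", id)
}

func mountContainerImage(id string) (string, error) {
	if err := lookup(id); err != nil {
		if errors.Cause(err) == errContainerUnknown {
			// translate to the sentinel callers actually check for
			return "", errNoSuchCtr
		}
		return "", err
	}
	return "/some/mountpoint", nil
}

func main() {
	_, err := mountContainerImage("abc123")
	fmt.Println(err) // no such container
}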
diff --git a/libpod/util.go b/libpod/util.go
index 3a15f9e39..b60575264 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -9,8 +9,7 @@ import (
"strings"
"time"
- "github.com/containers/image/signature"
- "github.com/containers/image/types"
+ "github.com/containers/libpod/libpod/define"
"github.com/fsnotify/fsnotify"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@@ -23,17 +22,6 @@ const (
DefaultTransport = "docker://"
)
-// OpenExclusiveFile opens a file for writing and ensure it doesn't already exist
-func OpenExclusiveFile(path string) (*os.File, error) {
- baseDir := filepath.Dir(path)
- if baseDir != "" {
- if _, err := os.Stat(baseDir); err != nil {
- return nil, err
- }
- }
- return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
-}
-
// FuncTimer helps measure the execution time of a function
// For debug purposes, do not leave in code
// used like defer FuncTimer("foo")
@@ -42,24 +30,6 @@ func FuncTimer(funcName string) {
fmt.Printf("%s executed in %d ms\n", funcName, elapsed)
}
-// CopyStringStringMap deep copies a map[string]string and returns the result
-func CopyStringStringMap(m map[string]string) map[string]string {
- n := map[string]string{}
- for k, v := range m {
- n[k] = v
- }
- return n
-}
-
-// GetPolicyContext creates a signature policy context for the given signature policy path
-func GetPolicyContext(path string) (*signature.PolicyContext, error) {
- policy, err := signature.DefaultPolicy(&types.SystemContext{SignaturePolicyPath: path})
- if err != nil {
- return nil, err
- }
- return signature.NewPolicyContext(policy)
-}
-
// RemoveScientificNotationFromFloat returns a float without any
// scientific notation if the number has any.
// golang does not handle conversion of float64s that have scientific
@@ -126,7 +96,7 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e
return false, errors.Wrapf(err, "checking file %s", path)
}
case <-timeoutChan:
- return false, errors.Wrapf(ErrInternal, "timed out waiting for file %s", path)
+ return false, errors.Wrapf(define.ErrInternal, "timed out waiting for file %s", path)
}
}
}
@@ -156,15 +126,15 @@ func sortMounts(m []spec.Mount) []spec.Mount {
func validPodNSOption(p *Pod, ctrPod string) error {
if p == nil {
- return errors.Wrapf(ErrInvalidArg, "pod passed in was nil. Container may not be associated with a pod")
+ return errors.Wrapf(define.ErrInvalidArg, "pod passed in was nil. Container may not be associated with a pod")
}
if ctrPod == "" {
- return errors.Wrapf(ErrInvalidArg, "container is not a member of any pod")
+ return errors.Wrapf(define.ErrInvalidArg, "container is not a member of any pod")
}
if ctrPod != p.ID() {
- return errors.Wrapf(ErrInvalidArg, "pod passed in is not the pod the container is associated with")
+ return errors.Wrapf(define.ErrInvalidArg, "pod passed in is not the pod the container is associated with")
}
return nil
}
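
WaitForFile above keeps its select-with-timeout shape; only the sentinel it wraps moved to define.ErrInternal. A minimal, self-contained sketch of that idiom, with the channel and the sentinel as stand-ins for the libpod types:

package main

import (
	"fmt"
	"time"

	"github.com/pkg/errors"
)

var errInternal = errors.New("internal libpod error") // stand-in sentinel

// waitFor blocks until ch is closed or the timeout fires, whichever is first.
func waitFor(ch <-chan struct{}, path string, timeout time.Duration) error {
	select {
	case <-ch:
		return nil
	case <-time.After(timeout):
		return errors.Wrapf(errInternal, "timed out waiting for file %s", path)
	}
}

func main() {
	ready := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(ready)
	}()
	fmt.Println(waitFor(ready, "/tmp/example", time.Second))                       // <nil>
	fmt.Println(waitFor(make(chan struct{}), "/tmp/example", 20*time.Millisecond)) // timed out
}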
diff --git a/libpod/util_linux.go b/libpod/util_linux.go
index a801df2ee..78cbc75a7 100644
--- a/libpod/util_linux.go
+++ b/libpod/util_linux.go
@@ -6,9 +6,9 @@ import (
"fmt"
"strings"
- "github.com/containerd/cgroups"
- "github.com/containers/libpod/pkg/util"
- spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/cgroups"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -34,24 +34,31 @@ func systemdSliceFromPath(parent, name string) (string, error) {
return cgroupPath, nil
}
+func getDefaultSystemdCgroup() string {
+ if rootless.IsRootless() {
+ return SystemdDefaultRootlessCgroupParent
+ }
+ return SystemdDefaultCgroupParent
+}
+
// makeSystemdCgroup creates a systemd CGroup at the given location.
func makeSystemdCgroup(path string) error {
- controller, err := cgroups.NewSystemd(SystemdDefaultCgroupParent)
+ controller, err := cgroups.NewSystemd(getDefaultSystemdCgroup())
if err != nil {
return err
}
- return controller.Create(path, &spec.LinuxResources{})
+ return controller.CreateSystemdUnit(path)
}
// deleteSystemdCgroup deletes the systemd cgroup at the given location
func deleteSystemdCgroup(path string) error {
- controller, err := cgroups.NewSystemd(SystemdDefaultCgroupParent)
+ controller, err := cgroups.NewSystemd(getDefaultSystemdCgroup())
if err != nil {
return err
}
- return controller.Delete(path)
+ return controller.DeleteByPath(path)
}
// assembleSystemdCgroupName creates a systemd cgroup path given a base and
@@ -61,7 +68,7 @@ func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) {
const sliceSuffix = ".slice"
if !strings.HasSuffix(baseSlice, sliceSuffix) {
- return "", errors.Wrapf(ErrInvalidArg, "cannot assemble cgroup path with base %q - must end in .slice", baseSlice)
+ return "", errors.Wrapf(define.ErrInvalidArg, "cannot assemble cgroup path with base %q - must end in .slice", baseSlice)
}
noSlice := strings.TrimSuffix(baseSlice, sliceSuffix)
@@ -70,29 +77,6 @@ func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) {
return final, nil
}
-// GetV1CGroups gets the V1 cgroup subsystems and then "filters"
-// out any subsystems that are provided by the caller. Passing nil
-// for excludes will return the subsystems unfiltered.
-//func GetV1CGroups(excludes []string) ([]cgroups.Subsystem, error) {
-func GetV1CGroups(excludes []string) cgroups.Hierarchy {
- return func() ([]cgroups.Subsystem, error) {
- var filtered []cgroups.Subsystem
-
- subSystem, err := cgroups.V1()
- if err != nil {
- return nil, err
- }
- for _, s := range subSystem {
- // If the name of the subsystem is not in the list of excludes, then
- // add it as a keeper.
- if !util.StringInSlice(string(s.Name()), excludes) {
- filtered = append(filtered, s)
- }
- }
- return filtered, nil
- }
-}
-
// LabelVolumePath takes a mount path for a volume and gives it an
// selinux label of either shared or not
func LabelVolumePath(path string, shared bool) error {
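
getDefaultSystemdCgroup above is the rootless-aware piece of this file: the systemd cgroup helpers now pick their parent slice through it instead of hard-coding SystemdDefaultCgroupParent. A stand-in sketch of that selection (the slice names and the rootless check are illustrative, not the libpod constants or pkg/rootless):

package main

import (
	"fmt"
	"os"
)

const (
	defaultCgroupParent         = "machine.slice" // stand-in for SystemdDefaultCgroupParent
	defaultRootlessCgroupParent = "user.slice"    // stand-in for SystemdDefaultRootlessCgroupParent
)

// isRootless approximates pkg/rootless.IsRootless with a plain euid check.
func isRootless() bool { return os.Geteuid() != 0 }

func defaultSystemdCgroup() string {
	if isRootless() {
		return defaultRootlessCgroupParent
	}
	return defaultCgroupParent
}

func main() {
	fmt.Println("creating systemd cgroups under", defaultSystemdCgroup())
}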
diff --git a/libpod/util_unsupported.go b/libpod/util_unsupported.go
index 940006e69..58b0dfbcd 100644
--- a/libpod/util_unsupported.go
+++ b/libpod/util_unsupported.go
@@ -3,27 +3,28 @@
package libpod
import (
+ "github.com/containers/libpod/libpod/define"
"github.com/pkg/errors"
)
func systemdSliceFromPath(parent, name string) (string, error) {
- return "", errors.Wrapf(ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
+ return "", errors.Wrapf(define.ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
}
func makeSystemdCgroup(path string) error {
- return errors.Wrapf(ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
+ return errors.Wrapf(define.ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
}
func deleteSystemdCgroup(path string) error {
- return errors.Wrapf(ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
+ return errors.Wrapf(define.ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
}
func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) {
- return "", errors.Wrapf(ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
+ return "", errors.Wrapf(define.ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
}
// LabelVolumePath takes a mount path for a volume and gives it an
// selinux label of either shared or not
func LabelVolumePath(path string, shared bool) error {
- return ErrNotImplemented
+ return define.ErrNotImplemented
}
diff --git a/libpod/volume.go b/libpod/volume.go
index 0b37d44ef..9ed2ff087 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -10,7 +10,6 @@ type Volume struct {
}
// VolumeConfig holds the volume's config information
-//easyjson:json
type VolumeConfig struct {
// Name of the volume
Name string `json:"name"`