Diffstat (limited to 'libpod')
-rw-r--r--  libpod/boltdb_state.go                    463
-rw-r--r--  libpod/boltdb_state_internal.go           177
-rw-r--r--  libpod/common/common.go                    23
-rw-r--r--  libpod/common/docker_registry_options.go   35
-rw-r--r--  libpod/common/output_interfaces.go          1
-rw-r--r--  libpod/common_test.go                       5
-rw-r--r--  libpod/container.go                        34
-rw-r--r--  libpod/container_api.go                    87
-rw-r--r--  libpod/container_easyjson.go               13
-rw-r--r--  libpod/container_graph_test.go              4
-rw-r--r--  libpod/container_inspect.go                16
-rw-r--r--  libpod/container_internal.go              343
-rw-r--r--  libpod/container_internal_linux.go        405
-rw-r--r--  libpod/container_internal_unsupported.go    4
-rw-r--r--  libpod/errors.go                           14
-rw-r--r--  libpod/image/docker_registry_options.go     5
-rw-r--r--  libpod/image/errors.go                     15
-rw-r--r--  libpod/image/image.go                      34
-rw-r--r--  libpod/image/image_test.go                  8
-rw-r--r--  libpod/image/prune.go                      26
-rw-r--r--  libpod/image/pull.go                       33
-rw-r--r--  libpod/image/utils.go                      22
-rw-r--r--  libpod/in_memory_state.go                 180
-rw-r--r--  libpod/info.go                              2
-rw-r--r--  libpod/kube.go                            440
-rw-r--r--  libpod/mounts_linux.go                     18
-rw-r--r--  libpod/networking_linux.go                 11
-rw-r--r--  libpod/oci.go                              50
-rw-r--r--  libpod/oci_linux.go                        19
-rw-r--r--  libpod/oci_unsupported.go                   2
-rw-r--r--  libpod/options.go                         172
-rw-r--r--  libpod/pod.go                               4
-rw-r--r--  libpod/pod_api.go                          17
-rw-r--r--  libpod/pod_easyjson.go                    128
-rw-r--r--  libpod/pod_internal.go                      2
-rw-r--r--  libpod/runtime.go                         202
-rw-r--r--  libpod/runtime_ctr.go                      41
-rw-r--r--  libpod/runtime_img.go                      35
-rw-r--r--  libpod/runtime_pod_infra_linux.go          10
-rw-r--r--  libpod/runtime_volume.go                  107
-rw-r--r--  libpod/runtime_volume_linux.go            132
-rw-r--r--  libpod/state.go                            49
-rw-r--r--  libpod/state_test.go                        7
-rw-r--r--  libpod/testdata/config.toml                 2
-rw-r--r--  libpod/util.go                             69
-rw-r--r--  libpod/util_linux.go                       24
-rw-r--r--  libpod/volume.go                           63
-rw-r--r--  libpod/volume_internal.go                  29
48 files changed, 2899 insertions(+), 683 deletions(-)
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 42f029379..b154d8bda 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -3,7 +3,6 @@ package libpod
import (
"bytes"
"encoding/json"
- "os"
"strings"
"sync"
@@ -19,7 +18,6 @@ type BoltState struct {
dbLock sync.Mutex
namespace string
namespaceBytes []byte
- lockDir string
runtime *Runtime
}
@@ -52,25 +50,15 @@ type BoltState struct {
// containers/storage do not occur.
// NewBoltState creates a new bolt-backed state database
-func NewBoltState(path, lockDir string, runtime *Runtime) (State, error) {
+func NewBoltState(path string, runtime *Runtime) (State, error) {
state := new(BoltState)
state.dbPath = path
- state.lockDir = lockDir
state.runtime = runtime
state.namespace = ""
state.namespaceBytes = nil
logrus.Debugf("Initializing boltdb state at %s", path)
- // Make the directory that will hold container lockfiles
- if err := os.MkdirAll(lockDir, 0750); err != nil {
- // The directory is allowed to exist
- if !os.IsExist(err) {
- return nil, errors.Wrapf(err, "error creating lockfiles dir %s", lockDir)
- }
- }
- state.lockDir = lockDir
-
db, err := bolt.Open(path, 0600, nil)
if err != nil {
return nil, errors.Wrapf(err, "error opening database %s", path)
@@ -106,6 +94,12 @@ func NewBoltState(path, lockDir string, runtime *Runtime) (State, error) {
if _, err := tx.CreateBucketIfNotExists(allPodsBkt); err != nil {
return errors.Wrapf(err, "error creating all pods bucket")
}
+ if _, err := tx.CreateBucketIfNotExists(volBkt); err != nil {
+ return errors.Wrapf(err, "error creating volume bucket")
+ }
+ if _, err := tx.CreateBucketIfNotExists(allVolsBkt); err != nil {
+ return errors.Wrapf(err, "error creating all volumes bucket")
+ }
if _, err := tx.CreateBucketIfNotExists(runtimeConfigBkt); err != nil {
return errors.Wrapf(err, "error creating runtime-config bucket")
}
@@ -115,11 +109,6 @@ func NewBoltState(path, lockDir string, runtime *Runtime) (State, error) {
return nil, errors.Wrapf(err, "error creating initial database layout")
}
- // Check runtime configuration
- if err := checkRuntimeConfig(db, runtime); err != nil {
- return nil, err
- }
-
state.valid = true
return state, nil
@@ -240,6 +229,72 @@ func (s *BoltState) Refresh() error {
return err
}
+// GetDBConfig retrieves runtime configuration fields that were created when
+// the database was first initialized
+func (s *BoltState) GetDBConfig() (*DBConfig, error) {
+ if !s.valid {
+ return nil, ErrDBClosed
+ }
+
+ cfg := new(DBConfig)
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ configBucket, err := getRuntimeConfigBucket(tx)
+ if err != nil {
+ return nil
+ }
+
+ // Some of these may be nil
+ // When we convert to string, Go will coerce them to ""
+ // That's probably fine - we could raise an error if the key is
+ // missing, but just not including it is also OK.
+ libpodRoot := configBucket.Get(staticDirKey)
+ libpodTmp := configBucket.Get(tmpDirKey)
+ storageRoot := configBucket.Get(graphRootKey)
+ storageTmp := configBucket.Get(runRootKey)
+ graphDriver := configBucket.Get(graphDriverKey)
+
+ cfg.LibpodRoot = string(libpodRoot)
+ cfg.LibpodTmp = string(libpodTmp)
+ cfg.StorageRoot = string(storageRoot)
+ cfg.StorageTmp = string(storageTmp)
+ cfg.GraphDriver = string(graphDriver)
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return cfg, nil
+}
+
+// ValidateDBConfig validates paths in the given runtime against the database
+func (s *BoltState) ValidateDBConfig(runtime *Runtime) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ // Check runtime configuration
+ if err := checkRuntimeConfig(db, runtime); err != nil {
+ return err
+ }
+
+ return nil
+}
+
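+
The new GetDBConfig/ValidateDBConfig split lets a caller read back the paths recorded when the database was first initialized before deciding whether to validate against them. A minimal sketch of that flow, assuming the State interface exposes both methods (the diffstat for libpod/state.go suggests it does); the fallback logic here is illustrative, not the actual runtime code:

// Read the recorded configuration, adopt anything the caller did not set
// explicitly, then verify the final configuration is compatible.
func reconcileDBConfig(state State, rt *Runtime) error {
	dbConfig, err := state.GetDBConfig()
	if err != nil {
		return err
	}
	// Hypothetical: prefer the DB's graph root if the caller left it empty.
	if rt.config.StorageConfig.GraphRoot == "" && dbConfig.StorageRoot != "" {
		rt.config.StorageConfig.GraphRoot = dbConfig.StorageRoot
	}
	return state.ValidateDBConfig(rt)
}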
// SetNamespace sets the namespace that will be used for container and pod
// retrieval
func (s *BoltState) SetNamespace(ns string) error {
@@ -1101,6 +1156,378 @@ func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) {
return ctrs, nil
}
+// AddVolume adds the given volume to the state. It also creates the sub
+// bucket that will hold the IDs of containers that depend on this volume
+func (s *BoltState) AddVolume(volume *Volume) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ if !volume.valid {
+ return ErrVolumeRemoved
+ }
+
+ volName := []byte(volume.Name())
+
+ volConfigJSON, err := json.Marshal(volume.config)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling volume %s config to JSON", volume.Name())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ allVolsBkt, err := getAllVolsBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ // Check if we already have a volume with the given name
+ volExists := allVolsBkt.Get(volName)
+ if volExists != nil {
+ return errors.Wrapf(ErrVolumeExists, "name %s is in use", volume.Name())
+ }
+
+ // We are good to add the volume
+ // Make a bucket for it
+ newVol, err := volBkt.CreateBucket(volName)
+ if err != nil {
+ return errors.Wrapf(err, "error creating bucket for volume %s", volume.Name())
+ }
+
+ // Make a subbucket for the containers using the volume. Dependent container IDs will be added to and
+ // removed from this bucket in addContainer/removeContainer
+ if _, err := newVol.CreateBucket(volDependenciesBkt); err != nil {
+ return errors.Wrapf(err, "error creating bucket for containers using volume %s", volume.Name())
+ }
+
+ if err := newVol.Put(configKey, volConfigJSON); err != nil {
+ return errors.Wrapf(err, "error storing volume %s configuration in DB", volume.Name())
+ }
+
+ if err := allVolsBkt.Put(volName, volName); err != nil {
+ return errors.Wrapf(err, "error storing volume %s in all volumes bucket in DB", volume.Name())
+ }
+
+ return nil
+ })
+ return err
+}
+
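+
For orientation, the per-volume bucket layout AddVolume produces, shown as a minimal standalone bbolt transaction (key names match the constants added in boltdb_state_internal.go below; the volume name and config JSON are illustrative). AddVolume additionally registers the name in the top-level "allVolumes" bucket:

// Inside an open *bolt.DB:
err = db.Update(func(tx *bolt.Tx) error {
	vols, err := tx.CreateBucketIfNotExists([]byte("vol"))
	if err != nil {
		return err
	}
	// One sub-bucket per volume, keyed by name
	v, err := vols.CreateBucket([]byte("myvol"))
	if err != nil {
		return err // e.g. bucket already exists if the name is taken
	}
	// Marshalled VolumeConfig stored under the "config" key
	if err := v.Put([]byte("config"), []byte(`{"name":"myvol"}`)); err != nil {
		return err
	}
	// Empty sub-bucket that will collect dependent container IDs
	_, err = v.CreateBucket([]byte("vol-dependencies"))
	return err
})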
+// RemoveVolCtrDep updates the container dependencies sub bucket of the given volume.
+// It removes the given container ID from the bucket when present.
+// This matters when force-removing a volume and its dependencies must be cleared.
+func (s *BoltState) RemoveVolCtrDep(volume *Volume, ctrID string) error {
+ if ctrID == "" {
+ return nil
+ }
+
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ if !volume.valid {
+ return ErrVolumeRemoved
+ }
+
+ volName := []byte(volume.Name())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volDB := volBkt.Bucket(volName)
+ if volDB == nil {
+ volume.valid = false
+ return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in database", volume.Name())
+ }
+
+ // Make a subbucket for the containers using the volume
+ ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
+ depCtrID := []byte(ctrID)
+ if depExists := ctrDepsBkt.Get(depCtrID); depExists != nil {
+ if err := ctrDepsBkt.Delete(depCtrID); err != nil {
+ return errors.Wrapf(err, "error deleting container dependencies %q for volume %s in ctrDependencies bucket in DB", ctrID, volume.Name())
+ }
+ }
+
+ return nil
+ })
+ return err
+}
+
+// RemoveVolume removes the given volume from the state
+func (s *BoltState) RemoveVolume(volume *Volume) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ if !volume.valid {
+ return ErrVolumeRemoved
+ }
+
+ volName := []byte(volume.Name())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ allVolsBkt, err := getAllVolsBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ // Check if the volume exists
+ volDB := volBkt.Bucket(volName)
+ if volDB == nil {
+ volume.valid = false
+ return errors.Wrapf(ErrNoSuchVolume, "volume %s does not exist in DB", volume.Name())
+ }
+
+ // Check if volume is not being used by any container
+ // This should never be nil
+ // But if it is, we can assume that no containers are using
+ // the volume.
+ volCtrsBkt := volDB.Bucket(volDependenciesBkt)
+ if volCtrsBkt != nil {
+ var deps []string
+ err = volCtrsBkt.ForEach(func(id, value []byte) error {
+ deps = append(deps, string(id))
+ return nil
+ })
+ if err != nil {
+ return errors.Wrapf(err, "error getting list of dependencies from dependencies bucket for volumes %q", volume.Name())
+ }
+ if len(deps) > 0 {
+ return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by container(s) %s", volume.Name(), strings.Join(deps, ","))
+ }
+ }
+
+ // volume is ready for removal
+ // Let's kick it out
+ if err := allVolsBkt.Delete(volName); err != nil {
+ return errors.Wrapf(err, "error removing volume %s from all volumes bucket in DB", volume.Name())
+ }
+ if err := volBkt.DeleteBucket(volName); err != nil {
+ return errors.Wrapf(err, "error removing volume %s from DB", volume.Name())
+ }
+
+ return nil
+ })
+ return err
+}
+
+// AllVolumes returns all volumes present in the state
+func (s *BoltState) AllVolumes() ([]*Volume, error) {
+ if !s.valid {
+ return nil, ErrDBClosed
+ }
+
+ volumes := []*Volume{}
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ allVolsBucket, err := getAllVolsBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volBucket, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+ err = allVolsBucket.ForEach(func(id, name []byte) error {
+ volExists := volBucket.Bucket(id)
+ // This check can be removed if performance becomes an
+ // issue, but much less helpful errors will be produced
+ if volExists == nil {
+ return errors.Wrapf(ErrInternal, "inconsistency in state - volume %s is in all volumes bucket but volume not found", string(id))
+ }
+
+ volume := new(Volume)
+ volume.config = new(VolumeConfig)
+
+ if err := s.getVolumeFromDB(id, volume, volBucket); err != nil {
+ if errors.Cause(err) != ErrNSMismatch {
+ logrus.Errorf("Error retrieving volume %s from the database: %v", string(id), err)
+ }
+ } else {
+ volumes = append(volumes, volume)
+ }
+
+ return nil
+ })
+ return err
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return volumes, nil
+}
+
+// Volume retrieves a volume by its full name
+func (s *BoltState) Volume(name string) (*Volume, error) {
+ if name == "" {
+ return nil, ErrEmptyID
+ }
+
+ if !s.valid {
+ return nil, ErrDBClosed
+ }
+
+ volName := []byte(name)
+
+ volume := new(Volume)
+ volume.config = new(VolumeConfig)
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ return s.getVolumeFromDB(volName, volume, volBkt)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return volume, nil
+}
+
+// HasVolume returns true if the given volume exists in the state, otherwise it returns false
+func (s *BoltState) HasVolume(name string) (bool, error) {
+ if name == "" {
+ return false, ErrEmptyID
+ }
+
+ if !s.valid {
+ return false, ErrDBClosed
+ }
+
+ volName := []byte(name)
+
+ exists := false
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return false, err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volDB := volBkt.Bucket(volName)
+ if volDB != nil {
+ exists = true
+ }
+
+ return nil
+ })
+ if err != nil {
+ return false, err
+ }
+
+ return exists, nil
+}
+
+// VolumeInUse checks if any container is using the volume.
+// It returns a slice of the IDs of the containers using the given
+// volume. If the slice is empty, no containers use the given volume.
+func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
+ if !s.valid {
+ return nil, ErrDBClosed
+ }
+
+ if !volume.valid {
+ return nil, ErrVolumeRemoved
+ }
+
+ depCtrs := []string{}
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ volBucket, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ volDB := volBucket.Bucket([]byte(volume.Name()))
+ if volDB == nil {
+ volume.valid = false
+ return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in DB", volume.Name())
+ }
+
+ dependsBkt := volDB.Bucket(volDependenciesBkt)
+ if dependsBkt == nil {
+ return errors.Wrapf(ErrInternal, "volume %s has no dependencies bucket", volume.Name())
+ }
+
+ // Iterate through and add dependencies
+ err = dependsBkt.ForEach(func(id, value []byte) error {
+ depCtrs = append(depCtrs, string(id))
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return depCtrs, nil
+}
+
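+
A hedged usage sketch tying VolumeInUse to RemoveVolume: a caller can surface the dependent container IDs before attempting removal, mirroring the check RemoveVolume performs internally:

deps, err := state.VolumeInUse(vol)
if err != nil {
	return err
}
if len(deps) > 0 {
	return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is used by container(s) %s",
		vol.Name(), strings.Join(deps, ","))
}
return state.RemoveVolume(vol)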
// AddPod adds the given pod to the state.
func (s *BoltState) AddPod(pod *Pod) error {
if !s.valid {
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index cc7d106cc..06f8dcb24 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -21,15 +21,25 @@ const (
allCtrsName = "all-ctrs"
podName = "pod"
allPodsName = "allPods"
+ volName = "vol"
+ allVolsName = "allVolumes"
runtimeConfigName = "runtime-config"
- configName = "config"
- stateName = "state"
- dependenciesName = "dependencies"
- netNSName = "netns"
- containersName = "containers"
- podIDName = "pod-id"
- namespaceName = "namespace"
+ configName = "config"
+ stateName = "state"
+ dependenciesName = "dependencies"
+ volCtrDependencies = "vol-dependencies"
+ netNSName = "netns"
+ containersName = "containers"
+ podIDName = "pod-id"
+ namespaceName = "namespace"
+
+ staticDirName = "static-dir"
+ tmpDirName = "tmp-dir"
+ runRootName = "run-root"
+ graphRootName = "graph-root"
+ graphDriverName = "graph-driver-name"
+ osName = "os"
)
var (
@@ -40,30 +50,31 @@ var (
allCtrsBkt = []byte(allCtrsName)
podBkt = []byte(podName)
allPodsBkt = []byte(allPodsName)
+ volBkt = []byte(volName)
+ allVolsBkt = []byte(allVolsName)
runtimeConfigBkt = []byte(runtimeConfigName)
- configKey = []byte(configName)
- stateKey = []byte(stateName)
- dependenciesBkt = []byte(dependenciesName)
- netNSKey = []byte(netNSName)
- containersBkt = []byte(containersName)
- podIDKey = []byte(podIDName)
- namespaceKey = []byte(namespaceName)
+ configKey = []byte(configName)
+ stateKey = []byte(stateName)
+ dependenciesBkt = []byte(dependenciesName)
+ volDependenciesBkt = []byte(volCtrDependencies)
+ netNSKey = []byte(netNSName)
+ containersBkt = []byte(containersName)
+ podIDKey = []byte(podIDName)
+ namespaceKey = []byte(namespaceName)
+
+ staticDirKey = []byte(staticDirName)
+ tmpDirKey = []byte(tmpDirName)
+ runRootKey = []byte(runRootName)
+ graphRootKey = []byte(graphRootName)
+ graphDriverKey = []byte(graphDriverName)
+ osKey = []byte(osName)
)
// Check if the configuration of the database is compatible with the
// configuration of the runtime opening it
// If there is no runtime configuration loaded, load our own
func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error {
- var (
- staticDir = []byte("static-dir")
- tmpDir = []byte("tmp-dir")
- runRoot = []byte("run-root")
- graphRoot = []byte("graph-root")
- graphDriverName = []byte("graph-driver-name")
- osKey = []byte("os")
- )
-
err := db.Update(func(tx *bolt.Tx) error {
configBkt, err := getRuntimeConfigBucket(tx)
if err != nil {
@@ -74,31 +85,31 @@ func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error {
return err
}
- if err := validateDBAgainstConfig(configBkt, "static dir",
- rt.config.StaticDir, staticDir, ""); err != nil {
+ if err := validateDBAgainstConfig(configBkt, "libpod root directory (staticdir)",
+ rt.config.StaticDir, staticDirKey, ""); err != nil {
return err
}
- if err := validateDBAgainstConfig(configBkt, "tmp dir",
- rt.config.TmpDir, tmpDir, ""); err != nil {
+ if err := validateDBAgainstConfig(configBkt, "libpod temporary files directory (tmpdir)",
+ rt.config.TmpDir, tmpDirKey, ""); err != nil {
return err
}
- if err := validateDBAgainstConfig(configBkt, "run root",
- rt.config.StorageConfig.RunRoot, runRoot,
+ if err := validateDBAgainstConfig(configBkt, "storage temporary directory (runroot)",
+ rt.config.StorageConfig.RunRoot, runRootKey,
storage.DefaultStoreOptions.RunRoot); err != nil {
return err
}
- if err := validateDBAgainstConfig(configBkt, "graph root",
- rt.config.StorageConfig.GraphRoot, graphRoot,
+ if err := validateDBAgainstConfig(configBkt, "storage graph root directory (graphroot)",
+ rt.config.StorageConfig.GraphRoot, graphRootKey,
storage.DefaultStoreOptions.GraphRoot); err != nil {
return err
}
- return validateDBAgainstConfig(configBkt, "graph driver name",
+ return validateDBAgainstConfig(configBkt, "storage graph driver",
rt.config.StorageConfig.GraphDriverName,
- graphDriverName,
+ graphDriverKey,
storage.DefaultStoreOptions.GraphDriverName)
})
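validateDBAgainstConfig's body is outside this diff; a sketch of the contract implied by the calls above (initialize a missing key, reject a mismatch). The real helper also special-cases defaultValue, which is omitted here:

func validateDBAgainstConfig(bkt *bolt.Bucket, field, rtValue string, key []byte, defaultValue string) error {
	stored := bkt.Get(key)
	if stored == nil {
		// First run: record the runtime's value for future checks.
		return bkt.Put(key, []byte(rtValue))
	}
	if rtValue != string(stored) {
		return errors.Wrapf(ErrDBBadConfig,
			"database %s %q does not match our %s %q",
			field, string(stored), field, rtValue)
	}
	return nil
}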
@@ -229,6 +240,22 @@ func getAllPodsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
return bkt, nil
}
+func getVolBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
+ bkt := tx.Bucket(volBkt)
+ if bkt == nil {
+ return nil, errors.Wrapf(ErrDBBadConfig, "volumes bucket not found in DB")
+ }
+ return bkt, nil
+}
+
+func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
+ bkt := tx.Bucket(allVolsBkt)
+ if bkt == nil {
+ return nil, errors.Wrapf(ErrDBBadConfig, "all volumes bucket not found in DB")
+ }
+ return bkt, nil
+}
+
func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(runtimeConfigBkt)
if bkt == nil {
@@ -261,7 +288,7 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
}
// Get the lock
- lockPath := filepath.Join(s.lockDir, string(id))
+ lockPath := filepath.Join(s.runtime.lockDir, string(id))
lock, err := storage.GetLockfile(lockPath)
if err != nil {
return errors.Wrapf(err, "error retrieving lockfile for container %s", string(id))
@@ -297,7 +324,7 @@ func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error
}
// Get the lock
- lockPath := filepath.Join(s.lockDir, string(id))
+ lockPath := filepath.Join(s.runtime.lockDir, string(id))
lock, err := storage.GetLockfile(lockPath)
if err != nil {
return errors.Wrapf(err, "error retrieving lockfile for pod %s", string(id))
@@ -310,6 +337,35 @@ func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error
return nil
}
+func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bucket) error {
+ volDB := volBkt.Bucket(name)
+ if volDB == nil {
+ return errors.Wrapf(ErrNoSuchVolume, "volume with name %s not found", string(name))
+ }
+
+ volConfigBytes := volDB.Get(configKey)
+ if volConfigBytes == nil {
+ return errors.Wrapf(ErrInternal, "volume %s is missing configuration key in DB", string(name))
+ }
+
+ if err := json.Unmarshal(volConfigBytes, volume.config); err != nil {
+ return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
+ }
+
+ // Get the lock
+ lockPath := filepath.Join(s.runtime.lockDir, string(name))
+ lock, err := storage.GetLockfile(lockPath)
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving lockfile for volume %s", string(name))
+ }
+ volume.lock = lock
+
+ volume.runtime = s.runtime
+ volume.valid = true
+
+ return nil
+}
+
// Add a container to the DB
// If pod is not nil, the container is added to the pod as well
func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
@@ -371,6 +427,11 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
return err
}
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
// If a pod was given, check if it exists
var podDB *bolt.Bucket
var podCtrs *bolt.Bucket
@@ -503,6 +564,27 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
}
}
+ // Add container to volume dependencies bucket if container is using a named volume
+ if ctr.runtime.config.VolumePath == "" {
+ return nil
+ }
+ for _, vol := range ctr.config.Spec.Mounts {
+ if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
+ volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
+ volDB := volBkt.Bucket([]byte(volName))
+ if volDB == nil {
+ return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in database", volName)
+ }
+
+ ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
+ if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
+ if err := ctrDepsBkt.Put(ctrID, ctrID); err != nil {
+ return errors.Wrapf(err, "error storing container dependencies %q for volume %s in ctrDependencies bucket in DB", ctr.ID(), volName)
+ }
+ }
+ }
+ }
+
return nil
})
return err
@@ -540,6 +622,11 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
return err
}
+ volBkt, err := getVolBucket(tx)
+ if err != nil {
+ return err
+ }
+
// Does the pod exist?
var podDB *bolt.Bucket
if pod != nil {
@@ -658,5 +745,25 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
}
}
+ // Remove container from volume dependencies bucket if container is using a named volume
+ for _, vol := range ctr.config.Spec.Mounts {
+ if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
+ volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
+
+ volDB := volBkt.Bucket([]byte(volName))
+ if volDB == nil {
+ // Let's assume the volume was already deleted and continue to remove the container
+ continue
+ }
+
+ ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
+ if depExists := ctrDepsBkt.Get(ctrID); depExists != nil {
+ if err := ctrDepsBkt.Delete(ctrID); err != nil {
+ return errors.Wrapf(err, "error deleting container dependencies %q for volume %s in ctrDependencies bucket in DB", ctr.ID(), volName)
+ }
+ }
+ }
+ }
+
return nil
}
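Both addContainer and removeContainer derive a named volume's name from the mount source by stripping the configured VolumePath prefix and taking the first remaining path element (container_inspect.go below uses the same parsing). A standalone sketch of that logic; the example paths are illustrative:

// e.g. volumePath = "/var/lib/containers/storage/volumes" and
// source = "/var/lib/containers/storage/volumes/myvol/_data" => "myvol"
func volumeNameFromSource(volumePath, source string) string {
	if volumePath == "" || !strings.Contains(source, volumePath) {
		return "" // not a named volume managed under VolumePath
	}
	return strings.Split(source[len(volumePath)+1:], "/")[0]
}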
diff --git a/libpod/common/common.go b/libpod/common/common.go
index 932f1f6da..5d10bee36 100644
--- a/libpod/common/common.go
+++ b/libpod/common/common.go
@@ -1,32 +1,9 @@
package common
import (
- "io"
-
- cp "github.com/containers/image/copy"
"github.com/containers/image/types"
)
-// GetCopyOptions constructs a new containers/image/copy.Options{} struct from the given parameters
-func GetCopyOptions(reportWriter io.Writer, signaturePolicyPath string, srcDockerRegistry, destDockerRegistry *DockerRegistryOptions, signing SigningOptions, authFile, manifestType string, forceCompress bool) *cp.Options {
- if srcDockerRegistry == nil {
- srcDockerRegistry = &DockerRegistryOptions{}
- }
- if destDockerRegistry == nil {
- destDockerRegistry = &DockerRegistryOptions{}
- }
- srcContext := srcDockerRegistry.GetSystemContext(signaturePolicyPath, authFile, forceCompress)
- destContext := destDockerRegistry.GetSystemContext(signaturePolicyPath, authFile, forceCompress)
- return &cp.Options{
- RemoveSignatures: signing.RemoveSignatures,
- SignBy: signing.SignBy,
- ReportWriter: reportWriter,
- SourceCtx: srcContext,
- DestinationCtx: destContext,
- ForceManifestMIMEType: manifestType,
- }
-}
-
// GetSystemContext Constructs a new containers/image/types.SystemContext{} struct from the given signaturePolicy path
func GetSystemContext(signaturePolicyPath, authFilePath string, forceCompress bool) *types.SystemContext {
sc := &types.SystemContext{}
diff --git a/libpod/common/docker_registry_options.go b/libpod/common/docker_registry_options.go
deleted file mode 100644
index f79ae0c54..000000000
--- a/libpod/common/docker_registry_options.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package common
-
-import "github.com/containers/image/types"
-
-// DockerRegistryOptions encapsulates settings that affect how we connect or
-// authenticate to a remote registry.
-type DockerRegistryOptions struct {
- // DockerRegistryCreds is the user name and password to supply in case
- // we need to pull an image from a registry, and it requires us to
- // authenticate.
- DockerRegistryCreds *types.DockerAuthConfig
- // DockerCertPath is the location of a directory containing CA
- // certificates which will be used to verify the registry's certificate
- // (all files with names ending in ".crt"), and possibly client
- // certificates and private keys (pairs of files with the same name,
- // except for ".cert" and ".key" suffixes).
- DockerCertPath string
- // DockerInsecureSkipTLSVerify turns off verification of TLS
- // certificates and allows connecting to registries without encryption.
- DockerInsecureSkipTLSVerify bool
-}
-
-// GetSystemContext constructs a new system context from the given signaturePolicy path and the
-// values in the DockerRegistryOptions
-func (o DockerRegistryOptions) GetSystemContext(signaturePolicyPath, authFile string, forceCompress bool) *types.SystemContext {
- sc := &types.SystemContext{
- SignaturePolicyPath: signaturePolicyPath,
- DockerAuthConfig: o.DockerRegistryCreds,
- DockerCertPath: o.DockerCertPath,
- DockerInsecureSkipTLSVerify: o.DockerInsecureSkipTLSVerify,
- AuthFilePath: authFile,
- DirForceCompress: forceCompress,
- }
- return sc
-}
diff --git a/libpod/common/output_interfaces.go b/libpod/common/output_interfaces.go
deleted file mode 100644
index 805d0c79a..000000000
--- a/libpod/common/output_interfaces.go
+++ /dev/null
@@ -1 +0,0 @@
-package common
diff --git a/libpod/common_test.go b/libpod/common_test.go
index b7fee2764..81c8f1920 100644
--- a/libpod/common_test.go
+++ b/libpod/common_test.go
@@ -74,6 +74,11 @@ func getTestContainer(id, name, locksDir string) (*Container, error) {
"/test/file.test": "/test2/file2.test",
},
},
+ runtime: &Runtime{
+ config: &RuntimeConfig{
+ VolumePath: "/does/not/exist/tmp/volumes",
+ },
+ },
valid: true,
}
diff --git a/libpod/container.go b/libpod/container.go
index 7bb5b2687..18d867f41 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -9,6 +9,7 @@ import (
"github.com/containernetworking/cni/pkg/types"
cnitypes "github.com/containernetworking/cni/pkg/types/current"
+ "github.com/containers/libpod/pkg/namespaces"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -296,6 +297,8 @@ type ContainerConfig struct {
HostAdd []string `json:"hostsAdd,omitempty"`
// Network names (CNI) to add container to. Empty to use default network.
Networks []string `json:"networks,omitempty"`
+ // Network mode specified for the default network.
+ NetMode namespaces.NetworkMode `json:"networkMode,omitempty"`
// Image Config
@@ -826,7 +829,7 @@ func (c *Container) IPs() ([]net.IPNet, error) {
}
if !c.config.CreateNetNS {
- return nil, errors.Wrapf(ErrInvalidArg, "container %s network namespace is not managed by libpod")
+ return nil, errors.Wrapf(ErrInvalidArg, "container %s network namespace is not managed by libpod", c.ID())
}
ips := make([]net.IPNet, 0)
@@ -854,7 +857,7 @@ func (c *Container) Routes() ([]types.Route, error) {
}
if !c.config.CreateNetNS {
- return nil, errors.Wrapf(ErrInvalidArg, "container %s network namespace is not managed by libpod")
+ return nil, errors.Wrapf(ErrInvalidArg, "container %s network namespace is not managed by libpod", c.ID())
}
routes := make([]types.Route, 0)
@@ -996,3 +999,30 @@ func (c *Container) IsInfra() bool {
func (c *Container) IsReadOnly() bool {
return c.config.Spec.Root.Readonly
}
+
+// NetworkDisabled returns whether the container is running with a disabled network
+func (c *Container) NetworkDisabled() (bool, error) {
+ if c.config.NetNsCtr != "" {
+ container, err := c.runtime.state.Container(c.config.NetNsCtr)
+ if err != nil {
+ return false, err
+ }
+ return networkDisabled(container)
+ }
+ return networkDisabled(c)
+
+}
+
+func networkDisabled(c *Container) (bool, error) {
+ if c.config.CreateNetNS {
+ return false, nil
+ }
+ if !c.config.PostConfigureNetNS {
+ for _, ns := range c.config.Spec.Linux.Namespaces {
+ if ns.Type == spec.NetworkNamespace {
+ return ns.Path == "", nil
+ }
+ }
+ }
+ return false, nil
+}
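Under the logic above, a container that neither creates a libpod-managed netns nor joins an existing one by path is treated as having networking disabled. An illustrative spec fragment (assumed values):

// A network namespace entry with an empty Path means a fresh, empty netns:
ctr.config.Spec.Linux.Namespaces = []spec.LinuxNamespace{
	{Type: spec.NetworkNamespace, Path: ""},
}
// With CreateNetNS and PostConfigureNetNS both false,
// networkDisabled(ctr) returns true.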
diff --git a/libpod/container_api.go b/libpod/container_api.go
index d99aec5b4..09bc46905 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -39,7 +39,7 @@ func (c *Container) Init(ctx context.Context) (err error) {
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s")
+ return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
@@ -93,7 +93,7 @@ func (c *Container) Start(ctx context.Context) (err error) {
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s")
+ return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
@@ -159,7 +159,7 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams,
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return nil, errors.Wrapf(err, "error checking dependencies for container %s")
+ return nil, errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
@@ -328,25 +328,25 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user string) e
if err != nil {
return errors.Wrapf(err, "error exec %s", c.ID())
}
+ chWait := make(chan error)
+ go func() {
+ chWait <- execCmd.Wait()
+ }()
+ defer close(chWait)
pidFile := c.execPidPath(sessionID)
- // 1 second seems a reasonable time to wait
- // See https://github.com/containers/libpod/issues/1495
- const pidWaitTimeout = 1000
+ // 60 seconds seems a reasonable time to wait
+ // https://github.com/containers/libpod/issues/1495
+ // https://github.com/containers/libpod/issues/1816
+ const pidWaitTimeout = 60000
// Wait until the runtime makes the pidfile
- // TODO: If runtime errors before the PID file is created, we have to
- // wait for timeout here
- if err := WaitForFile(pidFile, pidWaitTimeout*time.Millisecond); err != nil {
- logrus.Debugf("Timed out waiting for pidfile from runtime for container %s exec", c.ID())
-
- // Check if an error occurred in the process before we made a pidfile
- // TODO: Wait() here is a poor choice - is there a way to see if
- // a process has finished, instead of waiting for it to finish?
- if err := execCmd.Wait(); err != nil {
+ exited, err := WaitForFile(pidFile, chWait, pidWaitTimeout*time.Millisecond)
+ if err != nil {
+ if exited {
+ // If the runtime exited, propagate the error we got from the process.
return err
}
-
return errors.Wrapf(err, "timed out waiting for runtime to create pidfile for exec session in container %s", c.ID())
}
@@ -388,7 +388,10 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user string) e
locked = false
}
- waitErr := execCmd.Wait()
+ var waitErr error
+ if !exited {
+ waitErr = <-chWait
+ }
// Lock again
if !c.batched {
@@ -672,22 +675,27 @@ func (c *Container) Batch(batchFunc func(*Container) error) error {
return err
}
-// Sync updates the current state of the container, checking whether its state
-// has changed
-// Sync can only be used inside Batch() - otherwise, it will be done
-// automatically.
-// When called outside Batch(), Sync() is a no-op
+// Sync updates the status of a container by querying the OCI runtime.
+// If the container has not been created inside the OCI runtime, nothing will be
+// done.
+// Most of the time, Podman does not explicitly query the OCI runtime for
+// container status, and instead relies upon exit files created by conmon.
+// This can cause a disconnect between running state and what Podman sees in
+// cases where conmon was killed unexpectedly, or runc was upgraded.
+// Running a manual Sync() ensures that container state will be correct in
+// such situations.
func (c *Container) Sync() error {
if !c.batched {
- return nil
+ c.lock.Lock()
+ defer c.lock.Unlock()
}
// If runtime knows about the container, update its status in runtime
// And then save back to disk
if (c.state.State != ContainerStateUnknown) &&
- (c.state.State != ContainerStateConfigured) {
+ (c.state.State != ContainerStateConfigured) &&
+ (c.state.State != ContainerStateExited) {
oldState := c.state.State
- // TODO: optionally replace this with a stat for the exit file
if err := c.runtime.ociRuntime.updateContainerStatus(c, true); err != nil {
return err
}
@@ -715,7 +723,7 @@ func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) (err e
notRunning, err := c.checkDependenciesRunning()
if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s")
+ return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
@@ -800,7 +808,7 @@ func (c *Container) Refresh(ctx context.Context) error {
return err
}
- logrus.Debugf("Successfully refresh container %s state")
+ logrus.Debugf("Successfully refresh container %s state", c.ID())
// Initialize the container if it was created in runc
if wasCreated || wasRunning || wasPaused {
@@ -829,9 +837,22 @@ func (c *Container) Refresh(ctx context.Context) error {
return nil
}
+// ContainerCheckpointOptions is a struct used to pass the parameters
+// for checkpointing (and restoring) to the corresponding functions
+type ContainerCheckpointOptions struct {
+ // Keep tells the API to not delete checkpoint artifacts
+ Keep bool
+ // KeepRunning tells the API to keep the container running
+ // after writing the checkpoint to disk
+ KeepRunning bool
+ // TCPEstablished tells the API to checkpoint a container
+ // even if it contains established TCP connections
+ TCPEstablished bool
+}
+
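+
Callers now pass a single options struct instead of a bare bool; for example, to checkpoint while keeping the container running and preserving the checkpoint artifacts:

opts := libpod.ContainerCheckpointOptions{
	Keep:           true,
	KeepRunning:    true,
	TCPEstablished: false,
}
if err := ctr.Checkpoint(ctx, opts); err != nil {
	return err
}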
// Checkpoint checkpoints a container
-func (c *Container) Checkpoint(ctx context.Context, keep bool) error {
- logrus.Debugf("Trying to checkpoint container %s", c)
+func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
+ logrus.Debugf("Trying to checkpoint container %s", c.ID())
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -841,12 +862,12 @@ func (c *Container) Checkpoint(ctx context.Context, keep bool) error {
}
}
- return c.checkpoint(ctx, keep)
+ return c.checkpoint(ctx, options)
}
// Restore restores a container
-func (c *Container) Restore(ctx context.Context, keep bool) (err error) {
- logrus.Debugf("Trying to restore container %s", c)
+func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOptions) (err error) {
+ logrus.Debugf("Trying to restore container %s", c.ID())
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -856,5 +877,5 @@ func (c *Container) Restore(ctx context.Context, keep bool) (err error) {
}
}
- return c.restore(ctx, keep)
+ return c.restore(ctx, options)
}
diff --git a/libpod/container_easyjson.go b/libpod/container_easyjson.go
index 041cc08ac..8bf5cb64f 100644
--- a/libpod/container_easyjson.go
+++ b/libpod/container_easyjson.go
@@ -8,6 +8,7 @@ import (
json "encoding/json"
types "github.com/containernetworking/cni/pkg/types"
current "github.com/containernetworking/cni/pkg/types/current"
+ namespaces "github.com/containers/libpod/pkg/namespaces"
storage "github.com/containers/storage"
idtools "github.com/containers/storage/pkg/idtools"
ocicni "github.com/cri-o/ocicni/pkg/ocicni"
@@ -1550,6 +1551,8 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
}
in.Delim(']')
}
+ case "networkMode":
+ out.NetMode = namespaces.NetworkMode(in.String())
case "userVolumes":
if in.IsNull() {
in.Skip()
@@ -2177,6 +2180,16 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
out.RawByte(']')
}
}
+ if in.NetMode != "" {
+ const prefix string = ",\"networkMode\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.NetMode))
+ }
if len(in.UserVolumes) != 0 {
const prefix string = ",\"userVolumes\":"
if first {
diff --git a/libpod/container_graph_test.go b/libpod/container_graph_test.go
index bba3d7aad..25461f1f4 100644
--- a/libpod/container_graph_test.go
+++ b/libpod/container_graph_test.go
@@ -205,6 +205,7 @@ func TestBuildContainerGraphFourContainersNoEdges(t *testing.T) {
ctr3, err := getTestCtrN("3", tmpDir)
assert.NoError(t, err)
ctr4, err := getTestCtrN("4", tmpDir)
+ assert.NoError(t, err)
graph, err := buildContainerGraph([]*Container{ctr1, ctr2, ctr3, ctr4})
assert.NoError(t, err)
@@ -241,6 +242,7 @@ func TestBuildContainerGraphFourContainersTwoInCycle(t *testing.T) {
ctr3, err := getTestCtrN("3", tmpDir)
assert.NoError(t, err)
ctr4, err := getTestCtrN("4", tmpDir)
+ assert.NoError(t, err)
ctr1.config.IPCNsCtr = ctr2.config.ID
ctr2.config.UserNsCtr = ctr1.config.ID
@@ -260,6 +262,7 @@ func TestBuildContainerGraphFourContainersAllInCycle(t *testing.T) {
ctr3, err := getTestCtrN("3", tmpDir)
assert.NoError(t, err)
ctr4, err := getTestCtrN("4", tmpDir)
+ assert.NoError(t, err)
ctr1.config.IPCNsCtr = ctr2.config.ID
ctr2.config.UserNsCtr = ctr3.config.ID
ctr3.config.NetNsCtr = ctr4.config.ID
@@ -281,6 +284,7 @@ func TestBuildContainerGraphFourContainersNoneInCycle(t *testing.T) {
ctr3, err := getTestCtrN("3", tmpDir)
assert.NoError(t, err)
ctr4, err := getTestCtrN("4", tmpDir)
+ assert.NoError(t, err)
ctr1.config.IPCNsCtr = ctr2.config.ID
ctr1.config.NetNsCtr = ctr3.config.ID
ctr2.config.UserNsCtr = ctr3.config.ID
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 9b07198bc..06a0c9f32 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -1,8 +1,11 @@
package libpod
import (
+ "strings"
+
"github.com/containers/libpod/pkg/inspect"
"github.com/cri-o/ocicni/pkg/ocicni"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
@@ -48,6 +51,17 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
hostnamePath = getPath
}
+ var mounts []specs.Mount
+ for i, mnt := range spec.Mounts {
+ mounts = append(mounts, mnt)
+ // We only want to show the name of the named volume in the inspect
+ // output, so split the path and get the name out of it.
+ if strings.Contains(mnt.Source, c.runtime.config.VolumePath) {
+ split := strings.Split(mnt.Source[len(c.runtime.config.VolumePath)+1:], "/")
+ mounts[i].Source = split[0]
+ }
+ }
+
data := &inspect.ContainerInspectData{
ID: config.ID,
Created: config.CreatedTime,
@@ -85,7 +99,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
AppArmorProfile: spec.Process.ApparmorProfile,
ExecIDs: execIDs,
GraphDriver: driverData,
- Mounts: spec.Mounts,
+ Mounts: mounts,
Dependencies: c.Dependencies(),
NetworkSettings: &inspect.NetworkSettings{
Bridge: "", // TODO
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index d2f48d661..af17d8495 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -18,10 +18,7 @@ import (
"github.com/containers/libpod/pkg/ctime"
"github.com/containers/libpod/pkg/hooks"
"github.com/containers/libpod/pkg/hooks/exec"
- "github.com/containers/libpod/pkg/lookup"
- "github.com/containers/libpod/pkg/resolvconf"
"github.com/containers/libpod/pkg/rootless"
- "github.com/containers/libpod/pkg/secrets"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
@@ -276,6 +273,27 @@ func (c *Container) setupStorage(ctx context.Context) error {
},
LabelOpts: c.config.LabelOpts,
}
+ if c.config.Privileged {
+ privOpt := func(opt string) bool {
+ for _, privopt := range []string{"nodev", "nosuid", "noexec"} {
+ if opt == privopt {
+ return true
+ }
+ }
+ return false
+ }
+ defOptions, err := storage.GetDefaultMountOptions()
+ if err != nil {
+ return errors.Wrapf(err, "error getting default mount options")
+ }
+ var newOptions []string
+ for _, opt := range defOptions {
+ if !privOpt(opt) {
+ newOptions = append(newOptions, opt)
+ }
+ }
+ options.MountOpts = newOptions
+ }
if c.config.Rootfs == "" {
options.IDMappingOptions = c.config.IDMappings
@@ -583,13 +601,17 @@ func (c *Container) checkDependenciesRunningLocked(depCtrs map[string]*Container
}
func (c *Container) completeNetworkSetup() error {
- if !c.config.PostConfigureNetNS {
+ netDisabled, err := c.NetworkDisabled()
+ if err != nil {
+ return err
+ }
+ if !c.config.PostConfigureNetNS || netDisabled {
return nil
}
if err := c.syncContainer(); err != nil {
return err
}
- if rootless.IsRootless() {
+ if c.config.NetMode == "slirp4netns" {
return c.runtime.setupRootlessNetNS(c)
}
return c.runtime.setupNetNS(c)
@@ -597,10 +619,6 @@ func (c *Container) completeNetworkSetup() error {
// Initialize a container, creating it in the runtime
func (c *Container) init(ctx context.Context) error {
- if err := c.makeBindMounts(); err != nil {
- return err
- }
-
// Generate the OCI spec
spec, err := c.generateSpec(ctx)
if err != nil {
@@ -613,7 +631,7 @@ func (c *Container) init(ctx context.Context) error {
}
// With the spec complete, do an OCI create
- if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, false); err != nil {
+ if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, nil); err != nil {
return err
}
@@ -828,28 +846,22 @@ func (c *Container) mountStorage() (string, error) {
return c.state.Mountpoint, nil
}
- if !rootless.IsRootless() {
- // TODO: generalize this mount code so it will mount every mount in ctr.config.Mounts
- mounted, err := mount.Mounted(c.config.ShmDir)
- if err != nil {
- return "", errors.Wrapf(err, "unable to determine if %q is mounted", c.config.ShmDir)
- }
+ mounted, err := mount.Mounted(c.config.ShmDir)
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to determine if %q is mounted", c.config.ShmDir)
+ }
+ if !mounted {
+ shmOptions := fmt.Sprintf("mode=1777,size=%d", c.config.ShmSize)
+ if err := c.mountSHM(shmOptions); err != nil {
+ return "", err
+ }
if err := os.Chown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil {
return "", errors.Wrapf(err, "failed to chown %s", c.config.ShmDir)
}
-
- if !mounted {
- shmOptions := fmt.Sprintf("mode=1777,size=%d", c.config.ShmSize)
- if err := c.mountSHM(shmOptions); err != nil {
- return "", err
- }
- if err := os.Chown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil {
- return "", errors.Wrapf(err, "failed to chown %s", c.config.ShmDir)
- }
- }
}
+ // TODO: generalize this mount code so it will mount every mount in ctr.config.Mounts
mountPoint := c.config.Rootfs
if mountPoint == "" {
mountPoint, err = c.mount()
@@ -987,86 +999,6 @@ func (c *Container) postDeleteHooks(ctx context.Context) (err error) {
return nil
}
-// Make standard bind mounts to include in the container
-func (c *Container) makeBindMounts() error {
- if err := os.Chown(c.state.RunDir, c.RootUID(), c.RootGID()); err != nil {
- return errors.Wrapf(err, "cannot chown run directory %s", c.state.RunDir)
- }
-
- if c.state.BindMounts == nil {
- c.state.BindMounts = make(map[string]string)
- }
-
- // SHM is always added when we mount the container
- c.state.BindMounts["/dev/shm"] = c.config.ShmDir
-
- // Make /etc/resolv.conf
- if _, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
- // If it already exists, delete so we can recreate
- delete(c.state.BindMounts, "/etc/resolv.conf")
- }
- newResolv, err := c.generateResolvConf()
- if err != nil {
- return errors.Wrapf(err, "error creating resolv.conf for container %s", c.ID())
- }
- c.state.BindMounts["/etc/resolv.conf"] = newResolv
-
- newPasswd, err := c.generatePasswd()
- if err != nil {
- return errors.Wrapf(err, "error creating temporary passwd file for container %s", c.ID())
- }
- if newPasswd != "" {
- // Make /etc/passwd
- if _, ok := c.state.BindMounts["/etc/passwd"]; ok {
- // If it already exists, delete so we can recreate
- delete(c.state.BindMounts, "/etc/passwd")
- }
- logrus.Debugf("adding entry to /etc/passwd for non existent default user")
- c.state.BindMounts["/etc/passwd"] = newPasswd
- }
- // Make /etc/hosts
- if _, ok := c.state.BindMounts["/etc/hosts"]; ok {
- // If it already exists, delete so we can recreate
- delete(c.state.BindMounts, "/etc/hosts")
- }
- newHosts, err := c.generateHosts()
- if err != nil {
- return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
- }
- c.state.BindMounts["/etc/hosts"] = newHosts
-
- // Make /etc/hostname
- // This should never change, so no need to recreate if it exists
- if _, ok := c.state.BindMounts["/etc/hostname"]; !ok {
- hostnamePath, err := c.writeStringToRundir("hostname", c.Hostname())
- if err != nil {
- return errors.Wrapf(err, "error creating hostname file for container %s", c.ID())
- }
- c.state.BindMounts["/etc/hostname"] = hostnamePath
- }
-
- // Make .containerenv
- // Empty file, so no need to recreate if it exists
- if _, ok := c.state.BindMounts["/run/.containerenv"]; !ok {
- // Empty string for now, but we may consider populating this later
- containerenvPath, err := c.writeStringToRundir(".containerenv", "")
- if err != nil {
- return errors.Wrapf(err, "error creating containerenv file for container %s", c.ID())
- }
- c.state.BindMounts["/run/.containerenv"] = containerenvPath
- }
-
- // Add Secret Mounts
- secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.DestinationRunDir, c.RootUID(), c.RootGID())
- for _, mount := range secretMounts {
- if _, ok := c.state.BindMounts[mount.Destination]; !ok {
- c.state.BindMounts[mount.Destination] = mount.Source
- }
- }
-
- return nil
-}
-
// writeStringToRundir copies the provided file to the runtimedir
func (c *Container) writeStringToRundir(destFile, output string) (string, error) {
destFileName := filepath.Join(c.state.RunDir, destFile)
@@ -1095,146 +1027,7 @@ func (c *Container) writeStringToRundir(destFile, output string) (string, error)
return filepath.Join(c.state.DestinationRunDir, destFile), nil
}
-// generatePasswd generates a container specific passwd file,
-// iff g.config.User is a number
-func (c *Container) generatePasswd() (string, error) {
- var (
- groupspec string
- group *user.Group
- gid int
- )
- if c.config.User == "" {
- return "", nil
- }
- spec := strings.SplitN(c.config.User, ":", 2)
- userspec := spec[0]
- if len(spec) > 1 {
- groupspec = spec[1]
- }
- // If a non numeric User, then don't generate passwd
- uid, err := strconv.ParseUint(userspec, 10, 32)
- if err != nil {
- return "", nil
- }
- // Lookup the user to see if it exists in the container image
- _, err = lookup.GetUser(c.state.Mountpoint, userspec)
- if err != nil && err != user.ErrNoPasswdEntries {
- return "", err
- }
- if err == nil {
- return "", nil
- }
- if groupspec != "" {
- if !c.state.Mounted {
- return "", errors.Wrapf(ErrCtrStateInvalid, "container %s must be mounted in order to translate group field for passwd record", c.ID())
- }
- group, err = lookup.GetGroup(c.state.Mountpoint, groupspec)
- if err != nil {
- if err == user.ErrNoGroupEntries {
- return "", errors.Wrapf(err, "unable to get gid %s from group file", groupspec)
- }
- return "", err
- }
- gid = group.Gid
- }
- originPasswdFile := filepath.Join(c.state.Mountpoint, "/etc/passwd")
- orig, err := ioutil.ReadFile(originPasswdFile)
- if err != nil && !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "unable to read passwd file %s", originPasswdFile)
- }
-
- pwd := fmt.Sprintf("%s%d:x:%d:%d:container user:%s:/bin/sh\n", orig, uid, uid, gid, c.WorkingDir())
- passwdFile, err := c.writeStringToRundir("passwd", pwd)
- if err != nil {
- return "", errors.Wrapf(err, "failed to create temporary passwd file")
- }
- if os.Chmod(passwdFile, 0644); err != nil {
- return "", err
- }
- return passwdFile, nil
-}
-
-// generateResolvConf generates a containers resolv.conf
-func (c *Container) generateResolvConf() (string, error) {
- // Determine the endpoint for resolv.conf in case it is a symlink
- resolvPath, err := filepath.EvalSymlinks("/etc/resolv.conf")
- if err != nil {
- return "", err
- }
-
- contents, err := ioutil.ReadFile(resolvPath)
- if err != nil {
- return "", errors.Wrapf(err, "unable to read %s", resolvPath)
- }
-
- // Process the file to remove localhost nameservers
- // TODO: set ipv6 enable bool more sanely
- resolv, err := resolvconf.FilterResolvDNS(contents, true)
- if err != nil {
- return "", errors.Wrapf(err, "error parsing host resolv.conf")
- }
-
- // Make a new resolv.conf
- nameservers := resolvconf.GetNameservers(resolv.Content)
- if len(c.config.DNSServer) > 0 {
- // We store DNS servers as net.IP, so need to convert to string
- nameservers = []string{}
- for _, server := range c.config.DNSServer {
- nameservers = append(nameservers, server.String())
- }
- }
-
- search := resolvconf.GetSearchDomains(resolv.Content)
- if len(c.config.DNSSearch) > 0 {
- search = c.config.DNSSearch
- }
-
- options := resolvconf.GetOptions(resolv.Content)
- if len(c.config.DNSOption) > 0 {
- options = c.config.DNSOption
- }
-
- destPath := filepath.Join(c.state.RunDir, "resolv.conf")
-
- if err := os.Remove(destPath); err != nil && !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "error removing resolv.conf for container %s", c.ID())
- }
-
- // Build resolv.conf
- if _, err = resolvconf.Build(destPath, nameservers, search, options); err != nil {
- return "", errors.Wrapf(err, "error building resolv.conf for container %s")
- }
-
- // Relabel resolv.conf for the container
- if err := label.Relabel(destPath, c.config.MountLabel, false); err != nil {
- return "", err
- }
-
- return filepath.Join(c.state.DestinationRunDir, "resolv.conf"), nil
-}
-
-// generateHosts creates a containers hosts file
-func (c *Container) generateHosts() (string, error) {
- orig, err := ioutil.ReadFile("/etc/hosts")
- if err != nil {
- return "", errors.Wrapf(err, "unable to read /etc/hosts")
- }
- hosts := string(orig)
- if len(c.config.HostAdd) > 0 {
- for _, host := range c.config.HostAdd {
- // the host format has already been verified at this point
- fields := strings.SplitN(host, ":", 2)
- hosts += fmt.Sprintf("%s %s\n", fields[1], fields[0])
- }
- }
- if len(c.state.NetworkStatus) > 0 && len(c.state.NetworkStatus[0].IPs) > 0 {
- ipAddress := strings.Split(c.state.NetworkStatus[0].IPs[0].Address.String(), "/")[0]
- hosts += fmt.Sprintf("%s\t%s\n", ipAddress, c.Hostname())
- }
- return c.writeStringToRundir("hosts", hosts)
-}
-
-func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator) error {
+func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator, execUser *user.ExecUser) error {
var uid, gid int
mountPoint := c.state.Mountpoint
if !c.state.Mounted {
@@ -1260,12 +1053,8 @@ func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator)
}
if c.config.User != "" {
- if !c.state.Mounted {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s must be mounted in order to translate User field", c.ID())
- }
- execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, nil)
- if err != nil {
- return err
+ if execUser == nil {
+ return errors.Wrapf(ErrInternal, "nil pointer passed to addLocalVolumes for execUser")
}
uid = execUser.Uid
gid = execUser.Gid
@@ -1379,10 +1168,6 @@ func (c *Container) saveSpec(spec *spec.Spec) error {
}
func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (extensionStageHooks map[string][]spec.Hook, err error) {
- if len(c.runtime.config.HooksDir) == 0 {
- return nil, nil
- }
-
var locale string
var ok bool
for _, envVar := range []string{
@@ -1410,25 +1195,43 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (exten
}
}
- allHooks := make(map[string][]spec.Hook)
- for _, hDir := range c.runtime.config.HooksDir {
- manager, err := hooks.New(ctx, []string{hDir}, []string{"poststop"}, lang)
- if err != nil {
- if c.runtime.config.HooksDirNotExistFatal || !os.IsNotExist(err) {
- return nil, err
- }
- logrus.Warnf("failed to load hooks: {}", err)
+ if c.runtime.config.HooksDir == nil {
+ if rootless.IsRootless() {
return nil, nil
}
- hooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
- if err != nil {
- return nil, err
+ allHooks := make(map[string][]spec.Hook)
+ for _, hDir := range []string{hooks.DefaultDir, hooks.OverrideDir} {
+ manager, err := hooks.New(ctx, []string{hDir}, []string{"poststop"}, lang)
+ if err != nil {
+ if os.IsNotExist(err) {
+ continue
+ }
+ return nil, err
+ }
+ hooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
+ if err != nil {
+ return nil, err
+ }
+ if len(hooks) > 0 || config.Hooks != nil {
+ logrus.Warnf("implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load hooks from this directory", hDir)
+ }
+ for i, hook := range hooks {
+ allHooks[i] = hook
+ }
}
- for i, hook := range hooks {
- allHooks[i] = hook
+ return allHooks, nil
+ }
+
+ manager, err := hooks.New(ctx, c.runtime.config.HooksDir, []string{"poststop"}, lang)
+ if err != nil {
+ if os.IsNotExist(err) {
+ logrus.Warnf("Requested OCI hooks directory %q does not exist", c.runtime.config.HooksDir)
+ return nil, nil
}
+ return nil, err
}
- return allHooks, nil
+
+ return manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
}
// mount mounts the container's root filesystem
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 163cd75e7..93d20491e 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -11,6 +11,7 @@ import (
"os"
"path"
"path/filepath"
+ "strconv"
"strings"
"sync"
"syscall"
@@ -21,8 +22,12 @@ import (
crioAnnotations "github.com/containers/libpod/pkg/annotations"
"github.com/containers/libpod/pkg/criu"
"github.com/containers/libpod/pkg/lookup"
+ "github.com/containers/libpod/pkg/resolvconf"
"github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/libpod/pkg/secrets"
"github.com/containers/storage/pkg/idtools"
+ "github.com/mrunalp/fileutils"
+ "github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -57,7 +62,7 @@ func (c *Container) prepare() (err error) {
networkStatus []*cnitypes.Result
createNetNSErr, mountStorageErr error
mountPoint string
- saveNetworkStatus bool
+ tmpStateLock sync.Mutex
)
wg.Add(2)
@@ -66,17 +71,55 @@ func (c *Container) prepare() (err error) {
defer wg.Done()
// Set up network namespace if not already set up
if c.config.CreateNetNS && c.state.NetNS == nil && !c.config.PostConfigureNetNS {
- saveNetworkStatus = true
netNS, networkStatus, createNetNSErr = c.runtime.createNetNS(c)
+
+ tmpStateLock.Lock()
+ defer tmpStateLock.Unlock()
+
+ // Assign NetNS attributes to container
+ if createNetNSErr == nil {
+ c.state.NetNS = netNS
+ c.state.NetworkStatus = networkStatus
+ }
}
}()
// Mount storage if not mounted
go func() {
defer wg.Done()
mountPoint, mountStorageErr = c.mountStorage()
+
+ if mountStorageErr != nil {
+ return
+ }
+
+ tmpStateLock.Lock()
+ defer tmpStateLock.Unlock()
+
+ // Finish up mountStorage
+ c.state.Mounted = true
+ c.state.Mountpoint = mountPoint
+ if c.state.UserNSRoot == "" {
+ c.state.RealMountpoint = c.state.Mountpoint
+ } else {
+ c.state.RealMountpoint = filepath.Join(c.state.UserNSRoot, "mountpoint")
+ }
+
+ logrus.Debugf("Created root filesystem for container %s at %s", c.ID(), c.state.Mountpoint)
+ }()
+
+ defer func() {
+ if err != nil {
+ if err2 := c.cleanupNetwork(); err2 != nil {
+ logrus.Errorf("Error cleaning up container %s network: %v", c.ID(), err2)
+ }
+ if err2 := c.cleanupStorage(); err2 != nil {
+ logrus.Errorf("Error cleaning up container %s storage: %v", c.ID(), err2)
+ }
+ }
}()
wg.Wait()
+
if createNetNSErr != nil {
if mountStorageErr != nil {
logrus.Error(createNetNSErr)
@@ -88,28 +131,22 @@ func (c *Container) prepare() (err error) {
return mountStorageErr
}
- // Assign NetNS attributes to container
- if saveNetworkStatus {
- c.state.NetNS = netNS
- c.state.NetworkStatus = networkStatus
- }
-
- // Finish up mountStorage
- c.state.Mounted = true
- c.state.Mountpoint = mountPoint
- if c.state.UserNSRoot == "" {
- c.state.RealMountpoint = c.state.Mountpoint
- } else {
- c.state.RealMountpoint = filepath.Join(c.state.UserNSRoot, "mountpoint")
- }
-
- logrus.Debugf("Created root filesystem for container %s at %s", c.ID(), c.state.Mountpoint)
// Save the container
return c.save()
}
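
prepare() now runs network-namespace creation and storage mounting concurrently, and each goroutine writes its own results into the container state under a shared mutex instead of handing values back to be applied after wg.Wait(). The synchronization pattern in isolation, with generic names in place of the real container state:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var (
            wg    sync.WaitGroup
            mu    sync.Mutex // plays the role of tmpStateLock above
            state = map[string]string{}
        )
        wg.Add(2)
        go func() { // network setup
            defer wg.Done()
            mu.Lock()
            defer mu.Unlock()
            state["netns"] = "/proc/1234/ns/net" // illustrative value
        }()
        go func() { // storage mount
            defer wg.Done()
            mu.Lock()
            defer mu.Unlock()
            state["mountpoint"] = "/var/lib/containers/overlay/abc/merged"
        }()
        wg.Wait() // both writes are complete and visible here
        fmt.Println(state)
    }
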
// cleanupNetwork unmounts and cleans up the container's network
func (c *Container) cleanupNetwork() error {
+ if c.config.NetNsCtr != "" {
+ return nil
+ }
+ netDisabled, err := c.NetworkDisabled()
+ if err != nil {
+ return err
+ }
+ if netDisabled {
+ return nil
+ }
if c.state.NetNS == nil {
logrus.Debugf("Network is already cleaned up, skipping...")
return nil
@@ -147,6 +184,10 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path())
}
}
+
+ if err := c.makeBindMounts(); err != nil {
+ return nil, err
+ }
// Check if the spec file mounts contain the label Relabel flags z or Z.
// If they do, relabel the source directory and then remove the option.
for _, m := range g.Mounts() {
@@ -190,23 +231,18 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
}
}
- if !rootless.IsRootless() {
- if c.state.ExtensionStageHooks, err = c.setupOCIHooks(ctx, g.Config); err != nil {
- return nil, errors.Wrapf(err, "error setting up OCI Hooks")
- }
+ if c.state.ExtensionStageHooks, err = c.setupOCIHooks(ctx, g.Config); err != nil {
+ return nil, errors.Wrapf(err, "error setting up OCI Hooks")
}
// Bind builtin image volumes
if c.config.Rootfs == "" && c.config.ImageVolumes {
- if err := c.addLocalVolumes(ctx, &g); err != nil {
+ if err := c.addLocalVolumes(ctx, &g, execUser); err != nil {
return nil, errors.Wrapf(err, "error mounting image volumes")
}
}
if c.config.User != "" {
- if !c.state.Mounted {
- return nil, errors.Wrapf(ErrCtrStateInvalid, "container %s must be mounted in order to translate User field", c.ID())
- }
// User and Group must go together
g.SetProcessUID(uint32(execUser.Uid))
g.SetProcessGID(uint32(execUser.Gid))
@@ -214,9 +250,6 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Add addition groups if c.config.GroupAdd is not empty
if len(c.config.Groups) > 0 {
- if !c.state.Mounted {
- return nil, errors.Wrapf(ErrCtrStateInvalid, "container %s must be mounted in order to add additional groups", c.ID())
- }
gids, _ := lookup.GetContainerGroups(c.config.Groups, c.state.Mountpoint, nil)
for _, gid := range gids {
g.AddProcessAdditionalGid(gid)
@@ -325,8 +358,34 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Mounts need to be sorted so paths will not cover other paths
mounts := sortMounts(g.Mounts())
g.ClearMounts()
+
+	// Determine the value of RootPropagation based on volume properties. If
+ // a volume is shared, then keep root propagation shared. This should
+ // work for slave and private volumes too.
+ //
+ // For slave volumes, it can be either [r]shared/[r]slave.
+ //
+ // For private volumes any root propagation value should work.
+ rootPropagation := ""
for _, m := range mounts {
g.AddMount(m)
+ for _, opt := range m.Options {
+ switch opt {
+ case MountShared, MountRShared:
+ if rootPropagation != MountShared && rootPropagation != MountRShared {
+ rootPropagation = MountShared
+ }
+ case MountSlave, MountRSlave:
+ if rootPropagation != MountShared && rootPropagation != MountRShared && rootPropagation != MountSlave && rootPropagation != MountRSlave {
+ rootPropagation = MountRSlave
+ }
+ }
+ }
+ }
+
+ if rootPropagation != "" {
+ logrus.Debugf("set root propagation to %q", rootPropagation)
+ g.SetLinuxRootPropagation(rootPropagation)
}
return g.Config, nil
}
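
The selection rule above reduces to a small priority function: any shared/rshared mount option forces shared root propagation, otherwise any slave/rslave option forces rslave, and otherwise the runtime default is left alone. As a standalone sketch:

    package main

    import "fmt"

    // pickRootPropagation mirrors the priority rule above; an empty result
    // means the runtime's default propagation is kept.
    func pickRootPropagation(mountOpts [][]string) string {
        prop := ""
        for _, opts := range mountOpts {
            for _, o := range opts {
                switch o {
                case "shared", "rshared":
                    prop = "shared"
                case "slave", "rslave":
                    if prop != "shared" {
                        prop = "rslave"
                    }
                }
            }
        }
        return prop
    }

    func main() {
        fmt.Println(pickRootPropagation([][]string{{"rbind", "rslave"}, {"shared"}})) // shared
    }
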
@@ -409,7 +468,7 @@ func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr
return nil
}
-func (c *Container) checkpoint(ctx context.Context, keep bool) (err error) {
+func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) (err error) {
if !criu.CheckForCriu() {
return errors.Errorf("checkpointing a container requires at least CRIU %d", criu.MinCriuVersion)
@@ -418,7 +477,7 @@ func (c *Container) checkpoint(ctx context.Context, keep bool) (err error) {
if c.state.State != ContainerStateRunning {
return errors.Wrapf(ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
}
- if err := c.runtime.ociRuntime.checkpointContainer(c); err != nil {
+ if err := c.runtime.ociRuntime.checkpointContainer(c, options); err != nil {
return err
}
@@ -435,14 +494,16 @@ func (c *Container) checkpoint(ctx context.Context, keep bool) (err error) {
logrus.Debugf("Checkpointed container %s", c.ID())
- c.state.State = ContainerStateStopped
+ if !options.KeepRunning {
+ c.state.State = ContainerStateStopped
- // Cleanup Storage and Network
- if err := c.cleanup(ctx); err != nil {
- return err
+ // Cleanup Storage and Network
+ if err := c.cleanup(ctx); err != nil {
+ return err
+ }
}
- if !keep {
+ if !options.Keep {
// Remove log file
os.Remove(filepath.Join(c.bundlePath(), "dump.log"))
// Remove statistic file
@@ -452,7 +513,7 @@ func (c *Container) checkpoint(ctx context.Context, keep bool) (err error) {
return c.save()
}
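
Replacing the keep bool with a ContainerCheckpointOptions struct lets callers combine flags. Only Keep and KeepRunning are visible in this hunk, so the caller-side sketch below is illustrative rather than the full API:

    // Illustrative only: Keep and KeepRunning are the fields visible in this
    // diff; the real options struct may carry more.
    opts := ContainerCheckpointOptions{
        Keep:        true, // retain dump.log and the stats file afterwards
        KeepRunning: true, // skip the stop + storage/network cleanup path
    }
    if err := c.checkpoint(ctx, opts); err != nil {
        return err
    }
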
-func (c *Container) restore(ctx context.Context, keep bool) (err error) {
+func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) (err error) {
if !criu.CheckForCriu() {
return errors.Errorf("restoring a container requires at least CRIU %d", criu.MinCriuVersion)
@@ -540,7 +601,7 @@ func (c *Container) restore(ctx context.Context, keep bool) (err error) {
// Cleanup for a working restore.
c.removeConmonFiles()
- if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, true); err != nil {
+ if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, &options); err != nil {
return err
}
@@ -548,7 +609,7 @@ func (c *Container) restore(ctx context.Context, keep bool) (err error) {
c.state.State = ContainerStateRunning
- if !keep {
+ if !options.Keep {
// Delete all checkpoint related files. At this point, in theory, all files
// should exist. Still ignoring errors for now as the container should be
// restored and running. Not erroring out just because some cleanup operation
@@ -569,3 +630,269 @@ func (c *Container) restore(ctx context.Context, keep bool) (err error) {
return c.save()
}
+
+// Make standard bind mounts to include in the container
+func (c *Container) makeBindMounts() error {
+ if err := os.Chown(c.state.RunDir, c.RootUID(), c.RootGID()); err != nil {
+ return errors.Wrapf(err, "cannot chown run directory %s", c.state.RunDir)
+ }
+
+ if c.state.BindMounts == nil {
+ c.state.BindMounts = make(map[string]string)
+ }
+ netDisabled, err := c.NetworkDisabled()
+ if err != nil {
+ return err
+ }
+
+ if !netDisabled {
+		// If /etc/resolv.conf and /etc/hosts exist, delete them so we
+		// will recreate them
+ if path, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
+ if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+ return errors.Wrapf(err, "error removing container %s resolv.conf", c.ID())
+ }
+ delete(c.state.BindMounts, "/etc/resolv.conf")
+ }
+ if path, ok := c.state.BindMounts["/etc/hosts"]; ok {
+ if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+ return errors.Wrapf(err, "error removing container %s hosts", c.ID())
+ }
+ delete(c.state.BindMounts, "/etc/hosts")
+ }
+
+ if c.config.NetNsCtr != "" {
+ // We share a net namespace
+ // We want /etc/resolv.conf and /etc/hosts from the
+ // other container
+ depCtr, err := c.runtime.state.Container(c.config.NetNsCtr)
+ if err != nil {
+ return errors.Wrapf(err, "error fetching dependency %s of container %s", c.config.NetNsCtr, c.ID())
+ }
+
+ // We need that container's bind mounts
+ bindMounts, err := depCtr.BindMounts()
+ if err != nil {
+ return errors.Wrapf(err, "error fetching bind mounts from dependency %s of container %s", depCtr.ID(), c.ID())
+ }
+
+ // The other container may not have a resolv.conf or /etc/hosts
+ // If it doesn't, don't copy them
+ resolvPath, exists := bindMounts["/etc/resolv.conf"]
+ if exists {
+ resolvDest := filepath.Join(c.state.RunDir, "resolv.conf")
+ if err := fileutils.CopyFile(resolvPath, resolvDest); err != nil {
+ return errors.Wrapf(err, "error copying resolv.conf from dependency container %s of container %s", depCtr.ID(), c.ID())
+ }
+ c.state.BindMounts["/etc/resolv.conf"] = resolvDest
+ }
+
+ hostsPath, exists := bindMounts["/etc/hosts"]
+ if exists {
+ hostsDest := filepath.Join(c.state.RunDir, "hosts")
+ if err := fileutils.CopyFile(hostsPath, hostsDest); err != nil {
+ return errors.Wrapf(err, "error copying hosts file from dependency container %s of container %s", depCtr.ID(), c.ID())
+ }
+ c.state.BindMounts["/etc/hosts"] = hostsDest
+ }
+ } else {
+ newResolv, err := c.generateResolvConf()
+ if err != nil {
+ return errors.Wrapf(err, "error creating resolv.conf for container %s", c.ID())
+ }
+ c.state.BindMounts["/etc/resolv.conf"] = newResolv
+
+ newHosts, err := c.generateHosts()
+ if err != nil {
+ return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
+ }
+ c.state.BindMounts["/etc/hosts"] = newHosts
+ }
+ }
+
+ // SHM is always added when we mount the container
+ c.state.BindMounts["/dev/shm"] = c.config.ShmDir
+
+ newPasswd, err := c.generatePasswd()
+ if err != nil {
+ return errors.Wrapf(err, "error creating temporary passwd file for container %s", c.ID())
+ }
+ if newPasswd != "" {
+ // Make /etc/passwd
+ if _, ok := c.state.BindMounts["/etc/passwd"]; ok {
+ // If it already exists, delete so we can recreate
+ delete(c.state.BindMounts, "/etc/passwd")
+ }
+		logrus.Debugf("adding entry to /etc/passwd for non-existent default user")
+ c.state.BindMounts["/etc/passwd"] = newPasswd
+ }
+
+ // Make /etc/hostname
+ // This should never change, so no need to recreate if it exists
+ if _, ok := c.state.BindMounts["/etc/hostname"]; !ok {
+ hostnamePath, err := c.writeStringToRundir("hostname", c.Hostname())
+ if err != nil {
+ return errors.Wrapf(err, "error creating hostname file for container %s", c.ID())
+ }
+ c.state.BindMounts["/etc/hostname"] = hostnamePath
+ }
+
+ // Make .containerenv
+ // Empty file, so no need to recreate if it exists
+ if _, ok := c.state.BindMounts["/run/.containerenv"]; !ok {
+ // Empty string for now, but we may consider populating this later
+ containerenvPath, err := c.writeStringToRundir(".containerenv", "")
+ if err != nil {
+ return errors.Wrapf(err, "error creating containerenv file for container %s", c.ID())
+ }
+ c.state.BindMounts["/run/.containerenv"] = containerenvPath
+ }
+
+ // Add Secret Mounts
+ secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.DestinationRunDir, c.RootUID(), c.RootGID())
+ for _, mount := range secretMounts {
+ if _, ok := c.state.BindMounts[mount.Destination]; !ok {
+ c.state.BindMounts[mount.Destination] = mount.Source
+ }
+ }
+
+ return nil
+}
+
+// generateResolvConf generates a container's resolv.conf
+func (c *Container) generateResolvConf() (string, error) {
+ // Determine the endpoint for resolv.conf in case it is a symlink
+ resolvPath, err := filepath.EvalSymlinks("/etc/resolv.conf")
+ if err != nil {
+ return "", err
+ }
+
+ contents, err := ioutil.ReadFile(resolvPath)
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to read %s", resolvPath)
+ }
+
+ // Ensure that the container's /etc/resolv.conf is compatible with its
+ // network configuration.
+ // TODO: set ipv6 enable bool more sanely
+ resolv, err := resolvconf.FilterResolvDNS(contents, true, c.config.CreateNetNS)
+ if err != nil {
+ return "", errors.Wrapf(err, "error parsing host resolv.conf")
+ }
+
+ // Make a new resolv.conf
+ nameservers := resolvconf.GetNameservers(resolv.Content)
+ if len(c.config.DNSServer) > 0 {
+ // We store DNS servers as net.IP, so need to convert to string
+ nameservers = []string{}
+ for _, server := range c.config.DNSServer {
+ nameservers = append(nameservers, server.String())
+ }
+ }
+
+ search := resolvconf.GetSearchDomains(resolv.Content)
+ if len(c.config.DNSSearch) > 0 {
+ search = c.config.DNSSearch
+ }
+
+ options := resolvconf.GetOptions(resolv.Content)
+ if len(c.config.DNSOption) > 0 {
+ options = c.config.DNSOption
+ }
+
+ destPath := filepath.Join(c.state.RunDir, "resolv.conf")
+
+ if err := os.Remove(destPath); err != nil && !os.IsNotExist(err) {
+ return "", errors.Wrapf(err, "error removing resolv.conf for container %s", c.ID())
+ }
+
+ // Build resolv.conf
+ if _, err = resolvconf.Build(destPath, nameservers, search, options); err != nil {
+ return "", errors.Wrapf(err, "error building resolv.conf for container %s", c.ID())
+ }
+
+ // Relabel resolv.conf for the container
+ if err := label.Relabel(destPath, c.config.MountLabel, false); err != nil {
+ return "", err
+ }
+
+ return filepath.Join(c.state.DestinationRunDir, "resolv.conf"), nil
+}
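
Condensed, the pipeline above is: read the host file, filter it, extract nameservers/search/options, apply per-container overrides, and build the new file. A sketch using the same pkg/resolvconf calls (imports as in this file: io/ioutil, path/filepath, pkg/resolvconf; relabeling and override handling elided):

    func buildResolvConf(runDir string, createNetNS bool) (string, error) {
        contents, err := ioutil.ReadFile("/etc/resolv.conf")
        if err != nil {
            return "", err
        }
        // Second argument: IPv6 enabled (see the TODO above); third argument:
        // whether the container gets its own network namespace.
        filtered, err := resolvconf.FilterResolvDNS(contents, true, createNetNS)
        if err != nil {
            return "", err
        }
        dest := filepath.Join(runDir, "resolv.conf")
        _, err = resolvconf.Build(dest,
            resolvconf.GetNameservers(filtered.Content),
            resolvconf.GetSearchDomains(filtered.Content),
            resolvconf.GetOptions(filtered.Content))
        return dest, err
    }
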
+
+// generateHosts creates a container's hosts file
+func (c *Container) generateHosts() (string, error) {
+ orig, err := ioutil.ReadFile("/etc/hosts")
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to read /etc/hosts")
+ }
+ hosts := string(orig)
+ if len(c.config.HostAdd) > 0 {
+ for _, host := range c.config.HostAdd {
+ // the host format has already been verified at this point
+ fields := strings.SplitN(host, ":", 2)
+ hosts += fmt.Sprintf("%s %s\n", fields[1], fields[0])
+ }
+ }
+ if len(c.state.NetworkStatus) > 0 && len(c.state.NetworkStatus[0].IPs) > 0 {
+ ipAddress := strings.Split(c.state.NetworkStatus[0].IPs[0].Address.String(), "/")[0]
+ hosts += fmt.Sprintf("%s\t%s\n", ipAddress, c.Hostname())
+ }
+ return c.writeStringToRundir("hosts", hosts)
+}
+
+// generatePasswd generates a container-specific passwd file,
+// iff c.config.User is numeric
+func (c *Container) generatePasswd() (string, error) {
+ var (
+ groupspec string
+ gid int
+ )
+ if c.config.User == "" {
+ return "", nil
+ }
+ spec := strings.SplitN(c.config.User, ":", 2)
+ userspec := spec[0]
+ if len(spec) > 1 {
+ groupspec = spec[1]
+ }
+	// If the User is non-numeric, don't generate a passwd file
+ uid, err := strconv.ParseUint(userspec, 10, 32)
+ if err != nil {
+ return "", nil
+ }
+ // Lookup the user to see if it exists in the container image
+ _, err = lookup.GetUser(c.state.Mountpoint, userspec)
+ if err != nil && err != user.ErrNoPasswdEntries {
+ return "", err
+ }
+ if err == nil {
+ return "", nil
+ }
+ if groupspec != "" {
+ ugid, err := strconv.ParseUint(groupspec, 10, 32)
+ if err == nil {
+ gid = int(ugid)
+ } else {
+ group, err := lookup.GetGroup(c.state.Mountpoint, groupspec)
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to get gid %s from group file", groupspec)
+ }
+ gid = group.Gid
+ }
+ }
+ originPasswdFile := filepath.Join(c.state.Mountpoint, "/etc/passwd")
+ orig, err := ioutil.ReadFile(originPasswdFile)
+ if err != nil && !os.IsNotExist(err) {
+ return "", errors.Wrapf(err, "unable to read passwd file %s", originPasswdFile)
+ }
+
+ pwd := fmt.Sprintf("%s%d:x:%d:%d:container user:%s:/bin/sh\n", orig, uid, uid, gid, c.WorkingDir())
+ passwdFile, err := c.writeStringToRundir("passwd", pwd)
+ if err != nil {
+ return "", errors.Wrapf(err, "failed to create temporary passwd file")
+ }
+	if err := os.Chmod(passwdFile, 0644); err != nil {
+ return "", err
+ }
+ return passwdFile, nil
+}
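
For reference, the appended entry follows the standard seven-field passwd(5) layout. With uid/gid 1000 and workdir /work (illustrative values), the Sprintf above yields:

    1000:x:1000:1000:container user:/work:/bin/sh
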
diff --git a/libpod/container_internal_unsupported.go b/libpod/container_internal_unsupported.go
index eed0449a9..4af0cd56c 100644
--- a/libpod/container_internal_unsupported.go
+++ b/libpod/container_internal_unsupported.go
@@ -28,10 +28,10 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
return nil, ErrNotImplemented
}
-func (c *Container) checkpoint(ctx context.Context, keep bool) error {
+func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
return ErrNotImplemented
}
-func (c *Container) restore(ctx context.Context, keep bool) error {
+func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) error {
return ErrNotImplemented
}
diff --git a/libpod/errors.go b/libpod/errors.go
index 75b4928da..d6614141c 100644
--- a/libpod/errors.go
+++ b/libpod/errors.go
@@ -11,18 +11,24 @@ var (
ErrNoSuchPod = errors.New("no such pod")
// ErrNoSuchImage indicates the requested image does not exist
ErrNoSuchImage = errors.New("no such image")
+ // ErrNoSuchVolume indicates the requested volume does not exist
+ ErrNoSuchVolume = errors.New("no such volume")
// ErrCtrExists indicates a container with the same name or ID already
// exists
ErrCtrExists = errors.New("container already exists")
// ErrPodExists indicates a pod with the same name or ID already exists
ErrPodExists = errors.New("pod already exists")
- // ErrImageExists indicated an image with the same ID already exists
+ // ErrImageExists indicates an image with the same ID already exists
ErrImageExists = errors.New("image already exists")
+ // ErrVolumeExists indicates a volume with the same name already exists
+ ErrVolumeExists = errors.New("volume already exists")
// ErrCtrStateInvalid indicates a container is in an improper state for
// the requested operation
ErrCtrStateInvalid = errors.New("container state improper")
+ // ErrVolumeBeingUsed indicates that a volume is being used by at least one container
+ ErrVolumeBeingUsed = errors.New("volume is being used")
// ErrRuntimeFinalized indicates that the runtime has already been
// created and cannot be modified
@@ -33,6 +39,9 @@ var (
// ErrPodFinalized indicates that the pod has already been created and
// cannot be modified
ErrPodFinalized = errors.New("pod has been finalized")
+ // ErrVolumeFinalized indicates that the volume has already been created and
+ // cannot be modified
+ ErrVolumeFinalized = errors.New("volume has been finalized")
// ErrInvalidArg indicates that an invalid argument was passed
ErrInvalidArg = errors.New("invalid argument")
@@ -55,6 +64,9 @@ var (
// ErrPodRemoved indicates that the pod has already been removed and no
// further operations can be performed on it
ErrPodRemoved = errors.New("pod has already been removed")
+ // ErrVolumeRemoved indicates that the volume has already been removed and
+ // no further operations can be performed on it
+ ErrVolumeRemoved = errors.New("volume has already been removed")
// ErrDBClosed indicates that the connection to the state database has
// already been closed
diff --git a/libpod/image/docker_registry_options.go b/libpod/image/docker_registry_options.go
index 97a151396..c191a3ca2 100644
--- a/libpod/image/docker_registry_options.go
+++ b/libpod/image/docker_registry_options.go
@@ -19,8 +19,9 @@ type DockerRegistryOptions struct {
// except for ".cert" and ".key" suffixes).
DockerCertPath string
// DockerInsecureSkipTLSVerify turns off verification of TLS
- // certificates and allows connecting to registries without encryption.
- DockerInsecureSkipTLSVerify bool
+ // certificates and allows connecting to registries without encryption
+ // - or forces it on even if registries.conf has the registry configured as insecure.
+ DockerInsecureSkipTLSVerify types.OptionalBool
}
// GetSystemContext constructs a new system context from a parent context. the values in the DockerRegistryOptions, and other parameters.
diff --git a/libpod/image/errors.go b/libpod/image/errors.go
new file mode 100644
index 000000000..4088946cb
--- /dev/null
+++ b/libpod/image/errors.go
@@ -0,0 +1,15 @@
+package image
+
+import (
+ "errors"
+)
+
+// Copied directly from libpod errors to avoid circular imports
+var (
+ // ErrNoSuchCtr indicates the requested container does not exist
+ ErrNoSuchCtr = errors.New("no such container")
+ // ErrNoSuchPod indicates the requested pod does not exist
+ ErrNoSuchPod = errors.New("no such pod")
+ // ErrNoSuchImage indicates the requested image does not exist
+ ErrNoSuchImage = errors.New("no such image")
+)
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 7e520d97e..476d28226 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -125,7 +125,7 @@ func (ir *Runtime) NewFromLocal(name string) (*Image, error) {
// New creates a new image object where the image could be local
// or remote
-func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, forcePull, forceSecure bool) (*Image, error) {
+func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, forcePull bool) (*Image, error) {
// We don't know if the image is local or not ... check local first
newImage := Image{
InputName: name,
@@ -145,7 +145,7 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile
if signaturePolicyPath == "" {
signaturePolicyPath = ir.SignaturePolicyPath
}
- imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions, forceSecure)
+ imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions)
if err != nil {
return nil, errors.Wrapf(err, "unable to pull %s", name)
}
@@ -167,7 +167,7 @@ func (ir *Runtime) LoadFromArchiveReference(ctx context.Context, srcRef types.Im
if signaturePolicyPath == "" {
signaturePolicyPath = ir.SignaturePolicyPath
}
- imageNames, err := ir.pullImageFromReference(ctx, srcRef, writer, "", signaturePolicyPath, SigningOptions{}, &DockerRegistryOptions{}, false)
+ imageNames, err := ir.pullImageFromReference(ctx, srcRef, writer, "", signaturePolicyPath, SigningOptions{}, &DockerRegistryOptions{})
if err != nil {
return nil, errors.Wrapf(err, "unable to pull %s", transports.ImageName(srcRef))
}
@@ -252,7 +252,7 @@ func (i *Image) getLocalImage() (*storage.Image, error) {
// The image has a registry name in it and we made sure we looked for it locally
// with a tag. It cannot be local.
if decomposedImage.hasRegistry {
- return nil, errors.Errorf("%s", imageError)
+ return nil, errors.Wrapf(ErrNoSuchImage, imageError)
}
@@ -275,7 +275,7 @@ func (i *Image) getLocalImage() (*storage.Image, error) {
return repoImage, nil
}
- return nil, errors.Wrapf(err, imageError)
+ return nil, errors.Wrapf(ErrNoSuchImage, err.Error())
}
// ID returns the image ID as a string
@@ -498,7 +498,7 @@ func (i *Image) UntagImage(tag string) error {
// PushImageToHeuristicDestination pushes the given image to "destination", which is heuristically parsed.
// Use PushImageToReference if the destination is known precisely.
-func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination, manifestMIMEType, authFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, forceSecure bool, additionalDockerArchiveTags []reference.NamedTagged) error {
+func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination, manifestMIMEType, authFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error {
if destination == "" {
return errors.Wrapf(syscall.EINVAL, "destination image name must be specified")
}
@@ -516,11 +516,11 @@ func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination
return err
}
}
- return i.PushImageToReference(ctx, dest, manifestMIMEType, authFile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, forceSecure, additionalDockerArchiveTags)
+ return i.PushImageToReference(ctx, dest, manifestMIMEType, authFile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, additionalDockerArchiveTags)
}
// PushImageToReference pushes the given image to a location described by the given path
-func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageReference, manifestMIMEType, authFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, forceSecure bool, additionalDockerArchiveTags []reference.NamedTagged) error {
+func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageReference, manifestMIMEType, authFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error {
sc := GetSystemContext(signaturePolicyPath, authFile, forceCompress)
policyContext, err := getPolicyContext(sc)
@@ -534,23 +534,8 @@ func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageRefere
if err != nil {
return errors.Wrapf(err, "error getting source imageReference for %q", i.InputName)
}
- insecureRegistries, err := registries.GetInsecureRegistries()
- if err != nil {
- return err
- }
copyOptions := getCopyOptions(sc, writer, nil, dockerRegistryOptions, signingOptions, manifestMIMEType, additionalDockerArchiveTags)
- if dest.Transport().Name() == DockerTransport {
- imgRef := dest.DockerReference()
- if imgRef == nil { // This should never happen; such references can’t be created.
- return fmt.Errorf("internal error: DockerTransport reference %s does not have a DockerReference", transports.ImageName(dest))
- }
- registry := reference.Domain(imgRef)
-
- if util.StringInSlice(registry, insecureRegistries) && !forceSecure {
- copyOptions.DestinationCtx.DockerInsecureSkipTLSVerify = true
- logrus.Info(fmt.Sprintf("%s is an insecure registry; pushing with tls-verify=false", registry))
- }
- }
+ copyOptions.DestinationCtx.SystemRegistriesConfPath = registries.SystemRegistriesConfPath() // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place.
// Copy the image to the remote destination
_, err = cp.Image(ctx, policyContext, dest, src, copyOptions)
if err != nil {
@@ -869,6 +854,7 @@ func (i *Image) Inspect(ctx context.Context) (*inspect.ImageData, error) {
GraphDriver: driver,
ManifestType: manifestType,
User: ociv1Img.Config.User,
+ History: ociv1Img.History,
}
return data, nil
}
diff --git a/libpod/image/image_test.go b/libpod/image/image_test.go
index f187631b4..91bb2411b 100644
--- a/libpod/image/image_test.go
+++ b/libpod/image/image_test.go
@@ -86,9 +86,9 @@ func TestImage_NewFromLocal(t *testing.T) {
// Need images to be present for this test
ir, err := NewImageRuntimeFromOptions(so)
assert.NoError(t, err)
- bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, false, false)
+ bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, false)
assert.NoError(t, err)
- bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, false, false)
+ bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, false)
assert.NoError(t, err)
tm, err := makeLocalMatrix(bb, bbglibc)
@@ -135,7 +135,7 @@ func TestImage_New(t *testing.T) {
// Iterate over the names and delete the image
// after the pull
for _, img := range names {
- newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, false, false)
+ newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, false)
assert.NoError(t, err)
assert.NotEqual(t, newImage.ID(), "")
err = newImage.Remove(false)
@@ -163,7 +163,7 @@ func TestImage_MatchRepoTag(t *testing.T) {
}
ir, err := NewImageRuntimeFromOptions(so)
assert.NoError(t, err)
- newImage, err := ir.New(context.Background(), "busybox", "", "", os.Stdout, nil, SigningOptions{}, false, false)
+ newImage, err := ir.New(context.Background(), "busybox", "", "", os.Stdout, nil, SigningOptions{}, false)
assert.NoError(t, err)
err = newImage.TagImage("foo:latest")
assert.NoError(t, err)
diff --git a/libpod/image/prune.go b/libpod/image/prune.go
new file mode 100644
index 000000000..6a1f160d5
--- /dev/null
+++ b/libpod/image/prune.go
@@ -0,0 +1,26 @@
+package image
+
+// GetPruneImages returns a slice of images that are unnamed or unused
+func (ir *Runtime) GetPruneImages() ([]*Image, error) {
+ var (
+		unnamedImages []*Image
+ )
+ allImages, err := ir.GetImages()
+ if err != nil {
+ return nil, err
+ }
+ for _, i := range allImages {
+ if len(i.Names()) == 0 {
+			unnamedImages = append(unnamedImages, i)
+ continue
+ }
+ containers, err := i.Containers()
+ if err != nil {
+ return nil, err
+ }
+ if len(containers) < 1 {
+			unnamedImages = append(unnamedImages, i)
+ }
+ }
+	return unnamedImages, nil
+}
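
A caller-side sketch of a prune loop over this result; Remove(force bool) is the method already used by the image tests in this diff:

    // Hypothetical caller; ir is an *image.Runtime.
    func pruneUnused(ir *image.Runtime) error {
        imgs, err := ir.GetPruneImages()
        if err != nil {
            return err
        }
        for _, img := range imgs {
            if err := img.Remove(false); err != nil {
                return err
            }
        }
        return nil
    }
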
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index bfa04d069..09935fe7c 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -10,7 +10,6 @@ import (
"github.com/containers/image/directory"
"github.com/containers/image/docker"
dockerarchive "github.com/containers/image/docker/archive"
- "github.com/containers/image/docker/reference"
"github.com/containers/image/docker/tarfile"
ociarchive "github.com/containers/image/oci/archive"
"github.com/containers/image/pkg/sysregistries"
@@ -19,7 +18,6 @@ import (
"github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
"github.com/containers/libpod/pkg/registries"
- "github.com/containers/libpod/pkg/util"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -193,7 +191,7 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.
// pullImageFromHeuristicSource pulls an image based on inputName, which is heuristically parsed and may involve configured registries.
// Use pullImageFromReference if the source is known precisely.
-func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, forceSecure bool) ([]string, error) {
+func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions) ([]string, error) {
var goal *pullGoal
sc := GetSystemContext(signaturePolicyPath, authfile, false)
srcRef, err := alltransports.ParseImageName(inputName)
@@ -209,48 +207,33 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s
return nil, errors.Wrapf(err, "error determining pull goal for image %q", inputName)
}
}
- return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, forceSecure)
+ return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions)
}
// pullImageFromReference pulls an image from a types.imageReference.
-func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.ImageReference, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, forceSecure bool) ([]string, error) {
+func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.ImageReference, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions) ([]string, error) {
sc := GetSystemContext(signaturePolicyPath, authfile, false)
goal, err := ir.pullGoalFromImageReference(ctx, srcRef, transports.ImageName(srcRef), sc)
if err != nil {
return nil, errors.Wrapf(err, "error determining pull goal for image %q", transports.ImageName(srcRef))
}
- return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, forceSecure)
+ return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions)
}
// doPullImage is an internal helper interpreting pullGoal. Almost everyone should call one of the callers of doPullImage instead.
-func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, forceSecure bool) ([]string, error) {
+func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions) ([]string, error) {
policyContext, err := getPolicyContext(sc)
if err != nil {
return nil, err
}
defer policyContext.Destroy()
- insecureRegistries, err := registries.GetInsecureRegistries()
- if err != nil {
- return nil, err
- }
+ systemRegistriesConfPath := registries.SystemRegistriesConfPath()
var images []string
var pullErrors *multierror.Error
for _, imageInfo := range goal.refPairs {
copyOptions := getCopyOptions(sc, writer, dockerOptions, nil, signingOptions, "", nil)
- if imageInfo.srcRef.Transport().Name() == DockerTransport {
- imgRef := imageInfo.srcRef.DockerReference()
- if imgRef == nil { // This should never happen; such references can’t be created.
- return nil, fmt.Errorf("internal error: DockerTransport reference %s does not have a DockerReference",
- transports.ImageName(imageInfo.srcRef))
- }
- registry := reference.Domain(imgRef)
-
- if util.StringInSlice(registry, insecureRegistries) && !forceSecure {
- copyOptions.SourceCtx.DockerInsecureSkipTLSVerify = true
- logrus.Info(fmt.Sprintf("%s is an insecure registry; pulling with tls-verify=false", registry))
- }
- }
+ copyOptions.SourceCtx.SystemRegistriesConfPath = systemRegistriesConfPath // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place.
// Print the following statement only when pulling from a docker or atomic registry
if writer != nil && (imageInfo.srcRef.Transport().Name() == DockerTransport || imageInfo.srcRef.Transport().Name() == AtomicTransport) {
io.WriteString(writer, fmt.Sprintf("Trying to pull %s...", imageInfo.image))
@@ -271,7 +254,7 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
}
// If no image was found, we should handle. Lets be nicer to the user and see if we can figure out why.
if len(images) == 0 {
- registryPath := sysregistries.RegistriesConfPath(&types.SystemContext{})
+ registryPath := sysregistries.RegistriesConfPath(&types.SystemContext{SystemRegistriesConfPath: systemRegistriesConfPath})
if goal.usedSearchRegistries && len(goal.searchedRegistries) == 0 {
return nil, errors.Errorf("image name provided is a short name and no search registries are defined in %s.", registryPath)
}
diff --git a/libpod/image/utils.go b/libpod/image/utils.go
index 9a75ca6dc..b944de1bb 100644
--- a/libpod/image/utils.go
+++ b/libpod/image/utils.go
@@ -2,6 +2,8 @@ package image
import (
"io"
+ "net/url"
+ "regexp"
"strings"
cp "github.com/containers/image/copy"
@@ -117,3 +119,23 @@ func GetAdditionalTags(images []string) ([]reference.NamedTagged, error) {
}
return allTags, nil
}
+
+// IsValidImageURI checks if image name has valid format
+func IsValidImageURI(imguri string) (bool, error) {
+ uri := "http://" + imguri
+ u, err := url.Parse(uri)
+ if err != nil {
+ return false, errors.Wrapf(err, "invalid image uri: %s", imguri)
+ }
+ reg := regexp.MustCompile(`^[a-zA-Z0-9-_\.]+\/?:?[0-9]*[a-z0-9-\/:]*$`)
+ ret := reg.FindAllString(u.Host, -1)
+ if len(ret) == 0 {
+		return false, errors.Errorf("invalid image uri: %s", imguri)
+ }
+ reg = regexp.MustCompile(`^[a-z0-9-:\./]*$`)
+ ret = reg.FindAllString(u.Fragment, -1)
+ if len(ret) == 0 {
+		return false, errors.Errorf("invalid image uri: %s", imguri)
+ }
+ return true, nil
+}
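
A quick usage sketch for the validator above (import path assumed from the tree layout):

    package main

    import (
        "fmt"

        "github.com/containers/libpod/libpod/image"
    )

    func main() {
        for _, uri := range []string{"quay.io/libpod/alpine:latest", "bad uri!"} {
            ok, err := image.IsValidImageURI(uri)
            fmt.Println(uri, ok, err)
        }
    }
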
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 78e765ccd..314799309 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -18,8 +18,10 @@ type InMemoryState struct {
pods map[string]*Pod
// Maps container ID to container struct.
containers map[string]*Container
+ volumes map[string]*Volume
// Maps container ID to a list of IDs of dependencies.
- ctrDepends map[string][]string
+ ctrDepends map[string][]string
+ volumeDepends map[string][]string
// Maps pod ID to a map of container ID to container struct.
podContainers map[string]map[string]*Container
// Global name registry - ensures name uniqueness and performs lookups.
@@ -46,8 +48,10 @@ func NewInMemoryState() (State, error) {
state.pods = make(map[string]*Pod)
state.containers = make(map[string]*Container)
+ state.volumes = make(map[string]*Volume)
state.ctrDepends = make(map[string][]string)
+ state.volumeDepends = make(map[string][]string)
state.podContainers = make(map[string]map[string]*Container)
@@ -73,6 +77,18 @@ func (s *InMemoryState) Refresh() error {
return nil
}
+// GetDBConfig is not implemented for in-memory state.
+// As we do not store a config, return an empty one.
+func (s *InMemoryState) GetDBConfig() (*DBConfig, error) {
+ return &DBConfig{}, nil
+}
+
+// ValidateDBConfig is not implemented for the in-memory state.
+// Since we do nothing, just return no error.
+func (s *InMemoryState) ValidateDBConfig(runtime *Runtime) error {
+ return nil
+}
+
// SetNamespace sets the namespace for container and pod retrieval.
func (s *InMemoryState) SetNamespace(ns string) error {
s.namespace = ns
@@ -232,6 +248,14 @@ func (s *InMemoryState) AddContainer(ctr *Container) error {
s.addCtrToDependsMap(ctr.ID(), depCtr)
}
+ // Add container to volume dependencies
+ for _, vol := range ctr.config.Spec.Mounts {
+ if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
+ volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
+ s.addCtrToVolDependsMap(ctr.ID(), volName)
+ }
+ }
+
return nil
}
@@ -282,6 +306,14 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error {
s.removeCtrFromDependsMap(ctr.ID(), depCtr)
}
+ // Remove container from volume dependencies
+ for _, vol := range ctr.config.Spec.Mounts {
+ if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
+ volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
+ s.removeCtrFromVolDependsMap(ctr.ID(), volName)
+ }
+ }
+
return nil
}
@@ -346,6 +378,114 @@ func (s *InMemoryState) ContainerInUse(ctr *Container) ([]string, error) {
return arr, nil
}
+// Volume retrieves a volume from its full name
+func (s *InMemoryState) Volume(name string) (*Volume, error) {
+ if name == "" {
+ return nil, ErrEmptyID
+ }
+
+ vol, ok := s.volumes[name]
+ if !ok {
+		return nil, errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found", name)
+ }
+
+ return vol, nil
+}
+
+// HasVolume checks if a volume with the given name is present in the state
+func (s *InMemoryState) HasVolume(name string) (bool, error) {
+ if name == "" {
+ return false, ErrEmptyID
+ }
+
+ _, ok := s.volumes[name]
+ if !ok {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// AddVolume adds a volume to the state
+func (s *InMemoryState) AddVolume(volume *Volume) error {
+ if !volume.valid {
+ return errors.Wrapf(ErrVolumeRemoved, "volume with name %s is not valid", volume.Name())
+ }
+
+ if _, ok := s.volumes[volume.Name()]; ok {
+ return errors.Wrapf(ErrVolumeExists, "volume with name %s already exists in state", volume.Name())
+ }
+
+ s.volumes[volume.Name()] = volume
+
+ return nil
+}
+
+// RemoveVolume removes a volume from the state
+func (s *InMemoryState) RemoveVolume(volume *Volume) error {
+ // Ensure we don't remove a volume which containers depend on
+ deps, ok := s.volumeDepends[volume.Name()]
+ if ok && len(deps) != 0 {
+ depsStr := strings.Join(deps, ", ")
+		return errors.Wrapf(ErrVolumeBeingUsed, "the following containers depend on volume %s: %s", volume.Name(), depsStr)
+ }
+
+ if _, ok := s.volumes[volume.Name()]; !ok {
+ volume.valid = false
+ return errors.Wrapf(ErrVolumeRemoved, "no volume exists in state with name %s", volume.Name())
+ }
+
+ delete(s.volumes, volume.Name())
+
+ return nil
+}
+
+// RemoveVolCtrDep removes a container from the volume's dependency list
+func (s *InMemoryState) RemoveVolCtrDep(volume *Volume, ctrID string) error {
+ if !volume.valid {
+ return errors.Wrapf(ErrVolumeRemoved, "volume with name %s is not valid", volume.Name())
+ }
+
+ if _, ok := s.volumes[volume.Name()]; !ok {
+		return errors.Wrapf(ErrNoSuchVolume, "volume with name %s does not exist in state", volume.Name())
+ }
+
+ // Remove container that is using this volume
+ s.removeCtrFromVolDependsMap(ctrID, volume.Name())
+
+ return nil
+}
+
+// VolumeInUse checks if the given volume is being used by at least one container
+func (s *InMemoryState) VolumeInUse(volume *Volume) ([]string, error) {
+ if !volume.valid {
+ return nil, ErrVolumeRemoved
+ }
+
+ // If the volume does not exist, return error
+ if _, ok := s.volumes[volume.Name()]; !ok {
+ volume.valid = false
+ return nil, errors.Wrapf(ErrNoSuchVolume, "volume with name %s not found in state", volume.Name())
+ }
+
+ arr, ok := s.volumeDepends[volume.Name()]
+ if !ok {
+ return []string{}, nil
+ }
+
+ return arr, nil
+}
+
+// AllVolumes returns all volumes that exist in the state
+func (s *InMemoryState) AllVolumes() ([]*Volume, error) {
+ allVols := make([]*Volume, 0, len(s.volumes))
+ for _, v := range s.volumes {
+ allVols = append(allVols, v)
+ }
+
+ return allVols, nil
+}
+
// AllContainers retrieves all containers from the state
func (s *InMemoryState) AllContainers() ([]*Container, error) {
ctrs := make([]*Container, 0, len(s.containers))
@@ -933,6 +1073,44 @@ func (s *InMemoryState) removeCtrFromDependsMap(ctrID, dependsID string) {
}
}
+// Add a container to the dependency mappings for the volume
+func (s *InMemoryState) addCtrToVolDependsMap(depCtrID, volName string) {
+ if volName != "" {
+ arr, ok := s.volumeDepends[volName]
+ if !ok {
+ // Do not have a mapping for that volume yet
+ s.volumeDepends[volName] = []string{depCtrID}
+ } else {
+ // Have a mapping for the volume
+ arr = append(arr, depCtrID)
+ s.volumeDepends[volName] = arr
+ }
+ }
+}
+
+// Remove a container from the dependency mappings for the volume
+func (s *InMemoryState) removeCtrFromVolDependsMap(depCtrID, volName string) {
+ if volName != "" {
+ arr, ok := s.volumeDepends[volName]
+ if !ok {
+ // Internal state seems inconsistent
+ // But the dependency is definitely gone
+ // So just return
+ return
+ }
+
+ newArr := make([]string, 0, len(arr))
+
+ for _, id := range arr {
+ if id != depCtrID {
+ newArr = append(newArr, id)
+ }
+ }
+
+ s.volumeDepends[volName] = newArr
+ }
+}
+
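
Both helpers implement a plain reverse index from volume name to the IDs of containers that use it. The same add/remove pattern in isolation:

    package main

    import "fmt"

    type depMap map[string][]string

    func (d depMap) add(vol, ctr string) { d[vol] = append(d[vol], ctr) }

    func (d depMap) remove(vol, ctr string) {
        kept := d[vol][:0] // filter in place
        for _, id := range d[vol] {
            if id != ctr {
                kept = append(kept, id)
            }
        }
        d[vol] = kept
    }

    func main() {
        deps := depMap{}
        deps.add("data", "ctr1")
        deps.add("data", "ctr2")
        deps.remove("data", "ctr1")
        fmt.Println(deps) // map[data:[ctr2]]
    }
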
// Check if we can access a pod or container, or if that is blocked by
// namespaces.
func (s *InMemoryState) checkNSMatch(id, ns string) error {
diff --git a/libpod/info.go b/libpod/info.go
index 4cbf3f734..5d8d160c8 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -12,6 +12,7 @@ import (
"strings"
"time"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/utils"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
@@ -30,6 +31,7 @@ func (r *Runtime) hostInfo() (map[string]interface{}, error) {
info["os"] = runtime.GOOS
info["arch"] = runtime.GOARCH
info["cpus"] = runtime.NumCPU()
+ info["rootless"] = rootless.IsRootless()
mi, err := system.ReadMemInfo()
if err != nil {
return nil, errors.Wrapf(err, "error reading memory info")
diff --git a/libpod/kube.go b/libpod/kube.go
new file mode 100644
index 000000000..c164ca0c5
--- /dev/null
+++ b/libpod/kube.go
@@ -0,0 +1,440 @@
+package libpod
+
+import (
+ "fmt"
+ "math/rand"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/containers/libpod/pkg/lookup"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// GenerateForKube generates a v1.Pod description that wraps just this
+// single container.
+func (c *Container) GenerateForKube() (*v1.Pod, error) {
+ // Generate the v1.Pod yaml description
+ return simplePodWithV1Container(c)
+}
+
+// GenerateForKube generates one v1.Pod description from the pod's containers,
+// along with the service ports exposed by its infra container
+func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) {
+ // Generate the v1.Pod yaml description
+ var servicePorts []v1.ServicePort
+
+ allContainers, err := p.allContainers()
+ if err != nil {
+ return nil, servicePorts, err
+ }
+	// If the pod has no containers, there is no sense in generating YAML
+ if len(allContainers) == 0 {
+ return nil, servicePorts, errors.Errorf("pod %s has no containers", p.ID())
+ }
+	// If only an infra container is present, it makes no sense to generate YAML
+ if len(allContainers) == 1 && p.HasInfraContainer() {
+ return nil, servicePorts, errors.Errorf("pod %s only has an infra container", p.ID())
+ }
+
+ if p.HasInfraContainer() {
+ infraContainer, err := p.getInfraContainer()
+ if err != nil {
+ return nil, servicePorts, err
+ }
+
+ ports, err := ocicniPortMappingToContainerPort(infraContainer.config.PortMappings)
+ if err != nil {
+ return nil, servicePorts, err
+ }
+ servicePorts = containerPortsToServicePorts(ports)
+ }
+ pod, err := p.podWithContainers(allContainers)
+ return pod, servicePorts, err
+}
+
+func (p *Pod) getInfraContainer() (*Container, error) {
+ infraID, err := p.InfraContainerID()
+ if err != nil {
+ return nil, err
+ }
+ return p.runtime.LookupContainer(infraID)
+}
+
+// GenerateKubeServiceFromV1Pod creates a v1 service object from a v1 pod object
+func GenerateKubeServiceFromV1Pod(pod *v1.Pod, servicePorts []v1.ServicePort) v1.Service {
+ service := v1.Service{}
+ selector := make(map[string]string)
+ selector["app"] = pod.Labels["app"]
+ ports := servicePorts
+ if len(ports) == 0 {
+ ports = containersToServicePorts(pod.Spec.Containers)
+ }
+ serviceSpec := v1.ServiceSpec{
+ Ports: ports,
+ Selector: selector,
+ Type: v1.ServiceTypeNodePort,
+ }
+ service.Spec = serviceSpec
+ service.ObjectMeta = pod.ObjectMeta
+ tm := v12.TypeMeta{
+ Kind: "Service",
+ APIVersion: pod.TypeMeta.APIVersion,
+ }
+ service.TypeMeta = tm
+ return service
+}
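
Tying the two generators together, a caller-side sketch (error handling shortened; pod is a *Pod obtained from the runtime):

    kubePod, servicePorts, err := pod.GenerateForKube()
    if err != nil {
        return err
    }
    service := GenerateKubeServiceFromV1Pod(kubePod, servicePorts)
    fmt.Printf("service %s exposes %d NodePort(s)\n",
        service.ObjectMeta.Name, len(service.Spec.Ports))
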
+
+// containerPortsToServicePorts takes a slice of container ports and generates
+// a slice of service ports
+func containerPortsToServicePorts(containerPorts []v1.ContainerPort) []v1.ServicePort {
+ var sps []v1.ServicePort
+ for _, cp := range containerPorts {
+ nodePort := 30000 + rand.Intn(32767-30000+1)
+ servicePort := v1.ServicePort{
+ Protocol: cp.Protocol,
+ Port: cp.ContainerPort,
+ NodePort: int32(nodePort),
+ Name: strconv.Itoa(int(cp.ContainerPort)),
+ }
+ sps = append(sps, servicePort)
+ }
+ return sps
+}
+
+// containersToServicePorts takes a slice of v1.Containers and generates an
+// inclusive list of service ports to expose
+func containersToServicePorts(containers []v1.Container) []v1.ServicePort {
+ var sps []v1.ServicePort
+ // Without the call to rand.Seed, a program will produce the same sequence of pseudo-random numbers
+ // for each execution. Legal nodeport range is 30000-32767
+ rand.Seed(time.Now().UnixNano())
+
+ for _, ctr := range containers {
+ sps = append(sps, containerPortsToServicePorts(ctr.Ports)...)
+ }
+ return sps
+}
+
+func (p *Pod) podWithContainers(containers []*Container) (*v1.Pod, error) {
+ var podContainers []v1.Container
+ for _, ctr := range containers {
+ result, err := containerToV1Container(ctr)
+ if err != nil {
+ return nil, err
+ }
+ if !ctr.IsInfra() {
+ podContainers = append(podContainers, result)
+ }
+ }
+
+ return addContainersToPodObject(podContainers, p.Name()), nil
+}
+
+func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod {
+ tm := v12.TypeMeta{
+ Kind: "Pod",
+ APIVersion: "v1",
+ }
+
+	// Add a label called "app" with the container's name as a value
+ labels := make(map[string]string)
+ labels["app"] = removeUnderscores(podName)
+ om := v12.ObjectMeta{
+ // The name of the pod is container_name-libpod
+ Name: fmt.Sprintf("%s-libpod", removeUnderscores(podName)),
+ Labels: labels,
+ // CreationTimestamp seems to be required, so adding it; in doing so, the timestamp
+ // will reflect time this is run (not container create time) because the conversion
+ // of the container create time to v1 Time is probably not warranted nor worthwhile.
+ CreationTimestamp: v12.Now(),
+ }
+ ps := v1.PodSpec{
+ Containers: containers,
+ }
+ p := v1.Pod{
+ TypeMeta: tm,
+ ObjectMeta: om,
+ Spec: ps,
+ }
+ return &p
+}
+
+// simplePodWithV1Container is a function used by inspect when kube YAML needs to be generated
+// for a single container. We "insert" that container's description into a pod.
+func simplePodWithV1Container(ctr *Container) (*v1.Pod, error) {
+ var containers []v1.Container
+ result, err := containerToV1Container(ctr)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, result)
+ return addContainersToPodObject(containers, ctr.Name()), nil
+
+}
+
+// containerToV1Container converts information we know about a libpod container
+// to a V1.Container specification.
+func containerToV1Container(c *Container) (v1.Container, error) {
+ kubeContainer := v1.Container{}
+ kubeSec, err := generateKubeSecurityContext(c)
+ if err != nil {
+ return kubeContainer, err
+ }
+
+ if len(c.config.Spec.Linux.Devices) > 0 {
+ // TODO Enable when we can support devices and their names
+ devices, err := generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices)
+ if err != nil {
+ return kubeContainer, err
+ }
+ kubeContainer.VolumeDevices = devices
+ return kubeContainer, errors.Wrapf(ErrNotImplemented, "linux devices")
+ }
+
+ if len(c.config.UserVolumes) > 0 {
+		// TODO This is disabled until we can resolve what the volume name should be
+ // Volume names need to be coordinated "globally" in the kube files.
+ volumes, err := libpodMountsToKubeVolumeMounts(c)
+ if err != nil {
+ return kubeContainer, err
+ }
+ kubeContainer.VolumeMounts = volumes
+ return kubeContainer, errors.Wrapf(ErrNotImplemented, "volume names")
+ }
+
+ envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env)
+ if err != nil {
+		return kubeContainer, err
+ }
+
+ ports, err := ocicniPortMappingToContainerPort(c.PortMappings())
+ if err != nil {
+		return kubeContainer, err
+ }
+
+ containerCommands := c.Command()
+ kubeContainer.Name = removeUnderscores(c.Name())
+
+ _, image := c.Image()
+ kubeContainer.Image = image
+ kubeContainer.Stdin = c.Stdin()
+ kubeContainer.Command = containerCommands
+ // TODO need to figure out how we handle command vs entry point. Kube appears to prefer entrypoint.
+ // right now we just take the container's command
+ //container.Args = args
+ kubeContainer.WorkingDir = c.WorkingDir()
+ kubeContainer.Ports = ports
+ // This should not be applicable
+ //container.EnvFromSource =
+ kubeContainer.Env = envVariables
+ // TODO enable resources when we can support naming conventions
+ //container.Resources
+ kubeContainer.SecurityContext = kubeSec
+ kubeContainer.StdinOnce = false
+ kubeContainer.TTY = c.config.Spec.Process.Terminal
+
+ return kubeContainer, nil
+}
+
+// ocicniPortMappingToContainerPort takes an ocicni portmapping and converts
+// it to a v1.ContainerPort format for kube output
+func ocicniPortMappingToContainerPort(portMappings []ocicni.PortMapping) ([]v1.ContainerPort, error) {
+ var containerPorts []v1.ContainerPort
+ for _, p := range portMappings {
+ var protocol v1.Protocol
+ switch strings.ToUpper(p.Protocol) {
+ case "TCP":
+ protocol = v1.ProtocolTCP
+ case "UDP":
+ protocol = v1.ProtocolUDP
+ default:
+ return containerPorts, errors.Errorf("unknown network protocol %s", p.Protocol)
+ }
+ cp := v1.ContainerPort{
+ // Name will not be supported
+ HostPort: p.HostPort,
+ HostIP: p.HostIP,
+ ContainerPort: p.ContainerPort,
+ Protocol: protocol,
+ }
+ containerPorts = append(containerPorts, cp)
+ }
+ return containerPorts, nil
+}
+
+// libpodEnvVarsToKubeEnvVars converts a key=value string slice to []v1.EnvVar
+func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) {
+ var envVars []v1.EnvVar
+ for _, e := range envs {
+ splitE := strings.SplitN(e, "=", 2)
+ if len(splitE) != 2 {
+ return envVars, errors.Errorf("environment variable %s is malformed; should be key=value", e)
+ }
+ ev := v1.EnvVar{
+ Name: splitE[0],
+ Value: splitE[1],
+ }
+ envVars = append(envVars, ev)
+ }
+ return envVars, nil
+}
+
+// Is this worth it?
+func libpodMaxAndMinToResourceList(c *Container) (v1.ResourceList, v1.ResourceList) { //nolint
+ // It does not appear we can properly calculate CPU resources from the information
+ // we know in libpod. Libpod knows CPUs by time, shares, etc.
+
+ // We also only know about a memory limit; no memory minimum
+ maxResources := make(map[v1.ResourceName]resource.Quantity)
+ minResources := make(map[v1.ResourceName]resource.Quantity)
+ config := c.Config()
+ maxMem := config.Spec.Linux.Resources.Memory.Limit
+
+ _ = maxMem
+
+ return maxResources, minResources
+}
+
+func generateKubeVolumeMount(hostSourcePath string, mounts []specs.Mount) (v1.VolumeMount, error) {
+ vm := v1.VolumeMount{}
+ for _, m := range mounts {
+ if m.Source == hostSourcePath {
+ // TODO Name is not provided and is required by Kube; therefore, this is disabled earlier
+ //vm.Name =
+ vm.MountPath = m.Source
+ vm.SubPath = m.Destination
+ if util.StringInSlice("ro", m.Options) {
+ vm.ReadOnly = true
+ }
+ return vm, nil
+ }
+ }
+ return vm, errors.New("unable to find mount source")
+}
+
+// libpodMountsToKubeVolumeMounts converts the container's mounts to a struct kube understands
+func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, error) {
+	// At this point, I don't think we can distinguish between the default
+ // volume mounts and user added ones. For now, we pass them all.
+ var vms []v1.VolumeMount
+ for _, hostSourcePath := range c.config.UserVolumes {
+ vm, err := generateKubeVolumeMount(hostSourcePath, c.config.Spec.Mounts)
+ if err != nil {
+ return vms, err
+ }
+ vms = append(vms, vm)
+ }
+ return vms, nil
+}
+
+func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v1.Capabilities {
+ var (
+ drop []v1.Capability
+ add []v1.Capability
+ )
+	// Find caps in defaultCaps but not in the container's caps;
+	// those indicate a dropped cap
+ for _, capability := range defaultCaps {
+ if !util.StringInSlice(capability, containerCaps) {
+ cap := v1.Capability(capability)
+ drop = append(drop, cap)
+ }
+ }
+ // Find caps in the container but not in the defaults; those indicate
+ // an added cap
+ for _, capability := range containerCaps {
+ if !util.StringInSlice(capability, defaultCaps) {
+ cap := v1.Capability(capability)
+ add = append(add, cap)
+ }
+ }
+
+ return &v1.Capabilities{
+ Add: add,
+ Drop: drop,
+ }
+}
+
+func capAddDrop(caps *specs.LinuxCapabilities) (*v1.Capabilities, error) {
+ g, err := generate.New("linux")
+ if err != nil {
+ return nil, err
+ }
+ // Combine all the default capabilities into a slice
+ defaultCaps := append(g.Config.Process.Capabilities.Ambient, g.Config.Process.Capabilities.Bounding...)
+ defaultCaps = append(defaultCaps, g.Config.Process.Capabilities.Effective...)
+ defaultCaps = append(defaultCaps, g.Config.Process.Capabilities.Inheritable...)
+ defaultCaps = append(defaultCaps, g.Config.Process.Capabilities.Permitted...)
+
+	// Combine all the container's capabilities into a slice
+ containerCaps := append(caps.Ambient, caps.Bounding...)
+ containerCaps = append(containerCaps, caps.Effective...)
+ containerCaps = append(containerCaps, caps.Inheritable...)
+ containerCaps = append(containerCaps, caps.Permitted...)
+
+ calculatedCaps := determineCapAddDropFromCapabilities(defaultCaps, containerCaps)
+ return calculatedCaps, nil
+}
+
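A hedged sketch of how the two capability helpers compose; the capability names below are illustrative, and the defaults come from runtime-tools' generate.New("linux"):

func exampleCapAddDrop() (*v1.Capabilities, error) { // hypothetical helper
	caps := &specs.LinuxCapabilities{
		Bounding: []string{"CAP_CHOWN", "CAP_NET_ADMIN"}, // illustrative subset
	}
	// Add lists capabilities present in the container but not in the
	// defaults (e.g. CAP_NET_ADMIN); Drop lists defaults absent from the
	// container (e.g. CAP_NET_RAW here). The five sets are concatenated
	// without deduplication, so Add and Drop may contain repeats.
	return capAddDrop(caps)
}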
+// generateKubeSecurityContext generates a securityContext based on the existing container
+func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
+ priv := c.Privileged()
+ ro := c.IsReadOnly()
+ allowPrivEscalation := !c.Spec().Process.NoNewPrivileges
+
+ newCaps, err := capAddDrop(c.config.Spec.Process.Capabilities)
+ if err != nil {
+ return nil, err
+ }
+
+ sc := v1.SecurityContext{
+ Capabilities: newCaps,
+ Privileged: &priv,
+		// TODO How do we know if SELinux options were passed into podman?
+ //SELinuxOptions:
+ // RunAsNonRoot is an optional parameter; our first implementations should be root only; however
+ // I'm leaving this as a bread-crumb for later
+ //RunAsNonRoot: &nonRoot,
+ ReadOnlyRootFilesystem: &ro,
+ AllowPrivilegeEscalation: &allowPrivEscalation,
+ }
+
+ if c.User() != "" {
+		// It is *possible* that the user exists only in the container's
+		// /etc/passwd, so resolve it against the container's mountpoint
+ logrus.Debugf("Looking in container for user: %s", c.User())
+ u, err := lookup.GetUser(c.state.Mountpoint, c.User())
+ if err != nil {
+ return nil, err
+ }
+ user := int64(u.Uid)
+ sc.RunAsUser = &user
+ }
+ return &sc, nil
+}
+
+// generateKubeVolumeDeviceFromLinuxDevice takes a list of devices and makes a VolumeDevice struct for kube
+func generateKubeVolumeDeviceFromLinuxDevice(devices []specs.LinuxDevice) ([]v1.VolumeDevice, error) {
+ var volumeDevices []v1.VolumeDevice
+ for _, d := range devices {
+ vd := v1.VolumeDevice{
+ // TBD How are we going to sync up these names
+ //Name:
+ DevicePath: d.Path,
+ }
+ volumeDevices = append(volumeDevices, vd)
+ }
+ return volumeDevices, nil
+}
+
+func removeUnderscores(s string) string {
+ return strings.Replace(s, "_", "", -1)
+}
diff --git a/libpod/mounts_linux.go b/libpod/mounts_linux.go
new file mode 100644
index 000000000..e6aa09eac
--- /dev/null
+++ b/libpod/mounts_linux.go
@@ -0,0 +1,18 @@
+// +build linux
+
+package libpod
+
+const (
+ // MountPrivate represents the private mount option.
+ MountPrivate = "private"
+ // MountRPrivate represents the rprivate mount option.
+ MountRPrivate = "rprivate"
+ // MountShared represents the shared mount option.
+ MountShared = "shared"
+ // MountRShared represents the rshared mount option.
+ MountRShared = "rshared"
+ // MountSlave represents the slave mount option.
+ MountSlave = "slave"
+ // MountRSlave represents the rslave mount option.
+ MountRSlave = "rslave"
+)
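These constants name the kernel's mount propagation modes. A hypothetical validator (not part of the patch) shows the intended use:

func validPropagation(mode string) bool { // hypothetical helper
	switch mode {
	case MountPrivate, MountRPrivate, MountShared,
		MountRShared, MountSlave, MountRSlave:
		return true
	}
	return false
}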
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 863a764e2..43d0a61a4 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -64,20 +64,20 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re
}
}()
- networkStatus := make([]*cnitypes.Result, 1)
+ networkStatus := make([]*cnitypes.Result, 0)
for idx, r := range results {
logrus.Debugf("[%d] CNI result: %v", idx, r.String())
resultCurrent, err := cnitypes.GetResult(r)
if err != nil {
return nil, errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.String(), err)
}
- networkStatus = append(ctr.state.NetworkStatus, resultCurrent)
+ networkStatus = append(networkStatus, resultCurrent)
}
// Add firewall rules to ensure the container has network access.
// Will not be necessary once CNI firewall plugin merges upstream.
// https://github.com/containernetworking/plugins/pull/75
- for _, netStatus := range ctr.state.NetworkStatus {
+ for _, netStatus := range networkStatus {
firewallConf := &firewall.FirewallNetConf{
PrevResult: netStatus,
}
@@ -90,13 +90,16 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re
}
// Create and configure a new network namespace for a container
-func (r *Runtime) createNetNS(ctr *Container) (ns.NetNS, []*cnitypes.Result, error) {
+func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q []*cnitypes.Result, err error) {
ctrNS, err := netns.NewNS()
if err != nil {
return nil, nil, errors.Wrapf(err, "error creating network namespace for container %s", ctr.ID())
}
defer func() {
if err != nil {
+ if err2 := netns.UnmountNS(ctrNS); err2 != nil {
+ logrus.Errorf("Error unmounting partially created network namespace for container %s: %v", ctr.ID(), err2)
+ }
if err2 := ctrNS.Close(); err2 != nil {
logrus.Errorf("Error closing partially created network namespace for container %s: %v", ctr.ID(), err2)
}
diff --git a/libpod/oci.go b/libpod/oci.go
index 233bacfbb..093bfdd35 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -227,7 +227,7 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
return files, nil
}
-func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreContainer bool) (err error) {
+func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) {
var stderrBuf bytes.Buffer
runtimeDir, err := util.GetRootlessRuntimeDir()
@@ -289,8 +289,11 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
args = append(args, "--syslog")
}
- if restoreContainer {
+ if restoreOptions != nil {
args = append(args, "--restore", ctr.CheckpointPath())
+ if restoreOptions.TCPEstablished {
+ args = append(args, "--restore-arg", "--tcp-established")
+ }
}
logrus.WithFields(logrus.Fields{
@@ -316,6 +319,10 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
cmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", 3))
cmd.Env = append(cmd.Env, fmt.Sprintf("_OCI_STARTPIPE=%d", 4))
cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
+ cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBPOD_USERNS_CONFIGURED=%s", os.Getenv("_LIBPOD_USERNS_CONFIGURED")))
+ cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBPOD_ROOTLESS_UID=%s", os.Getenv("_LIBPOD_ROOTLESS_UID")))
+ cmd.Env = append(cmd.Env, fmt.Sprintf("HOME=%s", os.Getenv("HOME")))
+ cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
if r.reservePorts {
ports, err := bindPorts(ctr.config.PortMappings)
@@ -329,7 +336,7 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
cmd.ExtraFiles = append(cmd.ExtraFiles, ports...)
}
- if rootless.IsRootless() {
+ if ctr.config.NetMode.IsSlirp4netns() {
ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
if err != nil {
return errors.Wrapf(err, "failed to create rootless network sync pipe")
@@ -350,7 +357,8 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
// Set the label of the conmon process to be level :s0
// This will allow the container processes to talk to fifo-files
// passed into the container by conmon
- plabel, err := selinux.CurrentLabel()
+ var plabel string
+ plabel, err = selinux.CurrentLabel()
if err != nil {
childPipe.Close()
return errors.Wrapf(err, "Failed to get current SELinux label")
@@ -360,7 +368,7 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
runtime.LockOSThread()
if c["level"] != "s0" && c["level"] != "" {
c["level"] = "s0"
- if err := label.SetProcessLabel(c.Get()); err != nil {
+ if err = label.SetProcessLabel(c.Get()); err != nil {
runtime.UnlockOSThread()
return err
}
@@ -583,6 +591,9 @@ func (r *OCIRuntime) startContainer(ctr *Container) error {
return err
}
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
+ if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok {
+ env = append(env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify))
+ }
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "start", ctr.ID()); err != nil {
return err
}
@@ -685,8 +696,12 @@ func (r *OCIRuntime) stopContainer(ctr *Container, timeout uint) error {
// deleteContainer deletes a container from the OCI runtime
func (r *OCIRuntime) deleteContainer(ctr *Container) error {
- _, err := utils.ExecCmd(r.path, "delete", "--force", ctr.ID())
- return err
+ runtimeDir, err := util.GetRootlessRuntimeDir()
+ if err != nil {
+ return err
+ }
+ env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
+ return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "delete", "--force", ctr.ID())
}
// pauseContainer pauses the given container
@@ -740,6 +755,8 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty
if tty {
args = append(args, "--tty")
+ } else {
+ args = append(args, "--tty=false")
}
if user != "" {
@@ -843,13 +860,26 @@ func (r *OCIRuntime) execStopContainer(ctr *Container, timeout uint) error {
}
// checkpointContainer checkpoints the given container
-func (r *OCIRuntime) checkpointContainer(ctr *Container) error {
+func (r *OCIRuntime) checkpointContainer(ctr *Container, options ContainerCheckpointOptions) error {
+ label.SetSocketLabel(ctr.ProcessLabel())
// imagePath is used by CRIU to store the actual checkpoint files
imagePath := ctr.CheckpointPath()
// workPath will be used to store dump.log and stats-dump
workPath := ctr.bundlePath()
logrus.Debugf("Writing checkpoint to %s", imagePath)
logrus.Debugf("Writing checkpoint logs to %s", workPath)
- return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, nil, r.path, "checkpoint",
- "--image-path", imagePath, "--work-path", workPath, ctr.ID())
+ args := []string{}
+ args = append(args, "checkpoint")
+ args = append(args, "--image-path")
+ args = append(args, imagePath)
+ args = append(args, "--work-path")
+ args = append(args, workPath)
+ if options.KeepRunning {
+ args = append(args, "--leave-running")
+ }
+ if options.TCPEstablished {
+ args = append(args, "--tcp-established")
+ }
+ args = append(args, ctr.ID())
+ return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, nil, r.path, args...)
}
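For illustration, a checkpoint requested with both options set assembles an argument slice equivalent to this command line (placeholders, not literal values):

// $RUNTIME checkpoint --image-path <ctr.CheckpointPath()> \
//     --work-path <ctr.bundlePath()> --leave-running --tcp-established <ctr ID>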
diff --git a/libpod/oci_linux.go b/libpod/oci_linux.go
index 0447670b3..2737a641e 100644
--- a/libpod/oci_linux.go
+++ b/libpod/oci_linux.go
@@ -19,6 +19,8 @@ import (
"golang.org/x/sys/unix"
)
+const unknownPackage = "Unknown"
+
func (r *OCIRuntime) moveConmonToCgroup(ctr *Container, cgroupParent string, cmd *exec.Cmd) error {
if os.Geteuid() == 0 {
if r.cgroupManager == SystemdCgroupsManager {
@@ -63,10 +65,10 @@ func newPipe() (parent *os.File, child *os.File, err error) {
// CreateContainer creates a container in the OCI runtime
// TODO terminal support for container
// Presently just ignoring conmon opts related to it
-func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restoreContainer bool) (err error) {
+func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) {
if ctr.state.UserNSRoot == "" {
// no need of an intermediate mount ns
- return r.createOCIContainer(ctr, cgroupParent, restoreContainer)
+ return r.createOCIContainer(ctr, cgroupParent, restoreOptions)
}
var wg sync.WaitGroup
wg.Add(1)
@@ -74,7 +76,8 @@ func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restor
defer wg.Done()
runtime.LockOSThread()
- fd, err := os.Open(fmt.Sprintf("/proc/%d/task/%d/ns/mnt", os.Getpid(), unix.Gettid()))
+ var fd *os.File
+ fd, err = os.Open(fmt.Sprintf("/proc/%d/task/%d/ns/mnt", os.Getpid(), unix.Gettid()))
if err != nil {
return
}
@@ -103,7 +106,7 @@ func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restor
if err != nil {
return
}
- err = r.createOCIContainer(ctr, cgroupParent, restoreContainer)
+ err = r.createOCIContainer(ctr, cgroupParent, restoreOptions)
}()
wg.Wait()
@@ -111,7 +114,7 @@ func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restor
}
func rpmVersion(path string) string {
- output := "Unknown"
+ output := unknownPackage
cmd := exec.Command("/usr/bin/rpm", "-q", "-f", path)
if outp, err := cmd.Output(); err == nil {
output = string(outp)
@@ -120,7 +123,7 @@ func rpmVersion(path string) string {
}
func dpkgVersion(path string) string {
- output := "Unknown"
+ output := unknownPackage
cmd := exec.Command("/usr/bin/dpkg", "-S", path)
if outp, err := cmd.Output(); err == nil {
output = string(outp)
@@ -129,14 +132,14 @@ func dpkgVersion(path string) string {
}
func (r *OCIRuntime) pathPackage() string {
- if out := rpmVersion(r.path); out != "Unknown" {
+ if out := rpmVersion(r.path); out != unknownPackage {
return out
}
return dpkgVersion(r.path)
}
func (r *OCIRuntime) conmonPackage() string {
- if out := rpmVersion(r.conmonPath); out != "Unknown" {
+ if out := rpmVersion(r.conmonPath); out != unknownPackage {
return out
}
return dpkgVersion(r.conmonPath)
diff --git a/libpod/oci_unsupported.go b/libpod/oci_unsupported.go
index b133eb402..8c084d1e2 100644
--- a/libpod/oci_unsupported.go
+++ b/libpod/oci_unsupported.go
@@ -15,7 +15,7 @@ func newPipe() (parent *os.File, child *os.File, err error) {
return nil, nil, ErrNotImplemented
}
-func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restoreContainer bool) (err error) {
+func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) {
return ErrNotImplemented
}
diff --git a/libpod/options.go b/libpod/options.go
index 8d044313b..9aa657ddd 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -7,6 +7,7 @@ import (
"regexp"
"syscall"
+ "github.com/containers/libpod/pkg/namespaces"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/cri-o/ocicni/pkg/ocicni"
@@ -28,19 +29,59 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
return ErrRuntimeFinalized
}
- rt.config.StorageConfig.RunRoot = config.RunRoot
- rt.config.StorageConfig.GraphRoot = config.GraphRoot
- rt.config.StorageConfig.GraphDriverName = config.GraphDriverName
- rt.config.StaticDir = filepath.Join(config.GraphRoot, "libpod")
+ setField := false
- rt.config.StorageConfig.GraphDriverOptions = make([]string, len(config.GraphDriverOptions))
- copy(rt.config.StorageConfig.GraphDriverOptions, config.GraphDriverOptions)
+ if config.RunRoot != "" {
+ rt.config.StorageConfig.RunRoot = config.RunRoot
+ rt.configuredFrom.storageRunRootSet = true
+ setField = true
+ }
+
+ if config.GraphRoot != "" {
+ rt.config.StorageConfig.GraphRoot = config.GraphRoot
+ rt.configuredFrom.storageGraphRootSet = true
+
+ // Also set libpod static dir, so we are a subdirectory
+ // of the c/storage store by default
+ rt.config.StaticDir = filepath.Join(config.GraphRoot, "libpod")
+ rt.configuredFrom.libpodStaticDirSet = true
+
+ setField = true
+ }
+
+ if config.GraphDriverName != "" {
+ rt.config.StorageConfig.GraphDriverName = config.GraphDriverName
+ rt.configuredFrom.storageGraphDriverSet = true
+ setField = true
+ }
+
+ if config.GraphDriverOptions != nil {
+ rt.config.StorageConfig.GraphDriverOptions = make([]string, len(config.GraphDriverOptions))
+ copy(rt.config.StorageConfig.GraphDriverOptions, config.GraphDriverOptions)
+ setField = true
+ }
- rt.config.StorageConfig.UIDMap = make([]idtools.IDMap, len(config.UIDMap))
- copy(rt.config.StorageConfig.UIDMap, config.UIDMap)
+ if config.UIDMap != nil {
+ rt.config.StorageConfig.UIDMap = make([]idtools.IDMap, len(config.UIDMap))
+ copy(rt.config.StorageConfig.UIDMap, config.UIDMap)
+ }
- rt.config.StorageConfig.GIDMap = make([]idtools.IDMap, len(config.GIDMap))
- copy(rt.config.StorageConfig.GIDMap, config.GIDMap)
+ if config.GIDMap != nil {
+ rt.config.StorageConfig.GIDMap = make([]idtools.IDMap, len(config.GIDMap))
+ copy(rt.config.StorageConfig.GIDMap, config.GIDMap)
+ }
+
+	// If any one of RunRoot, GraphRoot, GraphDriverName,
+	// or GraphDriverOptions is set, then GraphRoot and RunRoot
+	// must both be set
+ if setField {
+ if rt.config.StorageConfig.GraphRoot == "" {
+ rt.config.StorageConfig.GraphRoot = storage.DefaultStoreOptions.GraphRoot
+ }
+ if rt.config.StorageConfig.RunRoot == "" {
+ rt.config.StorageConfig.RunRoot = storage.DefaultStoreOptions.RunRoot
+ }
+ }
return nil
}
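With the new merge semantics, a caller can set a single field and rely on the containers/storage defaults for the rest. A hedged sketch (the helper name and path are illustrative):

func exampleRuntime() (*Runtime, error) { // hypothetical helper
	// RunRoot is left empty here, so it falls back to
	// storage.DefaultStoreOptions.RunRoot instead of being cleared.
	return NewRuntime(WithStorageConfig(storage.StoreOptions{
		GraphRoot: "/var/lib/mystore", // illustrative path
	}))
}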
@@ -173,26 +214,26 @@ func WithStaticDir(dir string) RuntimeOption {
}
rt.config.StaticDir = dir
+ rt.configuredFrom.libpodStaticDirSet = true
return nil
}
}
-// WithHooksDir sets the directory to look for OCI runtime hooks config.
-// Note we are not saving this in database, since this is really just for used
-// for testing.
-func WithHooksDir(hooksDir string) RuntimeOption {
+// WithHooksDir sets the directories to look for OCI runtime hook configuration.
+func WithHooksDir(hooksDirs ...string) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
return ErrRuntimeFinalized
}
- if hooksDir == "" {
- return errors.Wrap(ErrInvalidArg, "empty-string hook directories are not supported")
+ for _, hooksDir := range hooksDirs {
+ if hooksDir == "" {
+ return errors.Wrap(ErrInvalidArg, "empty-string hook directories are not supported")
+ }
}
- rt.config.HooksDir = []string{hooksDir}
- rt.config.HooksDirNotExistFatal = true
+ rt.config.HooksDir = hooksDirs
return nil
}
}
@@ -225,6 +266,7 @@ func WithTmpDir(dir string) RuntimeOption {
}
rt.config.TmpDir = dir
+ rt.configuredFrom.libpodTmpDirSet = true
return nil
}
@@ -304,6 +346,22 @@ func WithNamespace(ns string) RuntimeOption {
}
}
+// WithVolumePath sets the path under which all named volumes
+// should be created.
+// The path changes based on whether the user is running as root
+// or not.
+func WithVolumePath(volPath string) RuntimeOption {
+ return func(rt *Runtime) error {
+ if rt.valid {
+ return ErrRuntimeFinalized
+ }
+
+ rt.config.VolumePath = volPath
+
+ return nil
+ }
+}
+
// WithDefaultInfraImage sets the infra image for libpod.
// An infra image is used for inter-container kernel
// namespace sharing within a pod. Typically, an infra
@@ -817,7 +875,7 @@ func WithDependencyCtrs(ctrs []*Container) CtrCreateOption {
// namespace with a minimal configuration.
// An optional array of port mappings can be provided.
// Conflicts with WithNetNSFrom().
-func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, networks []string) CtrCreateOption {
+func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netmode string, networks []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
return ErrCtrFinalized
@@ -831,6 +889,7 @@ func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netwo
ctr.config.CreateNetNS = true
ctr.config.PortMappings = portMappings
ctr.config.Networks = networks
+ ctr.config.NetMode = namespaces.NetworkMode(netmode)
return nil
}
@@ -1101,6 +1160,70 @@ func withIsInfra() CtrCreateOption {
}
}
+// Volume Creation Options
+
+// WithVolumeName sets the name of the volume.
+func WithVolumeName(name string) VolumeCreateOption {
+ return func(volume *Volume) error {
+ if volume.valid {
+ return ErrVolumeFinalized
+ }
+
+ // Check the name against a regex
+ if !nameRegex.MatchString(name) {
+ return errors.Wrapf(ErrInvalidArg, "name must match regex [a-zA-Z0-9_-]+")
+ }
+ volume.config.Name = name
+
+ return nil
+ }
+}
+
+// WithVolumeLabels sets the labels of the volume.
+func WithVolumeLabels(labels map[string]string) VolumeCreateOption {
+ return func(volume *Volume) error {
+ if volume.valid {
+ return ErrVolumeFinalized
+ }
+
+ volume.config.Labels = make(map[string]string)
+ for key, value := range labels {
+ volume.config.Labels[key] = value
+ }
+
+ return nil
+ }
+}
+
+// WithVolumeDriver sets the driver of the volume.
+func WithVolumeDriver(driver string) VolumeCreateOption {
+ return func(volume *Volume) error {
+ if volume.valid {
+ return ErrVolumeFinalized
+ }
+
+ volume.config.Driver = driver
+
+ return nil
+ }
+}
+
+// WithVolumeOptions sets the options of the volume.
+func WithVolumeOptions(options map[string]string) VolumeCreateOption {
+ return func(volume *Volume) error {
+ if volume.valid {
+ return ErrVolumeFinalized
+ }
+
+ volume.config.Options = make(map[string]string)
+ for key, value := range options {
+ volume.config.Options[key] = value
+ }
+
+ return nil
+ }
+}
+
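Taken together, the volume options compose like libpod's other functional options. A hypothetical caller (all values illustrative):

func exampleNewVolume(ctx context.Context, r *Runtime) (*Volume, error) { // hypothetical
	return r.NewVolume(ctx,
		WithVolumeName("webdata"),
		WithVolumeDriver("local"),
		WithVolumeLabels(map[string]string{"app": "web"}),
		WithVolumeOptions(map[string]string{"type": "tmpfs"}),
	)
}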
// Pod Creation Options
// WithPodName sets the name of the pod.
@@ -1295,3 +1418,14 @@ func WithInfraContainer() PodCreateOption {
return nil
}
}
+
+// WithInfraContainerPorts tells the pod to add port bindings to the pause container
+func WithInfraContainerPorts(bindings []ocicni.PortMapping) PodCreateOption {
+ return func(pod *Pod) error {
+ if pod.valid {
+ return ErrPodFinalized
+ }
+ pod.config.InfraContainer.PortBindings = bindings
+ return nil
+ }
+}
diff --git a/libpod/pod.go b/libpod/pod.go
index 8ac976f6a..07f41f5c6 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -4,6 +4,7 @@ import (
"time"
"github.com/containers/storage"
+ "github.com/cri-o/ocicni/pkg/ocicni"
"github.com/pkg/errors"
)
@@ -96,7 +97,8 @@ type PodContainerInfo struct {
// InfraContainerConfig is the configuration for the pod's infra container
type InfraContainerConfig struct {
- HasInfraContainer bool `json:"makeInfraContainer"`
+ HasInfraContainer bool `json:"makeInfraContainer"`
+ PortBindings []ocicni.PortMapping `json:"infraPortBindings"`
}
// ID retrieves the pod's ID
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index 3d5512e8c..cbac2420f 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -62,7 +62,13 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
return nil, nil
}
-// Stop stops all containers within a pod that are not already stopped
+// Stop stops all containers within a pod. It passes a timeout of -1, so
+// each container falls back to its own configured stop timeout.
+func (p *Pod) Stop(ctx context.Context, cleanup bool) (map[string]error, error) {
+ return p.StopWithTimeout(ctx, cleanup, -1)
+}
+
+// StopWithTimeout stops all containers within a pod that are not already stopped
// Each container will use its own stop timeout
// Only running containers will be stopped. Paused, stopped, or created
// containers will be ignored.
@@ -77,7 +83,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrCtrExists
// If both error and the map are nil, all containers were stopped without error
-func (p *Pod) Stop(ctx context.Context, cleanup bool) (map[string]error, error) {
+func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (map[string]error, error) {
p.lock.Lock()
defer p.lock.Unlock()
@@ -110,8 +116,11 @@ func (p *Pod) Stop(ctx context.Context, cleanup bool) (map[string]error, error)
ctr.lock.Unlock()
continue
}
-
- if err := ctr.stop(ctr.config.StopTimeout); err != nil {
+ stopTimeout := ctr.config.StopTimeout
+ if timeout > -1 {
+ stopTimeout = uint(timeout)
+ }
+ if err := ctr.stop(stopTimeout); err != nil {
ctr.lock.Unlock()
ctrErrors[ctr.ID()] = err
continue
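A hedged sketch of the resulting split between the two entry points (the helper name is hypothetical):

func examplePodStop(ctx context.Context, pod *Pod) (map[string]error, error) { // hypothetical
	// pod.Stop(ctx, true) passes -1, meaning each container uses its own
	// configured stop timeout; any value >= 0 overrides it for all of them.
	return pod.StopWithTimeout(ctx, true, 10)
}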
diff --git a/libpod/pod_easyjson.go b/libpod/pod_easyjson.go
index 6c1c939f3..8ea9a5e72 100644
--- a/libpod/pod_easyjson.go
+++ b/libpod/pod_easyjson.go
@@ -6,6 +6,7 @@ package libpod
import (
json "encoding/json"
+ ocicni "github.com/cri-o/ocicni/pkg/ocicni"
easyjson "github.com/mailru/easyjson"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
@@ -721,6 +722,29 @@ func easyjsonBe091417DecodeGithubComContainersLibpodLibpod5(in *jlexer.Lexer, ou
switch key {
case "makeInfraContainer":
out.HasInfraContainer = bool(in.Bool())
+ case "infraPortBindings":
+ if in.IsNull() {
+ in.Skip()
+ out.PortBindings = nil
+ } else {
+ in.Delim('[')
+ if out.PortBindings == nil {
+ if !in.IsDelim(']') {
+ out.PortBindings = make([]ocicni.PortMapping, 0, 1)
+ } else {
+ out.PortBindings = []ocicni.PortMapping{}
+ }
+ } else {
+ out.PortBindings = (out.PortBindings)[:0]
+ }
+ for !in.IsDelim(']') {
+ var v6 ocicni.PortMapping
+ easyjsonBe091417DecodeGithubComContainersLibpodVendorGithubComCriOOcicniPkgOcicni(in, &v6)
+ out.PortBindings = append(out.PortBindings, v6)
+ in.WantComma()
+ }
+ in.Delim(']')
+ }
default:
in.SkipRecursive()
}
@@ -745,5 +769,109 @@ func easyjsonBe091417EncodeGithubComContainersLibpodLibpod5(out *jwriter.Writer,
}
out.Bool(bool(in.HasInfraContainer))
}
+ {
+ const prefix string = ",\"infraPortBindings\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ if in.PortBindings == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 {
+ out.RawString("null")
+ } else {
+ out.RawByte('[')
+ for v7, v8 := range in.PortBindings {
+ if v7 > 0 {
+ out.RawByte(',')
+ }
+ easyjsonBe091417EncodeGithubComContainersLibpodVendorGithubComCriOOcicniPkgOcicni(out, v8)
+ }
+ out.RawByte(']')
+ }
+ }
+ out.RawByte('}')
+}
+func easyjsonBe091417DecodeGithubComContainersLibpodVendorGithubComCriOOcicniPkgOcicni(in *jlexer.Lexer, out *ocicni.PortMapping) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeString()
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "hostPort":
+ out.HostPort = int32(in.Int32())
+ case "containerPort":
+ out.ContainerPort = int32(in.Int32())
+ case "protocol":
+ out.Protocol = string(in.String())
+ case "hostIP":
+ out.HostIP = string(in.String())
+ default:
+ in.SkipRecursive()
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjsonBe091417EncodeGithubComContainersLibpodVendorGithubComCriOOcicniPkgOcicni(out *jwriter.Writer, in ocicni.PortMapping) {
+ out.RawByte('{')
+ first := true
+ _ = first
+ {
+ const prefix string = ",\"hostPort\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int32(int32(in.HostPort))
+ }
+ {
+ const prefix string = ",\"containerPort\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int32(int32(in.ContainerPort))
+ }
+ {
+ const prefix string = ",\"protocol\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Protocol))
+ }
+ {
+ const prefix string = ",\"hostIP\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.HostIP))
+ }
out.RawByte('}')
}
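The generated encoder/decoder pair round-trips the new field as follows; the config below and its serialized form are illustrative:

cfg := InfraContainerConfig{
	HasInfraContainer: true,
	PortBindings: []ocicni.PortMapping{
		{HostPort: 8080, ContainerPort: 80, Protocol: "tcp"},
	},
}
_ = cfg
// Serialized (illustrative):
// {"makeInfraContainer":true,"infraPortBindings":[{"hostPort":8080,"containerPort":80,"protocol":"tcp","hostIP":""}]}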
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 46162c7ef..39a25c004 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -48,7 +48,7 @@ func (p *Pod) updatePod() error {
// Save pod state to database
func (p *Pod) save() error {
if err := p.runtime.state.SavePod(p); err != nil {
- return errors.Wrapf(err, "error saving pod %s state")
+ return errors.Wrapf(err, "error saving pod %s state", p.ID())
}
return nil
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 318cd0369..2dfebf565 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -12,7 +12,6 @@ import (
"github.com/containers/image/types"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/firewall"
- "github.com/containers/libpod/pkg/hooks"
sysreg "github.com/containers/libpod/pkg/registries"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
@@ -84,6 +83,7 @@ type Runtime struct {
lock sync.RWMutex
imageRuntime *image.Runtime
firewallBackend firewall.FirewallBackend
+ configuredFrom *runtimeConfiguredFrom
}
// RuntimeConfig contains configuration options used to set up the runtime
@@ -92,6 +92,7 @@ type RuntimeConfig struct {
// Not included in on-disk config, use the dedicated containers/storage
// configuration file instead
StorageConfig storage.StoreOptions `toml:"-"`
+ VolumePath string `toml:"volume_path"`
// ImageDefaultTransport is the default transport method used to fetch
// images
ImageDefaultTransport string `toml:"image_default_transport"`
@@ -141,11 +142,11 @@ type RuntimeConfig struct {
// CNIDefaultNetwork is the network name of the default CNI network
// to attach pods to
CNIDefaultNetwork string `toml:"cni_default_network,omitempty"`
- // HooksDir Path to the directory containing hooks configuration files
+ // HooksDir holds paths to the directories containing hooks
+	// configuration files. When the same filename is present in
+ // multiple directories, the file in the directory listed last in
+ // this slice takes precedence.
HooksDir []string `toml:"hooks_dir"`
- // HooksDirNotExistFatal switches between fatal errors and non-fatal
- // warnings if the configured HooksDir does not exist.
- HooksDirNotExistFatal bool `toml:"hooks_dir_not_exist_fatal"`
// DefaultMountsFile is the path to the default mounts file for testing
// purposes only
DefaultMountsFile string `toml:"-"`
@@ -175,6 +176,20 @@ type RuntimeConfig struct {
EnableLabeling bool `toml:"label"`
}
+// runtimeConfiguredFrom is a struct used during early runtime init to help
+// assemble the full RuntimeConfig struct from defaults.
+// It indicates whether several fields in the runtime configuration were set
+// explicitly.
+// If they were not, we may override them with information from the database,
+// if it exists and differs from what is present in the system already.
+type runtimeConfiguredFrom struct {
+ storageGraphDriverSet bool
+ storageGraphRootSet bool
+ storageRunRootSet bool
+ libpodStaticDirSet bool
+ libpodTmpDirSet bool
+}
+
var (
defaultRuntimeConfig = RuntimeConfig{
// Leave this empty so containers/storage will use its defaults
@@ -203,7 +218,6 @@ var (
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
},
CgroupManager: SystemdCgroupsManager,
- HooksDir: []string{hooks.DefaultDir, hooks.OverrideDir},
StaticDir: filepath.Join(storage.DefaultStoreOptions.GraphRoot, "libpod"),
TmpDir: "",
MaxLogSize: -1,
@@ -253,6 +267,7 @@ func SetXdgRuntimeDir(val string) error {
func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
runtime = new(Runtime)
runtime.config = new(RuntimeConfig)
+ runtime.configuredFrom = new(runtimeConfiguredFrom)
// Copy the default configuration
tmpDir, err := getDefaultTmpDir()
@@ -262,8 +277,20 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
deepcopier.Copy(defaultRuntimeConfig).To(runtime.config)
runtime.config.TmpDir = tmpDir
+ if rootless.IsRootless() {
+ // If we're rootless, override the default storage config
+ storageConf, volumePath, err := util.GetDefaultStoreOptions()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error retrieving rootless storage config")
+ }
+ runtime.config.StorageConfig = storageConf
+ runtime.config.StaticDir = filepath.Join(storageConf.GraphRoot, "libpod")
+ runtime.config.VolumePath = volumePath
+ }
+
configPath := ConfigPath
foundConfig := true
+ rootlessConfigPath := ""
if rootless.IsRootless() {
home := os.Getenv("HOME")
if runtime.config.SignaturePolicyPath == "" {
@@ -272,7 +299,10 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
runtime.config.SignaturePolicyPath = newPath
}
}
- configPath = filepath.Join(home, ".config/containers/libpod.conf")
+
+ rootlessConfigPath = filepath.Join(home, ".config/containers/libpod.conf")
+
+ configPath = rootlessConfigPath
if _, err := os.Stat(configPath); err != nil {
foundConfig = false
}
@@ -303,6 +333,25 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
if err != nil {
return nil, errors.Wrapf(err, "error reading configuration file %s", configPath)
}
+
+ // This is ugly, but we need to decode twice.
+ // Once to check if libpod static and tmp dirs were explicitly
+	// set (comparing against the default is not enough, since they
+	// might have been explicitly configured to that default).
+ // A second time to actually get a usable config.
+ tmpConfig := new(RuntimeConfig)
+ if _, err := toml.Decode(string(contents), tmpConfig); err != nil {
+ return nil, errors.Wrapf(err, "error decoding configuration file %s",
+ configPath)
+ }
+
+ if tmpConfig.StaticDir != "" {
+ runtime.configuredFrom.libpodStaticDirSet = true
+ }
+ if tmpConfig.TmpDir != "" {
+ runtime.configuredFrom.libpodTmpDirSet = true
+ }
+
if _, err := toml.Decode(string(contents), runtime.config); err != nil {
return nil, errors.Wrapf(err, "error decoding configuration file %s", configPath)
}
@@ -317,6 +366,22 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
if err := makeRuntime(runtime); err != nil {
return nil, err
}
+
+ if !foundConfig && rootlessConfigPath != "" {
+ os.MkdirAll(filepath.Dir(rootlessConfigPath), 0755)
+ file, err := os.OpenFile(rootlessConfigPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil && !os.IsExist(err) {
+ return nil, errors.Wrapf(err, "cannot open file %s", rootlessConfigPath)
+ }
+ if err == nil {
+ defer file.Close()
+ enc := toml.NewEncoder(file)
+ if err := enc.Encode(runtime.config); err != nil {
+ os.Remove(rootlessConfigPath)
+ }
+ }
+ }
+
return runtime, nil
}
@@ -328,6 +393,7 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
func NewRuntimeFromConfig(configPath string, options ...RuntimeOption) (runtime *Runtime, err error) {
runtime = new(Runtime)
runtime.config = new(RuntimeConfig)
+ runtime.configuredFrom = new(runtimeConfiguredFrom)
// Set two fields not in the TOML config
runtime.config.StateType = defaultRuntimeConfig.StateType
@@ -406,6 +472,77 @@ func makeRuntime(runtime *Runtime) (err error) {
runtime.config.ConmonPath)
}
+ // Make the static files directory if it does not exist
+ if err := os.MkdirAll(runtime.config.StaticDir, 0700); err != nil {
+ // The directory is allowed to exist
+ if !os.IsExist(err) {
+ return errors.Wrapf(err, "error creating runtime static files directory %s",
+ runtime.config.StaticDir)
+ }
+ }
+
+ // Set up the state
+ switch runtime.config.StateType {
+ case InMemoryStateStore:
+ state, err := NewInMemoryState()
+ if err != nil {
+ return err
+ }
+ runtime.state = state
+ case SQLiteStateStore:
+ return errors.Wrapf(ErrInvalidArg, "SQLite state is currently disabled")
+ case BoltDBStateStore:
+ dbPath := filepath.Join(runtime.config.StaticDir, "bolt_state.db")
+
+ state, err := NewBoltState(dbPath, runtime)
+ if err != nil {
+ return err
+ }
+ runtime.state = state
+ default:
+ return errors.Wrapf(ErrInvalidArg, "unrecognized state type passed")
+ }
+
+ // Grab config from the database so we can reset some defaults
+ dbConfig, err := runtime.state.GetDBConfig()
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving runtime configuration from database")
+ }
+
+ // Reset defaults if they were not explicitly set
+ if !runtime.configuredFrom.storageGraphDriverSet && dbConfig.GraphDriver != "" {
+ runtime.config.StorageConfig.GraphDriverName = dbConfig.GraphDriver
+ }
+ if !runtime.configuredFrom.storageGraphRootSet && dbConfig.StorageRoot != "" {
+ runtime.config.StorageConfig.GraphRoot = dbConfig.StorageRoot
+ }
+ if !runtime.configuredFrom.storageRunRootSet && dbConfig.StorageTmp != "" {
+ runtime.config.StorageConfig.RunRoot = dbConfig.StorageTmp
+ }
+ if !runtime.configuredFrom.libpodStaticDirSet && dbConfig.LibpodRoot != "" {
+ runtime.config.StaticDir = dbConfig.LibpodRoot
+ }
+ if !runtime.configuredFrom.libpodTmpDirSet && dbConfig.LibpodTmp != "" {
+ runtime.config.TmpDir = dbConfig.LibpodTmp
+ }
+
+ logrus.Debugf("Using graph driver %s", runtime.config.StorageConfig.GraphDriverName)
+ logrus.Debugf("Using graph root %s", runtime.config.StorageConfig.GraphRoot)
+ logrus.Debugf("Using run root %s", runtime.config.StorageConfig.RunRoot)
+ logrus.Debugf("Using static dir %s", runtime.config.StaticDir)
+ logrus.Debugf("Using tmp dir %s", runtime.config.TmpDir)
+
+ // Validate our config against the database, now that we've set our
+ // final storage configuration
+ if err := runtime.state.ValidateDBConfig(runtime); err != nil {
+ return err
+ }
+
+ if err := runtime.state.SetNamespace(runtime.config.Namespace); err != nil {
+ return errors.Wrapf(err, "error setting libpod namespace in state")
+ }
+ logrus.Debugf("Set libpod namespace to %q", runtime.config.Namespace)
+
// Set up containers/storage
var store storage.Store
if rootless.SkipStorageSetup() {
@@ -473,15 +610,6 @@ func makeRuntime(runtime *Runtime) (err error) {
}
runtime.ociRuntime = ociRuntime
- // Make the static files directory if it does not exist
- if err := os.MkdirAll(runtime.config.StaticDir, 0755); err != nil {
- // The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrapf(err, "error creating runtime static files directory %s",
- runtime.config.StaticDir)
- }
- }
-
// Make a directory to hold container lockfiles
lockDir := filepath.Join(runtime.config.TmpDir, "lock")
if err := os.MkdirAll(lockDir, 0755); err != nil {
@@ -503,11 +631,13 @@ func makeRuntime(runtime *Runtime) (err error) {
}
// Set up the CNI net plugin
- netPlugin, err := ocicni.InitCNI(runtime.config.CNIDefaultNetwork, runtime.config.CNIConfigDir, runtime.config.CNIPluginDir...)
- if err != nil {
- return errors.Wrapf(err, "error configuring CNI network plugin")
+ if !rootless.IsRootless() {
+ netPlugin, err := ocicni.InitCNI(runtime.config.CNIDefaultNetwork, runtime.config.CNIConfigDir, runtime.config.CNIPluginDir...)
+ if err != nil {
+ return errors.Wrapf(err, "error configuring CNI network plugin")
+ }
+ runtime.netPlugin = netPlugin
}
- runtime.netPlugin = netPlugin
// Set up a firewall backend
backendType := ""
@@ -520,33 +650,6 @@ func makeRuntime(runtime *Runtime) (err error) {
}
runtime.firewallBackend = fwBackend
- // Set up the state
- switch runtime.config.StateType {
- case InMemoryStateStore:
- state, err := NewInMemoryState()
- if err != nil {
- return err
- }
- runtime.state = state
- case SQLiteStateStore:
- return errors.Wrapf(ErrInvalidArg, "SQLite state is currently disabled")
- case BoltDBStateStore:
- dbPath := filepath.Join(runtime.config.StaticDir, "bolt_state.db")
-
- state, err := NewBoltState(dbPath, runtime.lockDir, runtime)
- if err != nil {
- return err
- }
- runtime.state = state
- default:
- return errors.Wrapf(ErrInvalidArg, "unrecognized state type passed")
- }
-
- if err := runtime.state.SetNamespace(runtime.config.Namespace); err != nil {
- return errors.Wrapf(err, "error setting libpod namespace in state")
- }
- logrus.Debugf("Set libpod namespace to %q", runtime.config.Namespace)
-
// We now need to see if the system has restarted
// We check for the presence of a file in our tmp directory to verify this
// This check must be locked to prevent races
@@ -774,3 +877,8 @@ func (r *Runtime) generateName() (string, error) {
func (r *Runtime) ImageRuntime() *image.Runtime {
return r.imageRuntime
}
+
+// SystemContext returns the imagecontext
+func (r *Runtime) SystemContext() *types.SystemContext {
+ return r.imageContext
+}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 09dc7c48b..ba8eaacbe 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -154,6 +154,24 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
}()
+ // Go through the volume mounts and check for named volumes
+	// If the named volume already exists, continue; otherwise create
+ // the storage for the named volume.
+ for i, vol := range ctr.config.Spec.Mounts {
+ if vol.Source[0] != '/' && isNamedVolume(vol.Source) {
+ volInfo, err := r.state.Volume(vol.Source)
+ if err != nil {
+ newVol, err := r.newVolume(ctx, WithVolumeName(vol.Source))
+ if err != nil {
+				return nil, errors.Wrapf(err, "error creating named volume %q", vol.Source)
+ }
+ ctr.config.Spec.Mounts[i].Source = newVol.MountPoint()
+ continue
+ }
+ ctr.config.Spec.Mounts[i].Source = volInfo.MountPoint()
+ }
+ }
+
if ctr.config.LogPath == "" {
ctr.config.LogPath = filepath.Join(ctr.config.StaticDir, "ctr.log")
}
@@ -170,6 +188,7 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
ctr.config.Mounts = append(ctr.config.Mounts, ctr.config.ShmDir)
}
+
// Add the container to the state
// TODO: May be worth looking into recovering from name/ID collisions here
if ctr.config.Pod != "" {
@@ -246,7 +265,19 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
}
if c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s is paused, cannot remove until unpaused", c.ID())
+ if !force {
+ return errors.Wrapf(ErrCtrStateInvalid, "container %s is paused, cannot remove until unpaused", c.ID())
+ }
+ if err := c.runtime.ociRuntime.killContainer(c, 9); err != nil {
+ return err
+ }
+ if err := c.unpause(); err != nil {
+ return err
+ }
+ // Need to update container state to make sure we know it's stopped
+ if err := c.waitForExitFileAndSync(); err != nil {
+ return err
+ }
}
// Check that the container's in a good state to be removed
@@ -462,3 +493,11 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
}
return ctrs[lastCreatedIndex], nil
}
+
+// Check if volName is a named volume and not one of the default mounts we add to containers
+func isNamedVolume(volName string) bool {
+ if volName != "proc" && volName != "tmpfs" && volName != "devpts" && volName != "shm" && volName != "mqueue" && volName != "sysfs" && volName != "cgroup" {
+ return true
+ }
+ return false
+}
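A hedged sketch mirroring the check performed in newContainer above (the helper and sample source are hypothetical):

func exampleNamedVolumeCheck(src string) bool { // hypothetical helper
	// A mount source that is non-empty, not an absolute path, and not one
	// of the default pseudo-filesystem sources is treated as a named
	// volume to be created on demand, as done in newContainer.
	return src != "" && src[0] != '/' && isNamedVolume(src)
}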
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index be8711734..66844bb31 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -3,50 +3,15 @@ package libpod
import (
"context"
"fmt"
- "io"
"github.com/containers/buildah/imagebuildah"
- "github.com/containers/libpod/libpod/common"
"github.com/containers/libpod/libpod/image"
"github.com/containers/storage"
- "github.com/containers/storage/pkg/archive"
- ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// Runtime API
-// CopyOptions contains the options given when pushing or pulling images
-type CopyOptions struct {
- // Compression specifies the type of compression which is applied to
- // layer blobs. The default is to not use compression, but
- // archive.Gzip is recommended.
- Compression archive.Compression
- // DockerRegistryOptions encapsulates settings that affect how we
- // connect or authenticate to a remote registry to which we want to
- // push the image.
- common.DockerRegistryOptions
- // SigningOptions encapsulates settings that control whether or not we
- // strip or add signatures to the image when pushing (uploading) the
- // image to a registry.
- common.SigningOptions
-
- // SigningPolicyPath this points to a alternative signature policy file, used mainly for testing
- SignaturePolicyPath string
- // AuthFile is the path of the cached credentials file defined by the user
- AuthFile string
- // Writer is the reportWriter for the output
- Writer io.Writer
- // Reference is the name for the image created when a tar archive is imported
- Reference string
- // ImageConfig is the Image spec for the image created when a tar archive is imported
- ImageConfig ociv1.Image
- // ManifestMIMEType is the manifest type of the image when saving to a directory
- ManifestMIMEType string
- // ForceCompress compresses the image layers when saving to a directory using the dir transport if true
- ForceCompress bool
-}
-
// RemoveImage deletes an image from local storage
// Images being used by running containers can only be removed if force=true
func (r *Runtime) RemoveImage(ctx context.Context, img *image.Image, force bool) (string, error) {
diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go
index fea79e994..5e1051150 100644
--- a/libpod/runtime_pod_infra_linux.go
+++ b/libpod/runtime_pod_infra_linux.go
@@ -7,7 +7,6 @@ import (
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/rootless"
- "github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
)
@@ -50,9 +49,12 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, imgID
options = append(options, withIsInfra())
// Since user namespace sharing is not implemented, we only need to check if it's rootless
- portMappings := make([]ocicni.PortMapping, 0)
networks := make([]string, 0)
- options = append(options, WithNetNS(portMappings, isRootless, networks))
+ netmode := "bridge"
+ if isRootless {
+ netmode = "slirp4netns"
+ }
+ options = append(options, WithNetNS(p.config.InfraContainer.PortBindings, isRootless, netmode, networks))
return r.newContainer(ctx, g.Config, options...)
}
@@ -65,7 +67,7 @@ func (r *Runtime) createInfraContainer(ctx context.Context, p *Pod) (*Container,
return nil, ErrRuntimeStopped
}
- newImage, err := r.ImageRuntime().New(ctx, r.config.InfraImage, "", "", nil, nil, image.SigningOptions{}, false, false)
+ newImage, err := r.ImageRuntime().New(ctx, r.config.InfraImage, "", "", nil, nil, image.SigningOptions{}, false)
if err != nil {
return nil, err
}
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
new file mode 100644
index 000000000..3921758ee
--- /dev/null
+++ b/libpod/runtime_volume.go
@@ -0,0 +1,107 @@
+package libpod
+
+import (
+ "context"
+)
+
+// Contains the public Runtime API for volumes
+
+// A VolumeCreateOption is a functional option which alters the Volume created by
+// NewVolume
+type VolumeCreateOption func(*Volume) error
+
+// VolumeFilter is a function to determine whether a volume is included in command
+// output. Volumes to be outputted are tested using the function. a true return will
+// include the volume, a false return will exclude it.
+type VolumeFilter func(*Volume) bool
+
+// RemoveVolume removes a volume
+func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool) error {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if !r.valid {
+ return ErrRuntimeStopped
+ }
+
+ if !v.valid {
+ if ok, _ := r.state.HasVolume(v.Name()); !ok {
+ // Volume probably already removed
+ // Or was never in the runtime to begin with
+ return nil
+ }
+ }
+
+ v.lock.Lock()
+ defer v.lock.Unlock()
+
+ return r.removeVolume(ctx, v, force, prune)
+}
+
+// GetVolume retrieves a volume by its name
+func (r *Runtime) GetVolume(name string) (*Volume, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ return r.state.Volume(name)
+}
+
+// HasVolume checks to see if a volume with the given name exists
+func (r *Runtime) HasVolume(name string) (bool, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return false, ErrRuntimeStopped
+ }
+
+ return r.state.HasVolume(name)
+}
+
+// Volumes retrieves all volumes
+// Filters can be provided which will determine which volumes are included in the
+// output. Multiple filters are handled by ANDing their output, so only volumes
+// matching all filters are returned
+func (r *Runtime) Volumes(filters ...VolumeFilter) ([]*Volume, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ vols, err := r.state.AllVolumes()
+ if err != nil {
+ return nil, err
+ }
+
+ volsFiltered := make([]*Volume, 0, len(vols))
+ for _, vol := range vols {
+ include := true
+ for _, filter := range filters {
+ include = include && filter(vol)
+ }
+
+ if include {
+ volsFiltered = append(volsFiltered, vol)
+ }
+ }
+
+ return volsFiltered, nil
+}
+
+// GetAllVolumes retrieves all the volumes
+func (r *Runtime) GetAllVolumes() ([]*Volume, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ return r.state.AllVolumes()
+}
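Filters compose by AND, as documented above. A hypothetical caller, assuming the usual Volume accessors (Driver, Labels) defined in volume.go:

func exampleFilteredVolumes(r *Runtime) ([]*Volume, error) { // hypothetical helper
	byDriver := func(v *Volume) bool { return v.Driver() == "local" }
	byLabel := func(v *Volume) bool { return v.Labels()["app"] == "web" }
	// Only volumes matching *both* filters are returned.
	return r.Volumes(byDriver, byLabel)
}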
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
new file mode 100644
index 000000000..5cc0938f0
--- /dev/null
+++ b/libpod/runtime_volume_linux.go
@@ -0,0 +1,132 @@
+// +build linux
+
+package libpod
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/stringid"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// NewVolume creates a new empty volume
+func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+ return r.newVolume(ctx, options...)
+}
+
+// newVolume creates a new empty volume
+func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
+ volume, err := newVolume(r)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating volume")
+ }
+
+ for _, option := range options {
+ if err := option(volume); err != nil {
+ return nil, errors.Wrapf(err, "error running volume create option")
+ }
+ }
+
+ if volume.config.Name == "" {
+ volume.config.Name = stringid.GenerateNonCryptoID()
+ }
+ // TODO: support for other volume drivers
+ if volume.config.Driver == "" {
+ volume.config.Driver = "local"
+ }
+ // TODO: determine when the scope is global and set it to that
+ if volume.config.Scope == "" {
+ volume.config.Scope = "local"
+ }
+
+ // Create the mountpoint of this volume
+ fullVolPath := filepath.Join(r.config.VolumePath, volume.config.Name, "_data")
+ if err := os.MkdirAll(fullVolPath, 0755); err != nil {
+ return nil, errors.Wrapf(err, "error creating volume directory %q", fullVolPath)
+ }
+ _, mountLabel, err := label.InitLabels([]string{})
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting default mountlabels")
+ }
+ if err := label.ReleaseLabel(mountLabel); err != nil {
+ return nil, errors.Wrapf(err, "error releasing label %q", mountLabel)
+ }
+ if err := label.Relabel(fullVolPath, mountLabel, true); err != nil {
+ return nil, errors.Wrapf(err, "error setting selinux label to %q", fullVolPath)
+ }
+ volume.config.MountPoint = fullVolPath
+
+ // Path our lock file will reside at
+ lockPath := filepath.Join(r.lockDir, volume.config.Name)
+ // Grab a lockfile at the given path
+ lock, err := storage.GetLockfile(lockPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating lockfile for new volume")
+ }
+ volume.lock = lock
+
+ volume.valid = true
+
+ // Add the volume to state
+ if err := r.state.AddVolume(volume); err != nil {
+ return nil, errors.Wrapf(err, "error adding volume to state")
+ }
+
+ return volume, nil
+}
+
+// removeVolume removes the specified volume from the state and tears down its mountpoint and storage
+func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool) error {
+ if !v.valid {
+ return ErrNoSuchVolume
+ }
+
+ deps, err := r.state.VolumeInUse(v)
+ if err != nil {
+ return err
+ }
+ if len(deps) != 0 {
+ if prune {
+ return ErrVolumeBeingUsed
+ }
+ depsStr := strings.Join(deps, ", ")
+ if !force {
+ return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
+ }
+ // If using force, log the warning that the volume is being used by at least one container
+ logrus.Warnf("volume %s is being used by the following container(s): %s", v.Name(), depsStr)
+ // Remove the container dependencies so we can go ahead and delete the volume
+ for _, dep := range deps {
+ if err := r.state.RemoveVolCtrDep(v, dep); err != nil {
+ return errors.Wrapf(err, "unable to remove container dependency %q from volume %q while trying to delete volume by force", dep, v.Name())
+ }
+ }
+ }
+
+	// Delete the volume's mountpoint path, i.e. remove its data from /var/lib/containers/storage/volumes
+ if err := v.teardownStorage(); err != nil {
+ return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+ }
+
+ // Remove the volume from the state
+ if err := r.state.RemoveVolume(v); err != nil {
+ return errors.Wrapf(err, "error removing volume %s", v.Name())
+ }
+
+ // Set volume as invalid so it can no longer be used
+ v.valid = false
+
+ return nil
+}
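A hedged sketch of the force/prune semantics implemented above (the helper name is hypothetical):

func exampleRemoveVolume(ctx context.Context, r *Runtime, v *Volume) error { // hypothetical
	// With prune set, removal fails fast with ErrVolumeBeingUsed if any
	// container still uses v. With force set, the users are logged, the
	// dependencies severed, and the volume removed anyway.
	return r.RemoveVolume(ctx, v, true, false)
}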
diff --git a/libpod/state.go b/libpod/state.go
index 273e81318..88d89f673 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -1,5 +1,15 @@
package libpod
+// DBConfig is a set of Libpod runtime configuration settings that are saved
+// in a State when it is first created, and can subsequently be retrieved.
+type DBConfig struct {
+ LibpodRoot string
+ LibpodTmp string
+ StorageRoot string
+ StorageTmp string
+ GraphDriver string
+}
+
// State is a storage backend for libpod's current state.
// A State is only initialized once per instance of libpod.
// As such, initialization methods for State implementations may safely assume
@@ -21,6 +31,22 @@ type State interface {
// Refresh clears container and pod states after a reboot
Refresh() error
+ // GetDBConfig retrieves several paths configured within the database
+ // when it was created - namely, Libpod root and tmp dirs, c/storage
+ // root and tmp dirs, and c/storage graph driver.
+ // This is not implemented by the in-memory state, as it has no need to
+ // validate runtime configuration.
+ GetDBConfig() (*DBConfig, error)
+
+ // ValidateDBConfig validates the config in the given Runtime struct
+ // against paths stored in the configured database.
+ // Libpod root and tmp dirs and c/storage root and tmp dirs and graph
+ // driver are validated.
+ // This is not implemented by the in-memory state, as it has no need to
+ // validate runtime configuration that may change over multiple runs of
+ // the program.
+ ValidateDBConfig(runtime *Runtime) error
+
// SetNamespace() sets the namespace for the store, and will determine
// what containers are retrieved with container and pod retrieval calls.
// A namespace of "", the empty string, acts as no namespace, and
@@ -127,4 +153,27 @@ type State interface {
// If a namespace has been set, only pods in that namespace will be
// returned.
AllPods() ([]*Pod, error)
+
+	// Volume retrieves a volume by its full name
+ // If the volume doesn't exist, an error will be returned
+ Volume(volName string) (*Volume, error)
+ // HasVolume returns true if volName exists in the state,
+ // otherwise it returns false
+ HasVolume(volName string) (bool, error)
+ // VolumeInUse goes through the container dependencies of a volume
+	// and checks if the volume is being used by any container. If it is,
+	// a slice of the IDs of the containers using it is returned
+ VolumeInUse(volume *Volume) ([]string, error)
+ // AddVolume adds the specified volume to state. The volume's name
+ // must be unique within the list of existing volumes
+ AddVolume(volume *Volume) error
+	// RemoveVolCtrDep removes a container from the volume's list of
+	// dependent containers, deleting the container's ID from the
+	// volume's backing sub-bucket
+ RemoveVolCtrDep(volume *Volume, ctrID string) error
+ // RemoveVolume removes the specified volume.
+ // Only volumes that have no container dependencies can be removed
+ RemoveVolume(volume *Volume) error
+ // AllVolumes returns all the volumes available in the state
+ AllVolumes() ([]*Volume, error)
}
diff --git a/libpod/state_test.go b/libpod/state_test.go
index 04572fb29..d93a371f3 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -45,11 +45,16 @@ func getEmptyBoltState() (s State, p string, p2 string, err error) {
dbPath := filepath.Join(tmpDir, "db.sql")
lockDir := filepath.Join(tmpDir, "locks")
+ if err := os.Mkdir(lockDir, 0755); err != nil {
+ return nil, "", "", err
+ }
+
runtime := new(Runtime)
runtime.config = new(RuntimeConfig)
runtime.config.StorageConfig = storage.StoreOptions{}
+ runtime.lockDir = lockDir
- state, err := NewBoltState(dbPath, lockDir, runtime)
+ state, err := NewBoltState(dbPath, runtime)
if err != nil {
return nil, "", "", err
}
diff --git a/libpod/testdata/config.toml b/libpod/testdata/config.toml
index e19d36017..1d78f2083 100644
--- a/libpod/testdata/config.toml
+++ b/libpod/testdata/config.toml
@@ -14,7 +14,7 @@
seccomp_profile = "/etc/crio/seccomp.json"
apparmor_profile = "crio-default"
cgroup_manager = "cgroupfs"
- hooks_dir_path = "/usr/share/containers/oci/hooks.d"
+ hooks_dir = ["/usr/share/containers/oci/hooks.d"]
pids_limit = 2048
container_exits_dir = "/var/run/podman/exits"
[crio.image]
diff --git a/libpod/util.go b/libpod/util.go
index 7007b29cd..b7578135a 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -9,10 +9,9 @@ import (
"strings"
"time"
- "github.com/containerd/cgroups"
"github.com/containers/image/signature"
"github.com/containers/image/types"
- "github.com/containers/libpod/pkg/util"
+ "github.com/fsnotify/fsnotify"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
@@ -90,31 +89,64 @@ func MountExists(specMounts []spec.Mount, dest string) bool {
}
// WaitForFile waits until a file has been created or the given timeout has occurred
-func WaitForFile(path string, timeout time.Duration) error {
+func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, error) {
done := make(chan struct{})
chControl := make(chan struct{})
+
+ var inotifyEvents chan fsnotify.Event
+ var timer chan struct{}
+ watcher, err := fsnotify.NewWatcher()
+ if err == nil {
+ if err := watcher.Add(filepath.Dir(path)); err == nil {
+ inotifyEvents = watcher.Events
+ }
+ defer watcher.Close()
+ }
+	if inotifyEvents == nil {
+		// If for any reason we fail to create the inotify
+		// watcher, fall back to polling for the file every
+		// 25ms until the wait is cancelled via chControl
+		timer = make(chan struct{})
+		go func() {
+			for {
+				select {
+				case <-chControl:
+					close(timer)
+					return
+				default:
+					time.Sleep(25 * time.Millisecond)
+					timer <- struct{}{}
+				}
+			}
+		}()
+	}
+
go func() {
for {
select {
case <-chControl:
return
- default:
+ case <-timer:
+ _, err := os.Stat(path)
+ if err == nil {
+ close(done)
+ return
+ }
+ case <-inotifyEvents:
_, err := os.Stat(path)
if err == nil {
close(done)
return
}
- time.Sleep(25 * time.Millisecond)
}
}
}()
select {
+ case e := <-chWait:
+ return true, e
case <-done:
- return nil
+ return false, nil
case <-time.After(timeout):
close(chControl)
- return errors.Wrapf(ErrInternal, "timed out waiting for file %s", path)
+ return false, errors.Wrapf(ErrInternal, "timed out waiting for file %s", path)
}
}
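
The reworked WaitForFile multiplexes three wake-up sources: the file appearing (via inotify, or the polling fallback), an error delivered on chWait, and the timeout. A minimal caller sketch; the path here is hypothetical, and chWait is how a caller can abort the wait early:

package main

import (
	"fmt"
	"time"

	"github.com/containers/libpod/libpod"
)

func main() {
	// chWait lets the caller end the wait early, e.g. when the
	// process expected to create the file has already exited.
	chWait := make(chan error)

	// Hypothetical path, used only for illustration.
	fromChan, err := libpod.WaitForFile("/tmp/example.file", chWait, 5*time.Second)
	if err != nil {
		fmt.Println("wait failed:", err)
		return
	}
	if fromChan {
		fmt.Println("wait ended via chWait, not the file")
	} else {
		fmt.Println("file appeared")
	}
}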
@@ -155,26 +187,3 @@ func validPodNSOption(p *Pod, ctrPod string) error {
}
return nil
}
-
-// GetV1CGroups gets the V1 cgroup subsystems and then "filters"
-// out any subsystems that are provided by the caller. Passing nil
-// for excludes will return the subsystems unfiltered.
-//func GetV1CGroups(excludes []string) ([]cgroups.Subsystem, error) {
-func GetV1CGroups(excludes []string) cgroups.Hierarchy {
- return func() ([]cgroups.Subsystem, error) {
- var filtered []cgroups.Subsystem
-
- subSystem, err := cgroups.V1()
- if err != nil {
- return nil, err
- }
- for _, s := range subSystem {
- // If the name of the subsystem is not in the list of excludes, then
- // add it as a keeper.
- if !util.StringInSlice(string(s.Name()), excludes) {
- filtered = append(filtered, s)
- }
- }
- return filtered, nil
- }
-}
diff --git a/libpod/util_linux.go b/libpod/util_linux.go
index 0cd486379..30e2538c3 100644
--- a/libpod/util_linux.go
+++ b/libpod/util_linux.go
@@ -7,6 +7,7 @@ import (
"strings"
"github.com/containerd/cgroups"
+ "github.com/containers/libpod/pkg/util"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -67,3 +68,26 @@ func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) {
return final, nil
}
+
+// GetV1CGroups gets the V1 cgroup subsystems and filters out any
+// subsystems named in excludes. Passing nil for excludes returns
+// the subsystems unfiltered.
+func GetV1CGroups(excludes []string) cgroups.Hierarchy {
+ return func() ([]cgroups.Subsystem, error) {
+ var filtered []cgroups.Subsystem
+
+ subSystem, err := cgroups.V1()
+ if err != nil {
+ return nil, err
+ }
+ for _, s := range subSystem {
+ // If the name of the subsystem is not in the list of excludes, then
+ // add it as a keeper.
+ if !util.StringInSlice(string(s.Name()), excludes) {
+ filtered = append(filtered, s)
+ }
+ }
+ return filtered, nil
+ }
+}
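
GetV1CGroups returns a cgroups.Hierarchy closure, so it plugs directly into containerd/cgroups load calls. A hedged sketch; the excluded subsystem name and the cgroup path are illustrative only:

package main

import (
	"fmt"

	"github.com/containerd/cgroups"
	"github.com/containers/libpod/libpod"
)

func main() {
	// Build a hierarchy that reports all V1 subsystems except "systemd",
	// then load an existing cgroup through it.
	hierarchy := libpod.GetV1CGroups([]string{"systemd"})
	control, err := cgroups.Load(hierarchy, cgroups.StaticPath("/example"))
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Println("cgroup state:", control.State())
}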
diff --git a/libpod/volume.go b/libpod/volume.go
new file mode 100644
index 000000000..b732e8aa7
--- /dev/null
+++ b/libpod/volume.go
@@ -0,0 +1,63 @@
+package libpod
+
+import "github.com/containers/storage"
+
+// Volume is the type used to create named volumes
+// TODO: all volumes should be created using this and the Volume API
+type Volume struct {
+ config *VolumeConfig
+
+ valid bool
+ runtime *Runtime
+ lock storage.Locker
+}
+
+// VolumeConfig holds the volume's config information
+//easyjson:json
+type VolumeConfig struct {
+ Name string `json:"name"`
+ Labels map[string]string `json:"labels"`
+ MountPoint string `json:"mountPoint"`
+ Driver string `json:"driver"`
+ Options map[string]string `json:"options"`
+ Scope string `json:"scope"`
+}
+
+// Name retrieves the volume's name
+func (v *Volume) Name() string {
+ return v.config.Name
+}
+
+// Labels returns the volume's labels
+func (v *Volume) Labels() map[string]string {
+ labels := make(map[string]string)
+ for key, value := range v.config.Labels {
+ labels[key] = value
+ }
+ return labels
+}
+
+// MountPoint returns the volume's mountpoint on the host
+func (v *Volume) MountPoint() string {
+ return v.config.MountPoint
+}
+
+// Driver returns the volume's driver
+func (v *Volume) Driver() string {
+ return v.config.Driver
+}
+
+// Options returns a copy of the volume's options
+func (v *Volume) Options() map[string]string {
+ options := make(map[string]string)
+ for key, value := range v.config.Options {
+ options[key] = value
+ }
+
+ return options
+}
+
+// Scope returns the scope of the volume
+func (v *Volume) Scope() string {
+ return v.config.Scope
+}
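
Note that Labels and Options return fresh maps rather than the ones stored in VolumeConfig, so callers cannot mutate the volume's configuration through the accessors. A small in-package sketch of that property; demoCopy is hypothetical:

package libpod

// demoCopy shows that mutating a returned map does not touch the
// volume's stored config, because the accessor copied it.
func demoCopy(v *Volume) {
	labels := v.Labels()
	labels["scratch"] = "value" // modifies only the copy
	_ = v.Labels()              // a fresh copy, without "scratch"
}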
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
new file mode 100644
index 000000000..800e6d106
--- /dev/null
+++ b/libpod/volume_internal.go
@@ -0,0 +1,29 @@
+package libpod
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// Volumes created with the local driver live under the runtime's
+// configured VolumePath (e.g. /var/lib/containers/storage/volumes)
+
+// newVolume creates a new, empty volume with initialized label and
+// option maps
+func newVolume(runtime *Runtime) (*Volume, error) {
+ volume := new(Volume)
+ volume.config = new(VolumeConfig)
+ volume.runtime = runtime
+ volume.config.Labels = make(map[string]string)
+ volume.config.Options = make(map[string]string)
+
+ return volume, nil
+}
+
+// teardownStorage deletes the volume's backing directory under the
+// runtime's VolumePath
+func (v *Volume) teardownStorage() error {
+ if !v.valid {
+ return ErrNoSuchVolume
+ }
+ return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name()))
+}
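
teardownStorage implies a matching setup step that creates one directory per volume under the runtime's configured VolumePath. The diff does not include that step, so the following is only a hedged sketch of what it could look like, not libpod's actual implementation:

package libpod

import (
	"os"
	"path/filepath"
)

// setupStorage is a hypothetical counterpart to teardownStorage: it
// creates the volume's backing directory under the runtime's VolumePath.
func (v *Volume) setupStorage() error {
	if !v.valid {
		return ErrNoSuchVolume
	}
	fullPath := filepath.Join(v.runtime.config.VolumePath, v.Name())
	// 0700: restrict volume contents to root by default (an assumption).
	return os.MkdirAll(fullPath, 0700)
}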