Diffstat (limited to 'libpod')
-rw-r--r--  libpod/boltdb_state.go                   |  14
-rw-r--r--  libpod/boltdb_state_internal.go          |   7
-rw-r--r--  libpod/common_test.go                    |  16
-rw-r--r--  libpod/container.go                      |  20
-rw-r--r--  libpod/container_api.go                  |  17
-rw-r--r--  libpod/container_attach_linux.go         |  48
-rw-r--r--  libpod/container_attach_linux_cgo.go     |  11
-rw-r--r--  libpod/container_attach_linux_nocgo.go   |   7
-rw-r--r--  libpod/container_attach_unsupported.go   |   4
-rw-r--r--  libpod/container_inspect.go              | 101
-rw-r--r--  libpod/container_internal.go             |  64
-rw-r--r--  libpod/container_internal_linux.go       |  66
-rw-r--r--  libpod/events.go                         |  55
-rw-r--r--  libpod/events/config.go                  |  11
-rw-r--r--  libpod/events/events.go                  |   2
-rw-r--r--  libpod/events/journal_linux.go           |   4
-rw-r--r--  libpod/image/image.go                    |  94
-rw-r--r--  libpod/image/pull.go                     |   4
-rw-r--r--  libpod/kube.go                           | 164
-rw-r--r--  libpod/lock/file/file_lock.go            | 175
-rw-r--r--  libpod/lock/file/file_lock_test.go       |  74
-rw-r--r--  libpod/lock/file_lock_manager.go         | 110
-rw-r--r--  libpod/lock/shm/shm_lock.go              |   4
-rw-r--r--  libpod/lock/shm/shm_lock_nocgo.go        | 102
-rw-r--r--  libpod/networking_linux.go               |  34
-rw-r--r--  libpod/oci.go                            |  22
-rw-r--r--  libpod/oci_linux.go                      |   7
-rw-r--r--  libpod/options.go                        |  11
-rw-r--r--  libpod/runtime.go                        | 116
-rw-r--r--  libpod/runtime_ctr.go                    |  25
-rw-r--r--  libpod/runtime_migrate.go                |   4
-rw-r--r--  libpod/runtime_pod_linux.go              |  22
-rw-r--r--  libpod/stats.go                          |  11
-rw-r--r--  libpod/util.go                           |  20
34 files changed, 1080 insertions, 366 deletions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index b047c9fa0..4dda3a7f0 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -381,11 +381,6 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
defer s.closeDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
- idBucket, err := getIDBucket(tx)
- if err != nil {
- return err
- }
-
ctrBucket, err := getCtrBucket(tx)
if err != nil {
return err
@@ -436,7 +431,7 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
// We were not given a full container ID or name.
// Search for partial ID matches.
exists := false
- err = idBucket.ForEach(func(checkID, checkName []byte) error {
+ err = ctrBucket.ForEach(func(checkID, checkName []byte) error {
// If the container isn't in our namespace, we
// can't match it
if s.namespaceBytes != nil {
@@ -963,11 +958,6 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
defer s.closeDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
- idBucket, err := getIDBucket(tx)
- if err != nil {
- return err
- }
-
podBkt, err := getPodBucket(tx)
if err != nil {
return err
@@ -1015,7 +1005,7 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
// They did not give us a full pod name or ID.
// Search for partial ID matches.
exists := false
- err = idBucket.ForEach(func(checkID, checkName []byte) error {
+ err = podBkt.ForEach(func(checkID, checkName []byte) error {
// If the pod isn't in our namespace, we
// can't match it
if s.namespaceBytes != nil {
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 122bb5935..ee2784cdd 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -339,7 +339,6 @@ func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
}
func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.Bucket) error {
- valid := true
ctrBkt := ctrsBkt.Bucket(id)
if ctrBkt == nil {
return errors.Wrapf(define.ErrNoSuchCtr, "container %s not found in DB", string(id))
@@ -386,7 +385,7 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
}
ctr.runtime = s.runtime
- ctr.valid = valid
+ ctr.valid = true
return nil
}
@@ -639,7 +638,7 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
}
// Add ctr to pod
- if pod != nil {
+ if pod != nil && podCtrs != nil {
if err := podCtrs.Put(ctrID, ctrName); err != nil {
return errors.Wrapf(err, "error adding container %s to pod %s", ctr.ID(), pod.ID())
}
@@ -737,7 +736,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
}
}
- if podDB != nil {
+ if podDB != nil && pod != nil {
// Check if the container is in the pod, remove it if it is
podCtrs := podDB.Bucket(containersBkt)
if podCtrs == nil {
diff --git a/libpod/common_test.go b/libpod/common_test.go
index ae3cb1c87..93ca7bc71 100644
--- a/libpod/common_test.go
+++ b/libpod/common_test.go
@@ -89,13 +89,13 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error)
ctr.config.Labels["test"] = "testing"
- // Allocate a lock for the container
- lock, err := manager.AllocateLock()
+ // Allocate a containerLock for the container
+ containerLock, err := manager.AllocateLock()
if err != nil {
return nil, err
}
- ctr.lock = lock
- ctr.config.LockID = lock.ID()
+ ctr.lock = containerLock
+ ctr.config.LockID = containerLock.ID()
return ctr, nil
}
@@ -114,13 +114,13 @@ func getTestPod(id, name string, manager lock.Manager) (*Pod, error) {
valid: true,
}
- // Allocate a lock for the pod
- lock, err := manager.AllocateLock()
+ // Allocate a podLock for the pod
+ podLock, err := manager.AllocateLock()
if err != nil {
return nil, err
}
- pod.lock = lock
- pod.config.LockID = lock.ID()
+ pod.lock = podLock
+ pod.config.LockID = podLock.ID()
return pod, nil
}
diff --git a/libpod/container.go b/libpod/container.go
index 713386477..bfbc47d76 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -168,6 +168,8 @@ type ContainerState struct {
OOMKilled bool `json:"oomKilled,omitempty"`
// PID is the PID of a running container
PID int `json:"pid,omitempty"`
+ // ConmonPID is the PID of the container's conmon
+ ConmonPID int `json:"conmonPid,omitempty"`
// ExecSessions contains active exec sessions for container
// Exec session ID is mapped to PID of exec process
ExecSessions map[string]*ExecSession `json:"execSessions,omitempty"`
@@ -849,7 +851,7 @@ func (c *Container) OOMKilled() (bool, error) {
return c.state.OOMKilled, nil
}
-// PID returns the PID of the container
+// PID returns the PID of the container.
// If the container is not running, a pid of 0 will be returned. No error will
// occur.
func (c *Container) PID() (int, error) {
@@ -865,6 +867,22 @@ func (c *Container) PID() (int, error) {
return c.state.PID, nil
}
+// ConmonPID returns the PID of the container's conmon process.
+// If the container is not running, a PID of 0 will be returned. No error will
+// occur.
+func (c *Container) ConmonPID() (int, error) {
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return -1, err
+ }
+ }
+
+ return c.state.ConmonPID, nil
+}
+
// ExecSessions retrieves active exec sessions running in the container
func (c *Container) ExecSessions() ([]string, error) {
if !c.batched {
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 8d1e5751b..3dd84b02c 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -7,7 +7,6 @@ import (
"io/ioutil"
"os"
"strconv"
- "sync"
"time"
"github.com/containers/libpod/libpod/define"
@@ -120,20 +119,24 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams,
attachChan := make(chan error)
// We need to ensure that we don't return until start() fired in attach.
- // Use a WaitGroup to sync this.
- wg := new(sync.WaitGroup)
- wg.Add(1)
+ // Use a channel to sync
+ startedChan := make(chan bool)
// Attach to the container before starting it
go func() {
- if err := c.attach(streams, keys, resize, true, wg); err != nil {
+ if err := c.attach(streams, keys, resize, true, startedChan); err != nil {
attachChan <- err
}
close(attachChan)
}()
- wg.Wait()
- c.newContainerEvent(events.Attach)
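+ // Wait for either an early attach failure or the signal that start()
+ // has fired before returning the attach channel to the caller.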
+ select {
+ case err := <-attachChan:
+ return nil, err
+ case <-startedChan:
+ c.newContainerEvent(events.Attach)
+ }
+
return attachChan, nil
}
diff --git a/libpod/container_attach_linux.go b/libpod/container_attach_linux.go
index 5293480f0..17b09fccc 100644
--- a/libpod/container_attach_linux.go
+++ b/libpod/container_attach_linux.go
@@ -8,7 +8,6 @@ import (
"net"
"os"
"path/filepath"
- "sync"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/kubeutils"
@@ -20,10 +19,6 @@ import (
"k8s.io/client-go/tools/remotecommand"
)
-//#include <sys/un.h>
-// extern int unix_path_length(){struct sockaddr_un addr; return sizeof(addr.sun_path) - 1;}
-import "C"
-
/* Sync with stdpipe_t in conmon.c */
const (
AttachPipeStdin = 1
@@ -33,32 +28,35 @@ const (
// Attach to the given container
// Does not check if state is appropriate
-func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, wg *sync.WaitGroup) error {
+func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool) error {
if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput {
return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
}
- // Check the validity of the provided keys first
- var err error
- detachKeys := []byte{}
- if len(keys) > 0 {
- detachKeys, err = term.ToBytes(keys)
- if err != nil {
- return errors.Wrapf(err, "invalid detach keys")
- }
- }
-
logrus.Debugf("Attaching to container %s", c.ID())
- return c.attachContainerSocket(resize, detachKeys, streams, startContainer, wg)
+ return c.attachContainerSocket(resize, keys, streams, startContainer, started)
}
// attachContainerSocket connects to the container's attach socket and deals with the IO.
-// wg is only required if startContainer is true
+// started is only required if startContainer is true
// TODO add a channel to allow interrupting
-func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSize, detachKeys []byte, streams *AttachStreams, startContainer bool, wg *sync.WaitGroup) error {
- if startContainer && wg == nil {
- return errors.Wrapf(define.ErrInternal, "wait group not passed when startContainer set")
+func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSize, keys string, streams *AttachStreams, startContainer bool, started chan bool) error {
+ if startContainer && started == nil {
+ return errors.Wrapf(define.ErrInternal, "started chan not passed when startContainer set")
+ }
+
+ // Use default detach keys when keys aren't passed or specified in libpod.conf
+ if len(keys) == 0 {
+ keys = DefaultDetachKeys
+ }
+
+ // Check the validity of the provided keys
+ detachKeys, err := term.ToBytes(keys)
+ if err != nil {
+ return errors.Wrapf(err, "invalid detach keys")
}
kubeutils.HandleResizing(resize, func(size remotecommand.TerminalSize) {
@@ -78,7 +76,7 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi
socketPath := c.AttachSocketPath()
- maxUnixLength := int(C.unix_path_length())
+ maxUnixLength := unixPathLength()
if maxUnixLength < len(socketPath) {
socketPath = socketPath[0:maxUnixLength]
}
@@ -97,7 +95,7 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi
if err := c.start(); err != nil {
return err
}
- wg.Done()
+ started <- true
}
receiveStdoutError := make(chan error)
@@ -147,7 +145,9 @@ func redirectResponseToOutputStreams(outputStream, errorStream io.Writer, writeO
default:
logrus.Infof("Received unexpected attach type %+d", buf[0])
}
-
+ if dst == nil {
+ return errors.New("output destination cannot be nil")
+ }
if doWrite {
nw, ew := dst.Write(buf[1:nr])
if ew != nil {
diff --git a/libpod/container_attach_linux_cgo.go b/libpod/container_attach_linux_cgo.go
new file mode 100644
index 000000000..d81243360
--- /dev/null
+++ b/libpod/container_attach_linux_cgo.go
@@ -0,0 +1,11 @@
+//+build linux,cgo
+
+package libpod
+
+//#include <sys/un.h>
+// extern int unix_path_length(){struct sockaddr_un addr; return sizeof(addr.sun_path) - 1;}
+import "C"
+
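+// unixPathLength returns the maximum length of a Unix socket path on this
+// platform, sizeof(sockaddr_un.sun_path) - 1, as computed by the C helper
+// above.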
+func unixPathLength() int {
+ return int(C.unix_path_length())
+}
diff --git a/libpod/container_attach_linux_nocgo.go b/libpod/container_attach_linux_nocgo.go
new file mode 100644
index 000000000..a514a555d
--- /dev/null
+++ b/libpod/container_attach_linux_nocgo.go
@@ -0,0 +1,7 @@
+//+build linux,!cgo
+
+package libpod
+
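+// unixPathLength returns the maximum length of a Unix socket path. Without
+// cgo the value cannot be derived from sockaddr_un, so the standard Linux
+// value of sizeof(sun_path) - 1 = 107 is hardcoded.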
+func unixPathLength() int {
+ return 107
+}
diff --git a/libpod/container_attach_unsupported.go b/libpod/container_attach_unsupported.go
index 2c8718c67..c27ce0799 100644
--- a/libpod/container_attach_unsupported.go
+++ b/libpod/container_attach_unsupported.go
@@ -3,12 +3,10 @@
package libpod
import (
- "sync"
-
"github.com/containers/libpod/libpod/define"
"k8s.io/client-go/tools/remotecommand"
)
-func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, wg *sync.WaitGroup) error {
+func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool) error {
return define.ErrNotImplemented
}
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 6085f1210..2de78254c 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -145,6 +145,7 @@ type InspectContainerState struct {
OOMKilled bool `json:"OOMKilled"`
Dead bool `json:"Dead"`
Pid int `json:"Pid"`
+ ConmonPid int `json:"ConmonPid,omitempty"`
ExitCode int32 `json:"ExitCode"`
Error string `json:"Error"` // TODO
StartedAt time.Time `json:"StartedAt"`
@@ -205,12 +206,12 @@ func (c *Container) Inspect(size bool) (*InspectContainerData, error) {
func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) (*InspectContainerData, error) {
config := c.config
runtimeInfo := c.state
- spec, err := c.specFromState()
+ stateSpec, err := c.specFromState()
if err != nil {
return nil, err
}
- // Process is allowed to be nil in the spec
+ // Process is allowed to be nil in the stateSpec
args := []string{}
if config.Spec.Process != nil {
args = config.Spec.Process.Args
@@ -243,7 +244,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
}
}
- mounts, err := c.getInspectMounts(spec)
+ mounts, err := c.getInspectMounts(stateSpec)
if err != nil {
return nil, err
}
@@ -254,13 +255,14 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
Path: path,
Args: args,
State: &InspectContainerState{
- OciVersion: spec.Version,
+ OciVersion: stateSpec.Version,
Status: runtimeInfo.State.String(),
Running: runtimeInfo.State == define.ContainerStateRunning,
Paused: runtimeInfo.State == define.ContainerStatePaused,
OOMKilled: runtimeInfo.OOMKilled,
Dead: runtimeInfo.State.String() == "bad state",
Pid: runtimeInfo.PID,
+ ConmonPid: runtimeInfo.ConmonPID,
ExitCode: runtimeInfo.ExitCode,
Error: "", // can't get yet
StartedAt: runtimeInfo.StartedTime,
@@ -283,9 +285,9 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
Driver: driverData.Name,
MountLabel: config.MountLabel,
ProcessLabel: config.ProcessLabel,
- EffectiveCaps: spec.Process.Capabilities.Effective,
- BoundingCaps: spec.Process.Capabilities.Bounding,
- AppArmorProfile: spec.Process.ApparmorProfile,
+ EffectiveCaps: stateSpec.Process.Capabilities.Effective,
+ BoundingCaps: stateSpec.Process.Capabilities.Bounding,
+ AppArmorProfile: stateSpec.Process.ApparmorProfile,
ExecIDs: execIDs,
GraphDriver: driverData,
Mounts: mounts,
@@ -336,7 +338,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
// Get information on the container's network namespace (if present)
data = c.getContainerNetworkInfo(data)
- inspectConfig, err := c.generateInspectContainerConfig(spec)
+ inspectConfig, err := c.generateInspectContainerConfig(stateSpec)
if err != nil {
return nil, err
}
@@ -368,58 +370,41 @@ func (c *Container) getInspectMounts(ctrSpec *spec.Spec) ([]InspectMount, error)
return inspectMounts, nil
}
- // We need to parse all named volumes and mounts into maps, so we don't
- // end up with repeated lookups for each user volume.
- // Map destination to struct, as destination is what is stored in
- // UserVolumes.
- namedVolumes := make(map[string]*ContainerNamedVolume)
- mounts := make(map[string]spec.Mount)
- for _, namedVol := range c.config.NamedVolumes {
- namedVolumes[namedVol.Dest] = namedVol
- }
- for _, mount := range ctrSpec.Mounts {
- mounts[mount.Destination] = mount
- }
+ namedVolumes, mounts := c.sortUserVolumes(ctrSpec)
+ for _, volume := range namedVolumes {
+ mountStruct := InspectMount{}
+ mountStruct.Type = "volume"
+ mountStruct.Destination = volume.Dest
+ mountStruct.Name = volume.Name
+
+ // For src and driver, we need to look up the named
+ // volume.
+ volFromDB, err := c.runtime.state.Volume(volume.Name)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID())
+ }
+ mountStruct.Driver = volFromDB.Driver()
+ mountStruct.Source = volFromDB.MountPoint()
- for _, vol := range c.config.UserVolumes {
- // We need to look up the volumes.
- // First: is it a named volume?
- if volume, ok := namedVolumes[vol]; ok {
- mountStruct := InspectMount{}
- mountStruct.Type = "volume"
- mountStruct.Destination = volume.Dest
- mountStruct.Name = volume.Name
-
- // For src and driver, we need to look up the named
- // volume.
- volFromDB, err := c.runtime.state.Volume(volume.Name)
- if err != nil {
- return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID())
- }
- mountStruct.Driver = volFromDB.Driver()
- mountStruct.Source = volFromDB.MountPoint()
-
- parseMountOptionsForInspect(volume.Options, &mountStruct)
-
- inspectMounts = append(inspectMounts, mountStruct)
- } else if mount, ok := mounts[vol]; ok {
- // It's a mount.
- // Is it a tmpfs? If so, discard.
- if mount.Type == "tmpfs" {
- continue
- }
-
- mountStruct := InspectMount{}
- mountStruct.Type = "bind"
- mountStruct.Source = mount.Source
- mountStruct.Destination = mount.Destination
-
- parseMountOptionsForInspect(mount.Options, &mountStruct)
-
- inspectMounts = append(inspectMounts, mountStruct)
+ parseMountOptionsForInspect(volume.Options, &mountStruct)
+
+ inspectMounts = append(inspectMounts, mountStruct)
+ }
+ for _, mount := range mounts {
+ // It's a mount.
+ // Is it a tmpfs? If so, discard.
+ if mount.Type == "tmpfs" {
+ continue
}
- // We couldn't find a mount. Log a warning.
- logrus.Warnf("Could not find mount at destination %q when building inspect output for container %s", vol, c.ID())
+
+ mountStruct := InspectMount{}
+ mountStruct.Type = "bind"
+ mountStruct.Source = mount.Source
+ mountStruct.Destination = mount.Destination
+
+ parseMountOptionsForInspect(mount.Options, &mountStruct)
+
+ inspectMounts = append(inspectMounts, mountStruct)
}
return inspectMounts, nil
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 43d2b6e61..1cac7b003 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -452,6 +452,7 @@ func (c *Container) teardownStorage() error {
// It does not save the results - assumes the database will do that for us
func resetState(state *ContainerState) error {
state.PID = 0
+ state.ConmonPID = 0
state.Mountpoint = ""
state.Mounted = false
if state.State != define.ContainerStateExited {
@@ -554,7 +555,7 @@ func (c *Container) removeConmonFiles() error {
if !os.IsNotExist(err) {
return errors.Wrapf(err, "error running stat on container %s exit file", c.ID())
}
- } else if err == nil {
+ } else {
// Rename should replace the old exit file (if it exists)
if err := os.Rename(exitFile, oldExitFile); err != nil {
return errors.Wrapf(err, "error renaming container %s exit file", c.ID())
@@ -567,11 +568,11 @@ func (c *Container) removeConmonFiles() error {
func (c *Container) export(path string) error {
mountPoint := c.state.Mountpoint
if !c.state.Mounted {
- mount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
+ containerMount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
if err != nil {
return errors.Wrapf(err, "error mounting container %q", c.ID())
}
- mountPoint = mount
+ mountPoint = containerMount
defer func() {
if _, err := c.runtime.store.Unmount(c.ID(), false); err != nil {
logrus.Errorf("error unmounting container %q: %v", c.ID(), err)
@@ -609,7 +610,7 @@ func (c *Container) isStopped() (bool, error) {
if err != nil {
return true, err
}
- return (c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused), nil
+ return c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused, nil
}
// save container state to the database
@@ -855,18 +856,18 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
span.SetTag("struct", "container")
defer span.Finish()
- // Generate the OCI spec
- spec, err := c.generateSpec(ctx)
+ // Generate the OCI newSpec
+ newSpec, err := c.generateSpec(ctx)
if err != nil {
return err
}
- // Save the OCI spec to disk
- if err := c.saveSpec(spec); err != nil {
+ // Save the OCI newSpec to disk
+ if err := c.saveSpec(newSpec); err != nil {
return err
}
- // With the spec complete, do an OCI create
+ // With the newSpec complete, do an OCI create
if err := c.ociRuntime.createContainer(c, c.config.CgroupParent, nil); err != nil {
return err
}
@@ -1043,6 +1044,8 @@ func (c *Container) stop(timeout uint) error {
return err
}
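+ // Neither the container process nor conmon outlives a stop, so clear
+ // the recorded PIDs before saving state.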
+ c.state.PID = 0
+ c.state.ConmonPID = 0
c.state.StoppedByUser = true
if err := c.save(); err != nil {
return errors.Wrapf(err, "error saving container %s state after stopping", c.ID())
@@ -1164,8 +1167,8 @@ func (c *Container) cleanupStorage() error {
return nil
}
- for _, mount := range c.config.Mounts {
- if err := c.unmountSHM(mount); err != nil {
+ for _, containerMount := range c.config.Mounts {
+ if err := c.unmountSHM(containerMount); err != nil {
return err
}
}
@@ -1396,14 +1399,14 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (exten
}
return nil, err
}
- hooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
+ ociHooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
if err != nil {
return nil, err
}
- if len(hooks) > 0 || config.Hooks != nil {
- logrus.Warnf("implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load hooks from this directory", hDir)
+ if len(ociHooks) > 0 || config.Hooks != nil {
+ logrus.Warnf("implicit hook directories are deprecated; set --ociHooks-dir=%q explicitly to continue to load ociHooks from this directory", hDir)
}
- for i, hook := range hooks {
+ for i, hook := range ociHooks {
allHooks[i] = hook
}
}
@@ -1534,3 +1537,34 @@ func (c *Container) prepareCheckpointExport() (err error) {
return nil
}
+
+// sortUserVolumes sorts the volumes specified for a container
+// between named and normal volumes
+func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume, []spec.Mount) {
+ namedUserVolumes := []*ContainerNamedVolume{}
+ userMounts := []spec.Mount{}
+
+ // We need to parse all named volumes and mounts into maps, so we don't
+ // end up with repeated lookups for each user volume.
+ // Map destination to struct, as destination is what is stored in
+ // UserVolumes.
+ namedVolumes := make(map[string]*ContainerNamedVolume)
+ mounts := make(map[string]spec.Mount)
+ for _, namedVol := range c.config.NamedVolumes {
+ namedVolumes[namedVol.Dest] = namedVol
+ }
+ for _, mount := range ctrSpec.Mounts {
+ mounts[mount.Destination] = mount
+ }
+
+ for _, vol := range c.config.UserVolumes {
+ if volume, ok := namedVolumes[vol]; ok {
+ namedUserVolumes = append(namedUserVolumes, volume)
+ } else if mount, ok := mounts[vol]; ok {
+ userMounts = append(userMounts, mount)
+ } else {
+ logrus.Warnf("Could not find mount at destination %q when parsing user volumes for container %s", vol, c.ID())
+ }
+ }
+ return namedUserVolumes, userMounts
+}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index fad45233a..686a595de 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -185,9 +185,13 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// If network namespace was requested, add it now
if c.config.CreateNetNS {
if c.config.PostConfigureNetNS {
- g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, "")
+ if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, ""); err != nil {
+ return nil, err
+ }
} else {
- g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path())
+ if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path()); err != nil {
+ return nil, err
+ }
}
}
@@ -415,7 +419,9 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if rootPropagation != "" {
logrus.Debugf("set root propagation to %q", rootPropagation)
- g.SetLinuxRootPropagation(rootPropagation)
+ if err := g.SetLinuxRootPropagation(rootPropagation); err != nil {
+ return nil, err
+ }
}
// Warning: precreate hooks may alter g.Config in place.
@@ -561,7 +567,9 @@ func (c *Container) checkpointRestoreLabelLog(fileName string) (err error) {
if err != nil {
return errors.Wrapf(err, "failed to create CRIU log file %q", dumpLog)
}
- logFile.Close()
+ if err := logFile.Close(); err != nil {
+ logrus.Errorf("unable to close log file: %q", err)
+ }
if err = label.SetFileLabel(dumpLog, c.MountLabel()); err != nil {
return errors.Wrapf(err, "failed to label CRIU log file %q", dumpLog)
}
@@ -620,9 +628,11 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
"config.dump",
"spec.dump",
}
- for _, delete := range cleanup {
- file := filepath.Join(c.bundlePath(), delete)
- os.Remove(file)
+ for _, del := range cleanup {
+ file := filepath.Join(c.bundlePath(), del)
+ if err := os.Remove(file); err != nil {
+ logrus.Debugf("unable to remove file %s", file)
+ }
}
}
@@ -702,7 +712,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err != nil {
return err
}
- json.Unmarshal(networkJSON, &networkStatus)
+ if err := json.Unmarshal(networkJSON, &networkStatus); err != nil {
+ return err
+ }
// Take the first IP address
var IP net.IP
if len(networkStatus) > 0 {
@@ -744,7 +756,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// We want to have the same network namespace as before.
if c.config.CreateNetNS {
- g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path())
+ if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path()); err != nil {
+ return err
+ }
}
if err := c.makeBindMounts(); err != nil {
@@ -769,7 +783,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
}
// Cleanup for a working restore.
- c.removeConmonFiles()
+ if err := c.removeConmonFiles(); err != nil {
+ return err
+ }
// Save the OCI spec to disk
if err := c.saveSpec(g.Spec()); err != nil {
@@ -793,8 +809,8 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
logrus.Debugf("Non-fatal: removal of checkpoint directory (%s) failed: %v", c.CheckpointPath(), err)
}
cleanup := [...]string{"restore.log", "dump.log", "stats-dump", "stats-restore", "network.status"}
- for _, delete := range cleanup {
- file := filepath.Join(c.bundlePath(), delete)
+ for _, del := range cleanup {
+ file := filepath.Join(c.bundlePath(), del)
err = os.Remove(file)
if err != nil {
logrus.Debugf("Non-fatal: removal of checkpoint file (%s) failed: %v", file, err)
@@ -824,14 +840,14 @@ func (c *Container) makeBindMounts() error {
// will recreate. Only do this if we aren't sharing them with
// another container.
if c.config.NetNsCtr == "" {
- if path, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
- if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+ if resolvePath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
+ if err := os.Remove(resolvePath); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error removing container %s resolv.conf", c.ID())
}
delete(c.state.BindMounts, "/etc/resolv.conf")
}
- if path, ok := c.state.BindMounts["/etc/hosts"]; ok {
- if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+ if hostsPath, ok := c.state.BindMounts["/etc/hosts"]; ok {
+ if err := os.Remove(hostsPath); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error removing container %s hosts", c.ID())
}
delete(c.state.BindMounts, "/etc/hosts")
@@ -968,10 +984,10 @@ func (c *Container) makeBindMounts() error {
// generateResolvConf generates a containers resolv.conf
func (c *Container) generateResolvConf() (string, error) {
resolvConf := "/etc/resolv.conf"
- for _, ns := range c.config.Spec.Linux.Namespaces {
- if ns.Type == spec.NetworkNamespace {
- if ns.Path != "" && !strings.HasPrefix(ns.Path, "/proc/") {
- definedPath := filepath.Join("/etc/netns", filepath.Base(ns.Path), "resolv.conf")
+ for _, namespace := range c.config.Spec.Linux.Namespaces {
+ if namespace.Type == spec.NetworkNamespace {
+ if namespace.Path != "" && !strings.HasPrefix(namespace.Path, "/proc/") {
+ definedPath := filepath.Join("/etc/netns", filepath.Base(namespace.Path), "resolv.conf")
_, err := os.Stat(definedPath)
if err == nil {
resolvConf = definedPath
@@ -1096,10 +1112,10 @@ func (c *Container) generatePasswd() (string, error) {
if c.config.User == "" {
return "", nil
}
- spec := strings.SplitN(c.config.User, ":", 2)
- userspec := spec[0]
- if len(spec) > 1 {
- groupspec = spec[1]
+ splitSpec := strings.SplitN(c.config.User, ":", 2)
+ userspec := splitSpec[0]
+ if len(splitSpec) > 1 {
+ groupspec = splitSpec[1]
}
// If a non numeric User, then don't generate passwd
uid, err := strconv.ParseUint(userspec, 10, 32)
@@ -1137,7 +1153,7 @@ func (c *Container) generatePasswd() (string, error) {
if err != nil {
return "", errors.Wrapf(err, "failed to create temporary passwd file")
}
- if os.Chmod(passwdFile, 0644); err != nil {
+ if err := os.Chmod(passwdFile, 0644); err != nil {
return "", err
}
return passwdFile, nil
diff --git a/libpod/events.go b/libpod/events.go
index 13bb5bdde..be21e510a 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -1,7 +1,10 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/libpod/libpod/events"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -79,3 +82,55 @@ func (r *Runtime) Events(options events.ReadOptions) error {
}
return eventer.Read(options)
}
+
+// GetEvents reads the event log and returns events based on input filters
+func (r *Runtime) GetEvents(filters []string) ([]*events.Event, error) {
+ var logEvents []*events.Event
+ eventChannel := make(chan *events.Event)
+ options := events.ReadOptions{
+ EventChannel: eventChannel,
+ Filters: filters,
+ FromStart: true,
+ Stream: false,
+ }
+ eventer, err := r.newEventer()
+ if err != nil {
+ return nil, err
+ }
+ readErr := make(chan error)
+ go func() {
+ readErr <- eventer.Read(options)
+ }()
+ // Drain the event channel before checking for a read error; Read is
+ // expected to close EventChannel once it has delivered all events.
+ for e := range eventChannel {
+ logEvents = append(logEvents, e)
+ }
+ if err := <-readErr; err != nil {
+ return nil, err
+ }
+ return logEvents, nil
+}
+
+// GetLastContainerEvent takes a container name or ID and an event status and returns
+// the last occurrence of the container event
+func (r *Runtime) GetLastContainerEvent(nameOrID string, containerEvent events.Status) (*events.Event, error) {
+ // check to make sure the event.Status is valid
+ if _, err := events.StringToStatus(containerEvent.String()); err != nil {
+ return nil, err
+ }
+ filters := []string{
+ fmt.Sprintf("container=%s", nameOrID),
+ fmt.Sprintf("event=%s", containerEvent),
+ "type=container",
+ }
+ containerEvents, err := r.GetEvents(filters)
+ if err != nil {
+ return nil, err
+ }
+ if len(containerEvents) < 1 {
+ return nil, errors.Wrapf(events.ErrEventNotFound, "%s not found", containerEvent.String())
+ }
+ // return the last element in the slice
+ return containerEvents[len(containerEvents)-1], nil
+}
diff --git a/libpod/events/config.go b/libpod/events/config.go
index 810988205..b9f01f3a5 100644
--- a/libpod/events/config.go
+++ b/libpod/events/config.go
@@ -2,6 +2,8 @@ package events
import (
"time"
+
+ "github.com/pkg/errors"
)
// EventerType ...
@@ -158,3 +160,12 @@ const (
// EventFilter for filtering events
type EventFilter func(*Event) bool
+
+var (
+ // ErrEventTypeBlank indicates the event log found something done by podman
+ // but it isn't likely an event
+ ErrEventTypeBlank = errors.New("event type blank")
+
+ // ErrEventNotFound indicates that the event was not found in the event log
+ ErrEventNotFound = errors.New("unable to find event")
+)
diff --git a/libpod/events/events.go b/libpod/events/events.go
index 1ec79bcd7..2bebff162 100644
--- a/libpod/events/events.go
+++ b/libpod/events/events.go
@@ -95,6 +95,8 @@ func StringToType(name string) (Type, error) {
return System, nil
case Volume.String():
return Volume, nil
+ case "":
+ return "", ErrEventTypeBlank
}
return "", errors.Errorf("unknown event type %q", name)
}
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 78a630e9a..d5bce4334 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -101,7 +101,9 @@ func (e EventJournalD) Read(options ReadOptions) error {
// We can't decode this event.
// Don't fail hard - that would make events unusable.
// Instead, log and continue.
- logrus.Errorf("Unable to decode event: %v", err)
+ if errors.Cause(err) != ErrEventTypeBlank {
+ logrus.Errorf("Unable to decode event: %v", err)
+ }
continue
}
include := true
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 89a68a1bd..76e46f74f 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -660,11 +660,7 @@ func (i *Image) Size(ctx context.Context) (*uint64, error) {
// DriverData gets the driver data from the store on a layer
func (i *Image) DriverData() (*driver.Data, error) {
- topLayer, err := i.Layer()
- if err != nil {
- return nil, err
- }
- return driver.GetDriverData(i.imageruntime.store, topLayer.ID)
+ return driver.GetDriverData(i.imageruntime.store, i.TopLayer())
}
// Layer returns the image's top layer
@@ -693,13 +689,17 @@ func (i *Image) History(ctx context.Context) ([]*History, error) {
return nil, err
}
- // Use our layers list to find images that use one of them as its
+ // Use our layers list to find images that use any of them (or no
+ // layer, since every base layer is derived from an empty layer) as its
// topmost layer.
interestingLayers := make(map[string]bool)
- layer, err := i.imageruntime.store.Layer(i.TopLayer())
- if err != nil {
- return nil, err
+ var layer *storage.Layer
+ if i.TopLayer() != "" {
+ if layer, err = i.imageruntime.store.Layer(i.TopLayer()); err != nil {
+ return nil, err
+ }
}
+ interestingLayers[""] = true
for layer != nil {
interestingLayers[layer.ID] = true
if layer.Parent == "" {
@@ -795,27 +795,6 @@ func (i *Image) History(ctx context.Context) ([]*History, error) {
return allHistory, nil
}
-// historyLayerIDs goes through the images in store and checks if the top layer of an image
-// is the same as the parent of topLayerID
-func (i *Image) historyLayerIDs(topLayerID string, images []*Image, IDs *[]string) error {
- for _, image := range images {
- // Get the layer info of topLayerID
- layer, err := i.imageruntime.store.Layer(topLayerID)
- if err != nil {
- return errors.Wrapf(err, "error getting layer info %q", topLayerID)
- }
- // Check if the parent of layer is equal to the image's top layer
- // If so add the image ID to the list of IDs and find the parent of
- // the top layer of the image ID added to the list
- // Since we are checking for parent, each top layer can only have one parent
- if layer.Parent == image.TopLayer() {
- *IDs = append(*IDs, image.ID())
- return i.historyLayerIDs(image.TopLayer(), images, IDs)
- }
- }
- return nil
-}
-
// Dangling returns a bool if the image is "dangling"
func (i *Image) Dangling() bool {
return len(i.Names()) == 0
@@ -1143,13 +1122,15 @@ func areParentAndChild(parent, child *imgspecv1.Image) bool {
// GetParent returns the image ID of the parent. Return nil if a parent is not found.
func (i *Image) GetParent(ctx context.Context) (*Image, error) {
+ var childLayer *storage.Layer
images, err := i.imageruntime.GetImages()
if err != nil {
return nil, err
}
- childLayer, err := i.imageruntime.store.Layer(i.TopLayer())
- if err != nil {
- return nil, err
+ if i.TopLayer() != "" {
+ if childLayer, err = i.imageruntime.store.Layer(i.TopLayer()); err != nil {
+ return nil, err
+ }
}
// fetch the configuration for the child image
child, err := i.ociv1Image(ctx)
@@ -1161,11 +1142,23 @@ func (i *Image) GetParent(ctx context.Context) (*Image, error) {
continue
}
candidateLayer := img.TopLayer()
- // as a child, our top layer is either the candidate parent's
- // layer, or one that's derived from it, so skip over any
- // candidate image where we know that isn't the case
- if candidateLayer != childLayer.Parent && candidateLayer != childLayer.ID {
- continue
+ // as a child, our top layer, if we have one, is either the
+ // candidate parent's layer, or one that's derived from it, so
+ // skip over any candidate image where we know that isn't the
+ // case
+ if childLayer != nil {
+ // The child has at least one layer, so a parent would
+ // have a top layer that's either the same as the child's
+ // top layer or the top layer's recorded parent layer,
+ // which could be an empty value.
+ if candidateLayer != childLayer.Parent && candidateLayer != childLayer.ID {
+ continue
+ }
+ } else {
+ // The child has no layers, but the candidate does.
+ if candidateLayer != "" {
+ continue
+ }
}
// fetch the configuration for the candidate image
candidate, err := img.ociv1Image(ctx)
@@ -1204,14 +1197,22 @@ func (i *Image) getChildren(ctx context.Context, max int) ([]string, error) {
if img.ID() == i.ID() {
continue
}
- candidateLayer, err := img.Layer()
- if err != nil {
- return nil, err
- }
- // if this image's top layer is not our top layer, and is not
- // based on our top layer, we can skip it
- if candidateLayer.Parent != parentLayer && candidateLayer.ID != parentLayer {
- continue
+ if img.TopLayer() == "" {
+ if parentLayer != "" {
+ // this image has no layers, but we do, so
+ // it can't be derived from this one
+ continue
+ }
+ } else {
+ candidateLayer, err := img.Layer()
+ if err != nil {
+ return nil, err
+ }
+ // if this image's top layer is not our top layer, and is not
+ // based on our top layer, we can skip it
+ if candidateLayer.Parent != parentLayer && candidateLayer.ID != parentLayer {
+ continue
+ }
}
// fetch the configuration for the candidate image
candidate, err := img.ociv1Image(ctx)
@@ -1443,6 +1444,7 @@ func GetLayersMapWithImageInfo(imageruntime *Runtime) (map[string]*LayerInfo, er
if err != nil {
return nil, err
}
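+ // Seed the map with the empty layer ID so images with no top layer
+ // (e.g. built from scratch) still resolve to an entry.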
+ layerInfoMap[""] = &LayerInfo{}
for _, img := range imgs {
e, ok := layerInfoMap[img.TopLayer]
if !ok {
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index 644a9ae86..e5765febc 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -19,8 +19,8 @@ import (
"github.com/containers/image/types"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/pkg/registries"
- multierror "github.com/hashicorp/go-multierror"
- opentracing "github.com/opentracing/opentracing-go"
+ "github.com/hashicorp/go-multierror"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
diff --git a/libpod/kube.go b/libpod/kube.go
index 1622246d5..409937010 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -3,6 +3,7 @@ package libpod
import (
"fmt"
"math/rand"
+ "os"
"strconv"
"strings"
"time"
@@ -16,7 +17,6 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/resource"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -132,32 +132,43 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor
var (
podContainers []v1.Container
)
+ deDupPodVolumes := make(map[string]*v1.Volume)
first := true
for _, ctr := range containers {
if !ctr.IsInfra() {
- result, err := containerToV1Container(ctr)
+ kubeCtr, volumes, err := containerToV1Container(ctr)
if err != nil {
return nil, err
}
// Since port bindings for the pod are handled by the
// infra container, wipe them here.
- result.Ports = nil
+ kubeCtr.Ports = nil
// We add the original port declarations from the libpod infra container
// to the first kubernetes container description because otherwise we lose
// the original container/port bindings.
if first && len(ports) > 0 {
- result.Ports = ports
+ kubeCtr.Ports = ports
first = false
}
- podContainers = append(podContainers, result)
+ podContainers = append(podContainers, kubeCtr)
+ // Deduplicate volumes, so if containers in the pod share a volume, it's only
+ // listed in the volumes section once
+ for _, vol := range volumes {
+ // Copy the range variable: taking its address directly would make
+ // every map entry alias the same loop variable.
+ vol := vol
+ deDupPodVolumes[vol.Name] = &vol
+ }
}
}
- return addContainersToPodObject(podContainers, p.Name()), nil
+ podVolumes := make([]v1.Volume, 0, len(deDupPodVolumes))
+ for _, vol := range deDupPodVolumes {
+ podVolumes = append(podVolumes, *vol)
+ }
+
+ return addContainersAndVolumesToPodObject(podContainers, podVolumes, p.Name()), nil
}
-func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod {
+func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.Volume, podName string) *v1.Pod {
tm := v12.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
@@ -177,6 +188,7 @@ func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod
}
ps := v1.PodSpec{
Containers: containers,
+ Volumes: volumes,
}
p := v1.Pod{
TypeMeta: tm,
@@ -190,56 +202,58 @@ func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod
// for a single container. we "insert" that container description in a pod.
func simplePodWithV1Container(ctr *Container) (*v1.Pod, error) {
var containers []v1.Container
- result, err := containerToV1Container(ctr)
+ kubeCtr, kubeVols, err := containerToV1Container(ctr)
if err != nil {
return nil, err
}
- containers = append(containers, result)
- return addContainersToPodObject(containers, ctr.Name()), nil
+ containers = append(containers, kubeCtr)
+ return addContainersAndVolumesToPodObject(containers, kubeVols, ctr.Name()), nil
}
// containerToV1Container converts information we know about a libpod container
// to a V1.Container specification.
-func containerToV1Container(c *Container) (v1.Container, error) {
+func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) {
kubeContainer := v1.Container{}
+ kubeVolumes := []v1.Volume{}
kubeSec, err := generateKubeSecurityContext(c)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
if len(c.config.Spec.Linux.Devices) > 0 {
// TODO Enable when we can support devices and their names
devices, err := generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
kubeContainer.VolumeDevices = devices
- return kubeContainer, errors.Wrapf(define.ErrNotImplemented, "linux devices")
+ return kubeContainer, kubeVolumes, errors.Wrapf(define.ErrNotImplemented, "linux devices")
}
if len(c.config.UserVolumes) > 0 {
// TODO Until we can resolve what the volume name should be, this is disabled
// Volume names need to be coordinated "globally" in the kube files.
- volumes, err := libpodMountsToKubeVolumeMounts(c)
+ volumeMounts, volumes, err := libpodMountsToKubeVolumeMounts(c)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
- kubeContainer.VolumeMounts = volumes
+ kubeContainer.VolumeMounts = volumeMounts
+ kubeVolumes = append(kubeVolumes, volumes...)
}
envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
portmappings, err := c.PortMappings()
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
ports, err := ocicniPortMappingToContainerPort(portmappings)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
containerCommands := c.Command()
@@ -263,7 +277,7 @@ func containerToV1Container(c *Container) (v1.Container, error) {
kubeContainer.StdinOnce = false
kubeContainer.TTY = c.config.Spec.Process.Terminal
- return kubeContainer, nil
+ return kubeContainer, kubeVolumes, nil
}
// ocicniPortMappingToContainerPort takes an ocicni portmapping and converts
@@ -309,52 +323,82 @@ func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) {
return envVars, nil
}
-// Is this worth it?
-func libpodMaxAndMinToResourceList(c *Container) (v1.ResourceList, v1.ResourceList) { //nolint
- // It does not appear we can properly calculate CPU resources from the information
- // we know in libpod. Libpod knows CPUs by time, shares, etc.
+// libpodMountsToKubeVolumeMounts converts the container's mounts to kube volume mounts and volumes
+func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume, error) {
+ var vms []v1.VolumeMount
+ var vos []v1.Volume
- // We also only know about a memory limit; no memory minimum
- maxResources := make(map[v1.ResourceName]resource.Quantity)
- minResources := make(map[v1.ResourceName]resource.Quantity)
- config := c.Config()
- maxMem := config.Spec.Linux.Resources.Memory.Limit
+ // TODO when named volumes are supported in play kube, also parse named volumes here
+ _, mounts := c.sortUserVolumes(c.config.Spec)
+ for _, m := range mounts {
+ vm, vo, err := generateKubeVolumeMount(m)
+ if err != nil {
+ return vms, vos, err
+ }
+ vms = append(vms, vm)
+ vos = append(vos, vo)
+ }
+ return vms, vos, nil
+}
- _ = maxMem
+// generateKubeVolumeMount takes a user specified mount and returns
+// a kubernetes VolumeMount (to be added to the container) and a kubernetes Volume
+// (to be added to the pod)
+func generateKubeVolumeMount(m specs.Mount) (v1.VolumeMount, v1.Volume, error) {
+ vm := v1.VolumeMount{}
+ vo := v1.Volume{}
- return maxResources, minResources
+ name, err := convertVolumePathToName(m.Source)
+ if err != nil {
+ return vm, vo, err
+ }
+ vm.Name = name
+ vm.MountPath = m.Destination
+ if util.StringInSlice("ro", m.Options) {
+ vm.ReadOnly = true
+ }
+
+ vo.Name = name
+ vo.HostPath = &v1.HostPathVolumeSource{}
+ vo.HostPath.Path = m.Source
+ isDir, err := isHostPathDirectory(m.Source)
+ // if neither a directory nor a file lives here, default to creating a directory
+ // TODO should this be an error instead?
+ var hostPathType v1.HostPathType
+ if err != nil {
+ hostPathType = v1.HostPathDirectoryOrCreate
+ } else if isDir {
+ hostPathType = v1.HostPathDirectory
+ } else {
+ hostPathType = v1.HostPathFile
+ }
+ vo.HostPath.Type = &hostPathType
+
+ return vm, vo, nil
}
-func generateKubeVolumeMount(hostSourcePath string, mounts []specs.Mount) (v1.VolumeMount, error) {
- vm := v1.VolumeMount{}
- for _, m := range mounts {
- if m.Source == hostSourcePath {
- // TODO Name is not provided and is required by Kube; therefore, this is disabled earlier
- //vm.Name =
- vm.MountPath = m.Source
- vm.SubPath = m.Destination
- if util.StringInSlice("ro", m.Options) {
- vm.ReadOnly = true
- }
- return vm, nil
- }
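+// isHostPathDirectory reports whether the given host path is a directory;
+// callers treat a stat error as the path not existing yet.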
+func isHostPathDirectory(hostPathSource string) (bool, error) {
+ info, err := os.Stat(hostPathSource)
+ if err != nil {
+ return false, err
}
- return vm, errors.New("unable to find mount source")
+ return info.Mode().IsDir(), nil
}
-// libpodMountsToKubeVolumeMounts converts the containers mounts to a struct kube understands
-func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, error) {
- // At this point, I dont think we can distinguish between the default
- // volume mounts and user added ones. For now, we pass them all.
- var vms []v1.VolumeMount
- for _, hostSourcePath := range c.config.UserVolumes {
- vm, err := generateKubeVolumeMount(hostSourcePath, c.config.Spec.Mounts)
- if err != nil {
- continue
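+// convertVolumePathToName derives a kube volume name from a host path,
+// e.g. /mnt/data/ becomes mnt-data.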
+func convertVolumePathToName(hostSourcePath string) (string, error) {
+ if len(hostSourcePath) == 0 {
+ return "", errors.Errorf("hostSourcePath must be specified to generate volume name")
+ }
+ if len(hostSourcePath) == 1 {
+ if hostSourcePath != "/" {
+ return "", errors.Errorf("hostSourcePath malformatted: %s", hostSourcePath)
}
- vms = append(vms, vm)
+ // add special case name
+ return "root", nil
}
- return vms, nil
+ // First, trim trailing slashes, then replace slashes with dashes.
+ // Thus, /mnt/data/ will become mnt-data
+ return strings.Replace(strings.Trim(hostSourcePath, "/"), "/", "-", -1), nil
}
func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v1.Capabilities {
@@ -366,16 +410,14 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v
// those indicate a dropped cap
for _, capability := range defaultCaps {
if !util.StringInSlice(capability, containerCaps) {
- cap := v1.Capability(capability)
- drop = append(drop, cap)
+ drop = append(drop, v1.Capability(capability))
}
}
// Find caps in the container but not in the defaults; those indicate
// an added cap
for _, capability := range containerCaps {
if !util.StringInSlice(capability, defaultCaps) {
- cap := v1.Capability(capability)
- add = append(add, cap)
+ add = append(add, v1.Capability(capability))
}
}
diff --git a/libpod/lock/file/file_lock.go b/libpod/lock/file/file_lock.go
new file mode 100644
index 000000000..e50d67321
--- /dev/null
+++ b/libpod/lock/file/file_lock.go
@@ -0,0 +1,175 @@
+package file
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "syscall"
+
+ "github.com/containers/storage"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// FileLocks is a struct enabling POSIX advisory locking backed by a
+// directory of lock files.
+type FileLocks struct { // nolint
+ lockPath string
+ valid bool
+}
+
+// CreateFileLock sets up a directory containing the various lock files.
+func CreateFileLock(path string) (*FileLocks, error) {
+ _, err := os.Stat(path)
+ if err == nil {
+ return nil, errors.Wrapf(syscall.EEXIST, "directory %s exists", path)
+ }
+ if err := os.MkdirAll(path, 0711); err != nil {
+ return nil, errors.Wrapf(err, "cannot create %s", path)
+ }
+
+ locks := new(FileLocks)
+ locks.lockPath = path
+ locks.valid = true
+
+ return locks, nil
+}
+
+// OpenFileLock opens an existing directory with the lock files.
+func OpenFileLock(path string) (*FileLocks, error) {
+ _, err := os.Stat(path)
+ if err != nil {
+ return nil, errors.Wrapf(err, "accessing directory %s", path)
+ }
+
+ locks := new(FileLocks)
+ locks.lockPath = path
+ locks.valid = true
+
+ return locks, nil
+}
+
+// Close removes the directory holding the lock files.
+// The locks will be rendered unusable after closing.
+// WARNING: If you Close() while there are still locks locked, these locks may
+// fail to release, causing a program freeze.
+// Close() is only intended to be used while testing the locks.
+func (locks *FileLocks) Close() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+ err := os.RemoveAll(locks.lockPath)
+ if err != nil {
+ return errors.Wrapf(err, "deleting directory %s", locks.lockPath)
+ }
+ return nil
+}
+
+func (locks *FileLocks) getLockPath(lck uint32) string {
+ return filepath.Join(locks.lockPath, strconv.FormatInt(int64(lck), 10))
+}
+
+// AllocateLock allocates a lock and returns the index of the lock that was allocated.
+func (locks *FileLocks) AllocateLock() (uint32, error) {
+ if !locks.valid {
+ return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
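+ // Scan upward from 0 for the first free lock ID: the O_EXCL create
+ // fails when a lock file with that ID already exists.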
+ id := uint32(0)
+ for ; ; id++ {
+ path := locks.getLockPath(id)
+ f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil {
+ if os.IsExist(err) {
+ continue
+ }
+ return 0, errors.Wrapf(err, "creating lock file")
+ }
+ f.Close()
+ break
+ }
+ return id, nil
+}
+
+// AllocateGivenLock allocates the given lock from the lock directory for
+// use by a container or pod.
+// If the lock is already in use or the index is invalid an error will be
+// returned.
+func (locks *FileLocks) AllocateGivenLock(lck uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ f, err := os.OpenFile(locks.getLockPath(lck), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil {
+ return errors.Wrapf(err, "error creating lock %d", lck)
+ }
+ f.Close()
+
+ return nil
+}
+
+// DeallocateLock frees a lock in the lock directory so it can be
+// reallocated to another container or pod.
+// The given lock must be already allocated, or an error will be returned.
+func (locks *FileLocks) DeallocateLock(lck uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+ if err := os.Remove(locks.getLockPath(lck)); err != nil {
+ return errors.Wrapf(err, "deallocating lock %d", lck)
+ }
+ return nil
+}
+
+// DeallocateAllLocks frees all locks so they can be reallocated to
+// other containers and pods.
+func (locks *FileLocks) DeallocateAllLocks() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+ files, err := ioutil.ReadDir(locks.lockPath)
+ if err != nil {
+ return errors.Wrapf(err, "error reading directory %s", locks.lockPath)
+ }
+ var lastErr error
+ for _, f := range files {
+ p := filepath.Join(locks.lockPath, f.Name())
+ err := os.Remove(p)
+ if err != nil {
+ lastErr = err
+ logrus.Errorf("deallocating lock %s", p)
+ }
+ }
+ return lastErr
+}
+
+// LockFileLock locks the given lock.
+func (locks *FileLocks) LockFileLock(lck uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ l, err := storage.GetLockfile(locks.getLockPath(lck))
+ if err != nil {
+ return errors.Wrapf(err, "error acquiring lock")
+ }
+
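+ // GetLockfile returns a shared lockfile object for the path; Lock()
+ // blocks until the underlying file lock is acquired.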
+ l.Lock()
+ return nil
+}
+
+// UnlockFileLock unlocks the given lock.
+func (locks *FileLocks) UnlockFileLock(lck uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+ l, err := storage.GetLockfile(locks.getLockPath(lck))
+ if err != nil {
+ return errors.Wrapf(err, "error acquiring lock")
+ }
+
+ l.Unlock()
+ return nil
+}
diff --git a/libpod/lock/file/file_lock_test.go b/libpod/lock/file/file_lock_test.go
new file mode 100644
index 000000000..6320d6b70
--- /dev/null
+++ b/libpod/lock/file/file_lock_test.go
@@ -0,0 +1,74 @@
+package file
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// Test that creating and destroying locks work
+func TestCreateAndDeallocate(t *testing.T) {
+ d, err := ioutil.TempDir("", "filelock")
+ assert.NoError(t, err)
+ defer os.RemoveAll(d)
+
+ l, err := OpenFileLock(filepath.Join(d, "locks"))
+ assert.Error(t, err)
+
+ l, err = CreateFileLock(filepath.Join(d, "locks"))
+ assert.NoError(t, err)
+
+ lock, err := l.AllocateLock()
+ assert.NoError(t, err)
+
+ err = l.AllocateGivenLock(lock)
+ assert.Error(t, err)
+
+ err = l.DeallocateLock(lock)
+ assert.NoError(t, err)
+
+ err = l.AllocateGivenLock(lock)
+ assert.NoError(t, err)
+
+ err = l.DeallocateAllLocks()
+ assert.NoError(t, err)
+
+ err = l.AllocateGivenLock(lock)
+ assert.NoError(t, err)
+
+ err = l.DeallocateAllLocks()
+ assert.NoError(t, err)
+}
+
+// Test that locking and unlocking locks work
+func TestLockAndUnlock(t *testing.T) {
+ d, err := ioutil.TempDir("", "filelock")
+ assert.NoError(t, err)
+ defer os.RemoveAll(d)
+
+ l, err := CreateFileLock(filepath.Join(d, "locks"))
+ assert.NoError(t, err)
+
+ lock, err := l.AllocateLock()
+ assert.NoError(t, err)
+
+ err = l.LockFileLock(lock)
+ assert.NoError(t, err)
+
+ lslocks, err := exec.LookPath("lslocks")
+ if err == nil {
+ lockPath := l.getLockPath(lock)
+ out, err := exec.Command(lslocks, "--json", "-p", fmt.Sprintf("%d", os.Getpid())).CombinedOutput()
+ assert.NoError(t, err)
+
+ assert.Contains(t, string(out), lockPath)
+ }
+
+ err = l.UnlockFileLock(lock)
+ assert.NoError(t, err)
+}
diff --git a/libpod/lock/file_lock_manager.go b/libpod/lock/file_lock_manager.go
new file mode 100644
index 000000000..8a4d939d3
--- /dev/null
+++ b/libpod/lock/file_lock_manager.go
@@ -0,0 +1,110 @@
+package lock
+
+import (
+ "github.com/containers/libpod/libpod/lock/file"
+)
+
+// FileLockManager manages file-based locks.
+type FileLockManager struct {
+ locks *file.FileLocks
+}
+
+// NewFileLockManager makes a new FileLockManager at the specified directory.
+func NewFileLockManager(lockPath string) (Manager, error) {
+ locks, err := file.CreateFileLock(lockPath)
+ if err != nil {
+ return nil, err
+ }
+
+ manager := new(FileLockManager)
+ manager.locks = locks
+
+ return manager, nil
+}
+
+// OpenFileLockManager opens an existing FileLockManager at the specified directory.
+func OpenFileLockManager(path string) (Manager, error) {
+ locks, err := file.OpenFileLock(path)
+ if err != nil {
+ return nil, err
+ }
+
+ manager := new(FileLockManager)
+ manager.locks = locks
+
+ return manager, nil
+}
+
+// AllocateLock allocates a new lock from the manager.
+func (m *FileLockManager) AllocateLock() (Locker, error) {
+ lockID, err := m.locks.AllocateLock()
+ if err != nil {
+ return nil, err
+ }
+
+ lock := new(FileLock)
+ lock.lockID = lockID
+ lock.manager = m
+
+ return lock, nil
+}
+
+// AllocateAndRetrieveLock allocates the lock with the given ID and returns it.
+// If the lock is already allocated, an error is returned.
+func (m *FileLockManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
+ lock := new(FileLock)
+ lock.lockID = id
+ lock.manager = m
+
+ if err := m.locks.AllocateGivenLock(id); err != nil {
+ return nil, err
+ }
+
+ return lock, nil
+}
+
+// RetrieveLock retrieves a lock from the manager given its ID.
+func (m *FileLockManager) RetrieveLock(id uint32) (Locker, error) {
+ lock := new(FileLock)
+ lock.lockID = id
+ lock.manager = m
+
+ return lock, nil
+}
+
+// FreeAllLocks frees all locks in the manager.
+// This function is DANGEROUS. Please read the full comment in locks.go before
+// trying to use it.
+func (m *FileLockManager) FreeAllLocks() error {
+ return m.locks.DeallocateAllLocks()
+}
+
+// FileLock is an individual file-based lock.
+type FileLock struct {
+ lockID uint32
+ manager *FileLockManager
+}
+
+// ID returns the ID of the lock.
+func (l *FileLock) ID() uint32 {
+ return l.lockID
+}
+
+// Lock acquires the lock.
+func (l *FileLock) Lock() {
+ if err := l.manager.locks.LockFileLock(l.lockID); err != nil {
+ panic(err.Error())
+ }
+}
+
+// Unlock releases the lock.
+func (l *FileLock) Unlock() {
+ if err := l.manager.locks.UnlockFileLock(l.lockID); err != nil {
+ panic(err.Error())
+ }
+}
+
+// Free releases the lock, allowing it to be reused.
+func (l *FileLock) Free() error {
+ return l.manager.locks.DeallocateLock(l.lockID)
+}
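Because FileLockManager satisfies the same Manager interface as the SHM backend, callers stay backend-agnostic; a rough usage sketch (path hypothetical):

    package main

    import (
        "fmt"

        "github.com/containers/libpod/libpod/lock"
    )

    func main() {
        // NewFileLockManager creates the lock directory;
        // OpenFileLockManager expects one to exist already.
        manager, err := lock.NewFileLockManager("/tmp/libpod-locks")
        if err != nil {
            panic(err)
        }

        // The Locker returned here behaves identically for SHM locks.
        l, err := manager.AllocateLock()
        if err != nil {
            panic(err)
        }
        l.Lock()
        fmt.Println("holding lock", l.ID())
        l.Unlock()

        // Return the lock to the pool for reuse.
        if err := l.Free(); err != nil {
            panic(err)
        }
    }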
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index 76dd5729e..322e92a8f 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -1,3 +1,5 @@
+// +build linux,cgo
+
package shm
// #cgo LDFLAGS: -lrt -lpthread
@@ -20,7 +22,7 @@ var (
// BitmapSize is the size of the bitmap used when managing SHM locks.
// an SHM lock manager's max locks will be rounded up to a multiple of
// this number.
- BitmapSize uint32 = uint32(C.bitmap_size_c)
+ BitmapSize = uint32(C.bitmap_size_c)
)
// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
diff --git a/libpod/lock/shm/shm_lock_nocgo.go b/libpod/lock/shm/shm_lock_nocgo.go
new file mode 100644
index 000000000..ea1488c90
--- /dev/null
+++ b/libpod/lock/shm/shm_lock_nocgo.go
@@ -0,0 +1,102 @@
+// +build linux,!cgo
+
+package shm
+
+import (
+ "github.com/sirupsen/logrus"
+)
+
+// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
+// segment.
+type SHMLocks struct {
+}
+
+// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
+// semaphores, and returns a struct that can be used to operate on those locks.
+// numLocks must not be 0, and may be rounded up to a multiple of the bitmap
+// size used by the underlying implementation.
+func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+ logrus.Error("locks are not supported without cgo")
+ return &SHMLocks{}, nil
+}
+
+// OpenSHMLock opens an existing shared-memory segment holding a given number of
+// POSIX semaphores. numLocks must match the number of locks the shared memory
+// segment was created with.
+func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+ logrus.Error("locks are not supported without cgo")
+ return &SHMLocks{}, nil
+}
+
+// GetMaxLocks returns the maximum number of locks in the SHM
+func (locks *SHMLocks) GetMaxLocks() uint32 {
+ logrus.Error("locks are not supported without cgo")
+ return 0
+}
+
+// Close closes an existing shared-memory segment.
+// The segment will be rendered unusable after closing.
+// WARNING: If you Close() while there are still locks locked, these locks may
+// fail to release, causing a program freeze.
+// Close() is only intended to be used while testing the locks.
+func (locks *SHMLocks) Close() error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
+// by a container or pod.
+// Returns the index of the semaphore that was allocated.
+// Allocations past the maximum number of locks given when the SHM segment was
+// created will result in an error, and no semaphore will be allocated.
+func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
+ logrus.Error("locks are not supported without cgo")
+ return 0, nil
+}
+
+// AllocateGivenSemaphore allocates the given semaphore from the shared-memory
+// segment for use by a container or pod.
+// If the semaphore is already in use or the index is invalid an error will be
+// returned.
+func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
+// reallocated to another container or pod.
+// The given semaphore must be already allocated, or an error will be returned.
+func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
+// other containers and pods.
+func (locks *SHMLocks) DeallocateAllSemaphores() error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// LockSemaphore locks the given semaphore.
+// If the semaphore is already locked, LockSemaphore will block until the lock
+// can be acquired.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) LockSemaphore(sem uint32) error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// UnlockSemaphore unlocks the given semaphore.
+// Unlocking a semaphore that is already unlocked will return EBUSY.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to unlock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
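The stub above pairs with the cgo file through mutually exclusive build tags, so exactly one implementation is compiled in. The general shape of the pattern, with hypothetical package and function names:

    // feature_cgo.go
    // +build linux,cgo

    package feature

    // Real implementation, free to call into C.
    func Supported() bool { return true }

    // feature_nocgo.go
    // +build linux,!cgo

    package feature

    // Stub compiled when cgo is disabled; callers degrade gracefully,
    // mirroring the logrus.Error no-ops above.
    func Supported() bool { return false }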
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 93ec157c5..d978bceed 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -28,21 +28,23 @@ import (
// Get an OCICNI network config
func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP) ocicni.PodNetwork {
+ defaultNetwork := r.netPlugin.GetDefaultNetworkName()
network := ocicni.PodNetwork{
- Name: name,
- Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces
- ID: id,
- NetNS: nsPath,
- PortMappings: ports,
- Networks: networks,
+ Name: name,
+ Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces
+ ID: id,
+ NetNS: nsPath,
+ Networks: networks,
+ RuntimeConfig: map[string]ocicni.RuntimeConfig{
+ defaultNetwork: {PortMappings: ports},
+ },
}
if staticIP != nil {
- defaultNetwork := r.netPlugin.GetDefaultNetworkName()
-
network.Networks = []string{defaultNetwork}
- network.NetworkConfig = make(map[string]ocicni.NetworkConfig)
- network.NetworkConfig[defaultNetwork] = ocicni.NetworkConfig{IP: staticIP.String()}
+ network.RuntimeConfig = map[string]ocicni.RuntimeConfig{
+ defaultNetwork: {IP: staticIP.String(), PortMappings: ports},
+ }
}
return network
@@ -292,14 +294,14 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
return errors.Wrapf(err, "cannot shutdown the socket %s", apiSocket)
}
buf := make([]byte, 2048)
- len, err := conn.Read(buf)
+ readLength, err := conn.Read(buf)
if err != nil {
return errors.Wrapf(err, "cannot read from control socket %s", apiSocket)
}
// if there is no 'error' key in the received JSON data, then the operation was
// successful.
var y map[string]interface{}
- if err := json.Unmarshal(buf[0:len], &y); err != nil {
+ if err := json.Unmarshal(buf[0:readLength], &y); err != nil {
return errors.Wrapf(err, "error parsing error status from slirp4netns")
}
if e, found := y["error"]; found {
@@ -330,7 +332,9 @@ func (r *Runtime) setupNetNS(ctr *Container) (err error) {
if err != nil {
return errors.Wrapf(err, "cannot open %s", nsPath)
}
- mountPointFd.Close()
+ if err := mountPointFd.Close(); err != nil {
+ return err
+ }
if err := unix.Mount(nsProcess, nsPath, "none", unix.MS_BIND, ""); err != nil {
return errors.Wrapf(err, "cannot mount %s", nsPath)
@@ -350,12 +354,12 @@ func (r *Runtime) setupNetNS(ctr *Container) (err error) {
// Join an existing network namespace
func joinNetNS(path string) (ns.NetNS, error) {
- ns, err := ns.GetNS(path)
+ netNS, err := ns.GetNS(path)
if err != nil {
return nil, errors.Wrapf(err, "error retrieving network namespace at %s", path)
}
- return ns, nil
+ return netNS, nil
}
// Close a network namespace.
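The RuntimeConfig map introduced here keys per-network runtime settings (port mappings and, optionally, a static IP) by network name, where the old NetworkConfig carried only the IP. A sketch of the resulting structure (all values hypothetical; ocicni import path as used by libpod):

    package main

    import (
        "fmt"

        "github.com/cri-o/ocicni/pkg/ocicni"
    )

    func main() {
        ports := []ocicni.PortMapping{
            {HostPort: 8080, ContainerPort: 80, Protocol: "tcp"},
        }
        defaultNetwork := "podman" // hypothetical default network name
        network := ocicni.PodNetwork{
            Name:      "mypod",
            Namespace: "mypod",
            ID:        "0123456789ab",
            NetNS:     "/var/run/netns/example",
            Networks:  []string{defaultNetwork},
            // Per-network runtime settings replace the old NetworkConfig.
            RuntimeConfig: map[string]ocicni.RuntimeConfig{
                defaultNetwork: {IP: "10.88.0.10", PortMappings: ports},
            },
        }
        fmt.Printf("%+v\n", network)
    }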
diff --git a/libpod/oci.go b/libpod/oci.go
index efb5e42cc..6aad79cdf 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -234,6 +234,8 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
// Alright, it exists. Transition to Stopped state.
ctr.state.State = define.ContainerStateStopped
+ ctr.state.PID = 0
+ ctr.state.ConmonPID = 0
// Read the exit file to get our stopped time and exit code.
return ctr.handleExitFile(exitFile, info)
@@ -261,7 +263,9 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
return errors.Wrapf(err, "error getting container %s state", ctr.ID())
}
if strings.Contains(string(out), "does not exist") {
- ctr.removeConmonFiles()
+ if err := ctr.removeConmonFiles(); err != nil {
+ logrus.Debugf("unable to remove conmon files for container %s", ctr.ID())
+ }
ctr.state.ExitCode = -1
ctr.state.FinishedTime = time.Now()
ctr.state.State = define.ContainerStateExited
@@ -271,7 +275,9 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
}
defer cmd.Wait()
- errPipe.Close()
+ if err := errPipe.Close(); err != nil {
+ return err
+ }
out, err := ioutil.ReadAll(outPipe)
if err != nil {
return errors.Wrapf(err, "error reading stdout: %s", ctr.ID())
@@ -431,8 +437,8 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty
args = append(args, "--no-new-privs")
}
- for _, cap := range capAdd {
- args = append(args, "--cap", cap)
+ for _, capabilityAdd := range capAdd {
+ args = append(args, "--cap", capabilityAdd)
}
for _, envVar := range env {
@@ -473,7 +479,9 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty
for fd := 3; fd < 3+preserveFDs; fd++ {
// These fds were passed down to the runtime. Close them
// so we do not interfere
- os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close()
+ if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil {
+ logrus.Debugf("unable to close file fd-%d", fd)
+ }
}
}
@@ -482,7 +490,9 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty
// checkpointContainer checkpoints the given container
func (r *OCIRuntime) checkpointContainer(ctr *Container, options ContainerCheckpointOptions) error {
- label.SetSocketLabel(ctr.ProcessLabel())
+ if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil {
+ return err
+ }
// imagePath is used by CRIU to store the actual checkpoint files
imagePath := ctr.CheckpointPath()
// workPath will be used to store dump.log and stats-dump
diff --git a/libpod/oci_linux.go b/libpod/oci_linux.go
index 7d9f47ae2..802f4311b 100644
--- a/libpod/oci_linux.go
+++ b/libpod/oci_linux.go
@@ -342,7 +342,9 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
)
plabel, err = selinux.CurrentLabel()
if err != nil {
- childPipe.Close()
+ if err := childPipe.Close(); err != nil {
+ logrus.Errorf("failed to close child pipe: %q", err)
+ }
return errors.Wrapf(err, "Failed to get current SELinux label")
}
@@ -446,6 +448,9 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
return errors.Wrapf(define.ErrInternal, "container create failed")
}
ctr.state.PID = ss.si.Pid
+ if cmd.Process != nil {
+ ctr.state.ConmonPID = cmd.Process.Pid
+ }
case <-time.After(ContainerCreateTimeout):
return errors.Wrapf(define.ErrInternal, "container creation timeout")
}
diff --git a/libpod/options.go b/libpod/options.go
index 0f23a6c97..4f8bb42df 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -300,6 +300,15 @@ func WithTmpDir(dir string) RuntimeOption {
}
}
+// WithNoStore indicates that the runtime does not need to
+// interact with container storage.
+func WithNoStore() RuntimeOption {
+ return func(rt *Runtime) error {
+ rt.noStore = true
+ return nil
+ }
+}
+
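WithNoStore follows the functional-option pattern used throughout this file: each RuntimeOption is a closure that mutates the Runtime under construction. A toy version of the mechanism (names hypothetical):

    package main

    import "fmt"

    type runtime struct {
        noStore bool
    }

    type option func(*runtime) error

    func withNoStore() option {
        return func(rt *runtime) error {
            rt.noStore = true
            return nil
        }
    }

    func newRuntime(opts ...option) (*runtime, error) {
        rt := &runtime{}
        for _, opt := range opts {
            if err := opt(rt); err != nil {
                return nil, err
            }
        }
        return rt, nil
    }

    func main() {
        rt, _ := newRuntime(withNoStore())
        fmt.Println("noStore:", rt.noStore) // noStore: true
    }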
// WithMaxLogSize sets the maximum size of container logs.
// Positive sizes are limits in bytes, -1 is unlimited.
func WithMaxLogSize(limit int64) RuntimeOption {
@@ -316,7 +325,7 @@ func WithMaxLogSize(limit int64) RuntimeOption {
// WithNoPivotRoot sets the runtime to use MS_MOVE instead of PIVOT_ROOT when
// starting containers.
-func WithNoPivotRoot(noPivot bool) RuntimeOption {
+func WithNoPivotRoot() RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
return config2.ErrRuntimeFinalized
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 5a618f592..53c9a1209 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -81,6 +81,10 @@ var (
DefaultSHMLockPath = "/libpod_lock"
// DefaultRootlessSHMLockPath is the default path for rootless SHM locks
DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
+
+ // DefaultDetachKeys is the default keys sequence for detaching a
+ // container
+ DefaultDetachKeys = "ctrl-p,ctrl-q"
)
// A RuntimeOption is a functional option which alters the Runtime created by
@@ -121,6 +125,9 @@ type Runtime struct {
// mechanism to read and write event logs
eventer events.Eventer
+
+ // noStore indicates whether we need to interact with a store or not
+ noStore bool
}
// RuntimeConfig contains configuration options used to set up the runtime
@@ -232,10 +239,15 @@ type RuntimeConfig struct {
// pods.
NumLocks uint32 `toml:"num_locks,omitempty"`
+ // LockType is the type of locking to use.
+ LockType string `toml:"lock_type,omitempty"`
+
// EventsLogger determines where events should be logged
EventsLogger string `toml:"events_logger"`
// EventsLogFilePath is where the events log is stored.
- EventsLogFilePath string `toml:-"events_logfile_path"`
+ EventsLogFilePath string `toml:"events_logfile_path"`
+ // DetachKeys is the sequence of keys used to detach a container
+ DetachKeys string `toml:"detach_keys"`
}
// runtimeConfiguredFrom is a struct used during early runtime init to help
@@ -308,6 +320,8 @@ func defaultRuntimeConfig() (RuntimeConfig, error) {
EnableLabeling: true,
NumLocks: 2048,
EventsLogger: events.DefaultEventerType.String(),
+ DetachKeys: DefaultDetachKeys,
+ LockType: "shm",
}, nil
}
@@ -629,7 +643,9 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
}
if configPath != "" {
- os.MkdirAll(filepath.Dir(configPath), 0755)
+ if err := os.MkdirAll(filepath.Dir(configPath), 0755); err != nil {
+ return nil, err
+ }
file, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
if err != nil && !os.IsExist(err) {
return nil, errors.Wrapf(err, "cannot open file %s", configPath)
@@ -638,7 +654,9 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
defer file.Close()
enc := toml.NewEncoder(file)
if err := enc.Encode(runtime.config); err != nil {
- os.Remove(configPath)
+ if removeErr := os.Remove(configPath); removeErr != nil {
+ logrus.Debugf("unable to remove %s: %q", configPath, err)
+ }
}
}
}
@@ -649,6 +667,62 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
return runtime, nil
}
+func getLockManager(runtime *Runtime) (lock.Manager, error) {
+ var err error
+ var manager lock.Manager
+
+ switch runtime.config.LockType {
+ case "file":
+ lockPath := filepath.Join(runtime.config.TmpDir, "locks")
+ manager, err = lock.OpenFileLockManager(lockPath)
+ if err != nil {
+ if os.IsNotExist(errors.Cause(err)) {
+ manager, err = lock.NewFileLockManager(lockPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to get new file lock manager")
+ }
+ } else {
+ return nil, err
+ }
+ }
+
+ case "", "shm":
+ lockPath := DefaultSHMLockPath
+ if rootless.IsRootless() {
+ lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
+ }
+ // Set up the lock manager
+ manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ if os.IsNotExist(errors.Cause(err)) {
+ manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to get new shm lock manager")
+ }
+ } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
+ logrus.Debugf("Number of locks does not match - removing old locks")
+
+ // ERANGE indicates a lock numbering mismatch.
+ // Since we're renumbering, this is not fatal.
+ // Remove the earlier set of locks and recreate.
+ if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
+ return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
+ }
+
+ manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+ }
+ default:
+ return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.LockType)
+ }
+ return manager, nil
+}
+
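The backend is selected by the lock_type config field shown above: "shm" (or empty) keeps the SHM semaphores, "file" uses the new file-based locks. getLockManager treats a missing lock store as first run and falls back from opening to creating; the same open-or-create idiom in isolation (hypothetical wrapper):

    package lockutil

    import (
        "os"

        "github.com/containers/libpod/libpod/lock"
        "github.com/pkg/errors"
    )

    // OpenOrCreate reuses an existing file-lock directory and
    // bootstraps a fresh one when none is present.
    func OpenOrCreate(path string) (lock.Manager, error) {
        manager, err := lock.OpenFileLockManager(path)
        if err == nil {
            return manager, nil
        }
        if os.IsNotExist(errors.Cause(err)) {
            return lock.NewFileLockManager(path)
        }
        return nil, err
    }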
// Make a new runtime based on the given configuration
// Sets up containers/storage, state store, OCI runtime
func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
@@ -777,11 +851,14 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
var store storage.Store
if os.Geteuid() != 0 {
logrus.Debug("Not configuring container store")
+ } else if runtime.noStore {
+ logrus.Debug("No store required. Not opening container store.")
} else {
store, err = storage.GetStore(runtime.config.StorageConfig)
if err != nil {
return err
}
+ // err is nil at this point; reset the named return explicitly so
+ // the deferred cleanup below only fires if a later step fails.
+ err = nil
defer func() {
if err != nil && store != nil {
@@ -1031,37 +1108,10 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
}
}
- lockPath := DefaultSHMLockPath
- if rootless.IsRootless() {
- lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
- }
- // Set up the lock manager
- manager, err := lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
+ runtime.lockManager, err = getLockManager(runtime)
if err != nil {
- if os.IsNotExist(errors.Cause(err)) {
- manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
- if err != nil {
- return errors.Wrapf(err, "failed to get new shm lock manager")
- }
- } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
- logrus.Debugf("Number of locks does not match - removing old locks")
-
- // ERANGE indicates a lock numbering mismatch.
- // Since we're renumbering, this is not fatal.
- // Remove the earlier set of locks and recreate.
- if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
- return errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
- }
-
- manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
- if err != nil {
- return err
- }
- } else {
- return err
- }
+ return err
}
- runtime.lockManager = manager
// If we're renumbering locks, do it now.
// It breaks out of normal runtime init, and will not return a valid
@@ -1141,6 +1191,8 @@ func (r *Runtime) Shutdown(force bool) error {
}
var lastError error
+ // If no store was requested, it can be nil and there is no need to
+ // attempt to shut it down
if r.store != nil {
if _, err := r.store.Shutdown(force); err != nil {
lastError = errors.Wrapf(err, "Error shutting down container storage")
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 0d0f700a6..d022478b1 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -132,6 +132,14 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container, restore bo
ctr.config.LockID = ctr.lock.ID()
logrus.Debugf("Allocated lock %d for container %s", ctr.lock.ID(), ctr.ID())
+ defer func() {
+ if err != nil {
+ if err2 := ctr.lock.Free(); err2 != nil {
+ logrus.Errorf("Error freeing lock for container after creation failed: %v", err2)
+ }
+ }
+ }()
+
ctr.valid = true
ctr.state.State = config2.ContainerStateConfigured
ctr.runtime = r
@@ -422,22 +430,22 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
// If we're removing the pod, the container will be evicted
// from the state elsewhere
if !removePod {
- if err := r.state.RemoveContainerFromPod(pod, c); err != nil {
- if cleanupErr == nil {
- cleanupErr = err
- } else {
- logrus.Errorf("removing container from pod: %v", err)
- }
- }
- }
- } else {
- if err := r.state.RemoveContainer(c); err != nil {
+ if err := r.state.RemoveContainerFromPod(pod, c); err != nil {
if cleanupErr == nil {
cleanupErr = err
} else {
- logrus.Errorf("removing container: %v", err)
+ logrus.Errorf("removing container from pod: %v", err)
}
}
+ }
+ } else {
+ if err := r.state.RemoveContainer(c); err != nil {
+ if cleanupErr == nil {
+ cleanupErr = err
+ } else {
+ logrus.Errorf("removing container: %v", err)
+ }
+ }
}
// Set container as invalid so it can no longer be used
diff --git a/libpod/runtime_migrate.go b/libpod/runtime_migrate.go
index ad45579d3..c363991e6 100644
--- a/libpod/runtime_migrate.go
+++ b/libpod/runtime_migrate.go
@@ -37,7 +37,9 @@ func stopPauseProcess() error {
if err := os.Remove(pausePidPath); err != nil {
return errors.Wrapf(err, "cannot delete pause pid file %s", pausePidPath)
}
- syscall.Kill(pausePid, syscall.SIGKILL)
+ if err := syscall.Kill(pausePid, syscall.SIGKILL); err != nil {
+ return err
+ }
}
return nil
}
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index e9ce130da..d667d3a25 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -19,7 +19,7 @@ import (
)
// NewPod makes a new, empty pod
-func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod, error) {
+func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Pod, Err error) {
r.lock.Lock()
defer r.lock.Unlock()
@@ -60,6 +60,14 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
pod.lock = lock
pod.config.LockID = pod.lock.ID()
+ defer func() {
+ if Err != nil {
+ if err := pod.lock.Free(); err != nil {
+ logrus.Errorf("Error freeing pod lock after failed creation: %v", err)
+ }
+ }
+ }()
+
pod.valid = true
// Check CGroup parent sanity, and set it if it was not set
@@ -113,15 +121,17 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
if err := r.state.AddPod(pod); err != nil {
return nil, errors.Wrapf(err, "error adding pod to state")
}
+ defer func() {
+ if Err != nil {
+ if err := r.removePod(ctx, pod, true, true); err != nil {
+ logrus.Errorf("Error removing pod after pause container creation failure: %v", err)
+ }
+ }
+ }()
if pod.HasInfraContainer() {
ctr, err := r.createInfraContainer(ctx, pod)
if err != nil {
- // Tear down pod, as it is assumed a the pod will contain
- // a pause container, and it does not.
- if err2 := r.removePod(ctx, pod, true, true); err2 != nil {
- logrus.Errorf("Error removing pod after pause container creation failure: %v", err2)
- }
return nil, errors.Wrapf(err, "error adding Infra Container")
}
pod.state.InfraContainerID = ctr.ID()
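The switch to a named Err return lets the deferred cleanups above observe whether NewPod is exiting with an error; the shape of the pattern in isolation (hypothetical names):

    package main

    import (
        "errors"
        "fmt"
    )

    func create(fail bool) (_ string, Err error) {
        fmt.Println("allocate resource")
        defer func() {
            // Runs on every exit path, but only cleans up
            // when the function is returning an error.
            if Err != nil {
                fmt.Println("cleanup after failure")
            }
        }()

        if fail {
            return "", errors.New("boom") // deferred cleanup fires
        }
        return "resource", nil // deferred cleanup is a no-op
    }

    func main() {
        _, _ = create(true)
        res, _ := create(false)
        fmt.Println(res)
    }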
diff --git a/libpod/stats.go b/libpod/stats.go
index da383e9d9..52af824bb 100644
--- a/libpod/stats.go
+++ b/libpod/stats.go
@@ -3,6 +3,7 @@
package libpod
import (
+ "runtime"
"strings"
"syscall"
"time"
@@ -45,10 +46,6 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
return stats, errors.Wrapf(err, "unable to obtain cgroup stats")
}
conState := c.state.State
- if err != nil {
- return stats, errors.Wrapf(err, "unable to determine container state")
- }
-
netStats, err := getContainerNetIO(c)
if err != nil {
return nil, err
@@ -105,7 +102,11 @@ func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, previousSystem uin
if systemDelta > 0.0 && cpuDelta > 0.0 {
// gets a ratio of container cpu usage total, multiplies it by the number of cores (4 cores running
// at 100% utilization should be 400% utilization), and multiplies that by 100 to get a percentage
- cpuPercent = (cpuDelta / systemDelta) * float64(len(stats.CPU.Usage.PerCPU)) * 100
+ nCPUS := len(stats.CPU.Usage.PerCPU)
+ if nCPUS == 0 {
+ nCPUS = runtime.NumCPU()
+ }
+ cpuPercent = (cpuDelta / systemDelta) * float64(nCPUS) * 100
}
return cpuPercent
}
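With the zero-CPU fallback in place, the computation is cpuPercent = (cpuDelta / systemDelta) * nCPUs * 100; a worked example with made-up deltas:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        cpuDelta := 2.0e9    // container consumed 2s of CPU time (ns)
        systemDelta := 4.0e9 // 4s of system time elapsed (ns)

        nCPUs := 0 // e.g. a cgroup that reports no per-CPU usage
        if nCPUs == 0 {
            nCPUs = runtime.NumCPU() // fall back to the host count
        }

        cpuPercent := (cpuDelta / systemDelta) * float64(nCPUs) * 100
        fmt.Printf("%.1f%%\n", cpuPercent) // e.g. 200.0% on a 4-CPU host
    }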
diff --git a/libpod/util.go b/libpod/util.go
index b0c25074b..b60575264 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -9,8 +9,6 @@ import (
"strings"
"time"
- "github.com/containers/image/signature"
- "github.com/containers/image/types"
"github.com/containers/libpod/libpod/define"
"github.com/fsnotify/fsnotify"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -32,24 +30,6 @@ func FuncTimer(funcName string) {
fmt.Printf("%s executed in %d ms\n", funcName, elapsed)
}
-// CopyStringStringMap deep copies a map[string]string and returns the result
-func CopyStringStringMap(m map[string]string) map[string]string {
- n := map[string]string{}
- for k, v := range m {
- n[k] = v
- }
- return n
-}
-
-// GetPolicyContext creates a signature policy context for the given signature policy path
-func GetPolicyContext(path string) (*signature.PolicyContext, error) {
- policy, err := signature.DefaultPolicy(&types.SystemContext{SignaturePolicyPath: path})
- if err != nil {
- return nil, err
- }
- return signature.NewPolicyContext(policy)
-}
-
// RemoveScientificNotationFromFloat returns a float without any
// scientific notation if the number has any.
// golang does not handle conversion of float64s that have scientific