Diffstat (limited to 'libpod')
-rw-r--r--  libpod/boltdb_state.go | 14
-rw-r--r--  libpod/boltdb_state_internal.go | 7
-rw-r--r--  libpod/common_test.go | 19
-rw-r--r--  libpod/container.go | 101
-rw-r--r--  libpod/container.log.go | 73
-rw-r--r--  libpod/container_api.go | 73
-rw-r--r--  libpod/container_attach_linux.go | 48
-rw-r--r--  libpod/container_attach_linux_cgo.go | 11
-rw-r--r--  libpod/container_attach_linux_nocgo.go | 7
-rw-r--r--  libpod/container_attach_unsupported.go | 4
-rw-r--r--  libpod/container_commit.go | 3
-rw-r--r--  libpod/container_graph.go | 4
-rw-r--r--  libpod/container_inspect.go | 106
-rw-r--r--  libpod/container_internal.go | 170
-rw-r--r--  libpod/container_internal_linux.go | 74
-rw-r--r--  libpod/container_log_linux.go | 15
-rw-r--r--  libpod/container_log_unsupported.go | 3
-rw-r--r--  libpod/container_top_linux.go | 9
-rw-r--r--  libpod/container_top_unsupported.go | 6
-rw-r--r--  libpod/define/config.go | 10
-rw-r--r--  libpod/define/containerstate.go | 73
-rw-r--r--  libpod/define/version.go (renamed from libpod/version.go) | 2
-rw-r--r--  libpod/events.go | 55
-rw-r--r--  libpod/events/config.go | 11
-rw-r--r--  libpod/events/events.go | 2
-rw-r--r--  libpod/events/journal_linux.go | 4
-rw-r--r--  libpod/healthcheck.go | 3
-rw-r--r--  libpod/image/image.go | 94
-rw-r--r--  libpod/image/pull.go | 4
-rw-r--r--  libpod/info.go | 6
-rw-r--r--  libpod/kube.go | 164
-rw-r--r--  libpod/lock/file/file_lock.go | 175
-rw-r--r--  libpod/lock/file/file_lock_test.go | 74
-rw-r--r--  libpod/lock/file_lock_manager.go | 110
-rw-r--r--  libpod/lock/shm/shm_lock.go | 4
-rw-r--r--  libpod/lock/shm/shm_lock_nocgo.go | 102
-rw-r--r--  libpod/logs/log.go (renamed from libpod/container_log.go) | 96
-rw-r--r--  libpod/networking_linux.go | 34
-rw-r--r--  libpod/oci.go | 38
-rw-r--r--  libpod/oci_linux.go | 7
-rw-r--r--  libpod/options.go | 11
-rw-r--r--  libpod/pod_api.go | 12
-rw-r--r--  libpod/pod_top_linux.go | 3
-rw-r--r--  libpod/runtime.go | 253
-rw-r--r--  libpod/runtime_ctr.go | 48
-rw-r--r--  libpod/runtime_migrate.go | 6
-rw-r--r--  libpod/runtime_pod_linux.go | 22
-rw-r--r--  libpod/state_test.go | 5
-rw-r--r--  libpod/stats.go | 15
-rw-r--r--  libpod/util.go | 31
50 files changed, 1483 insertions(+), 738 deletions(-)
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index b047c9fa0..4dda3a7f0 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -381,11 +381,6 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
defer s.closeDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
- idBucket, err := getIDBucket(tx)
- if err != nil {
- return err
- }
-
ctrBucket, err := getCtrBucket(tx)
if err != nil {
return err
@@ -436,7 +431,7 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
// We were not given a full container ID or name.
// Search for partial ID matches.
exists := false
- err = idBucket.ForEach(func(checkID, checkName []byte) error {
+ err = ctrBucket.ForEach(func(checkID, checkName []byte) error {
// If the container isn't in our namespace, we
// can't match it
if s.namespaceBytes != nil {
@@ -963,11 +958,6 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
defer s.closeDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
- idBucket, err := getIDBucket(tx)
- if err != nil {
- return err
- }
-
podBkt, err := getPodBucket(tx)
if err != nil {
return err
@@ -1015,7 +1005,7 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
// They did not give us a full pod name or ID.
// Search for partial ID matches.
exists := false
- err = idBucket.ForEach(func(checkID, checkName []byte) error {
+ err = podBkt.ForEach(func(checkID, checkName []byte) error {
// If the pod isn't in our namespace, we
// can't match it
if s.namespaceBytes != nil {
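
Both lookup hunks above replace the dropped ID bucket with a ForEach scan over the container/pod bucket itself when resolving a partial ID. A minimal sketch of that prefix-match pattern against a bbolt bucket follows; the bucket name, import path, and demo IDs are stand-ins, not libpod's actual schema:

package main

import (
	"bytes"
	"errors"
	"fmt"

	bolt "go.etcd.io/bbolt" // libpod vendors an equivalent bolt package
)

// lookupByPrefix resolves a possibly-partial ID against the keys of a
// bucket, mirroring the ForEach-based search in LookupContainer/LookupPod:
// ambiguous prefixes and misses are both errors.
func lookupByPrefix(db *bolt.DB, bucketName []byte, idOrPrefix string) (string, error) {
	var match string
	err := db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(bucketName)
		if bkt == nil {
			return errors.New("bucket not found")
		}
		// Keys are full IDs; values are ignored here (in libpod they are
		// nested buckets, so ForEach hands us nil values).
		return bkt.ForEach(func(checkID, _ []byte) error {
			if !bytes.HasPrefix(checkID, []byte(idOrPrefix)) {
				return nil
			}
			if match != "" {
				return fmt.Errorf("more than one result for ID %s", idOrPrefix)
			}
			match = string(checkID)
			return nil
		})
	})
	if err != nil {
		return "", err
	}
	if match == "" {
		return "", fmt.Errorf("no item with ID prefix %s", idOrPrefix)
	}
	return match, nil
}

func main() {
	db, err := bolt.Open("/tmp/prefix-demo.db", 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	_ = db.Update(func(tx *bolt.Tx) error {
		b, _ := tx.CreateBucketIfNotExists([]byte("ctrs"))
		return b.Put([]byte("8a35c62ff908"), []byte("myctr"))
	})
	fmt.Println(lookupByPrefix(db, []byte("ctrs"), "8a3"))
}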
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 122bb5935..ee2784cdd 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -339,7 +339,6 @@ func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
}
func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.Bucket) error {
- valid := true
ctrBkt := ctrsBkt.Bucket(id)
if ctrBkt == nil {
return errors.Wrapf(define.ErrNoSuchCtr, "container %s not found in DB", string(id))
@@ -386,7 +385,7 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
}
ctr.runtime = s.runtime
- ctr.valid = valid
+ ctr.valid = true
return nil
}
@@ -639,7 +638,7 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
}
// Add ctr to pod
- if pod != nil {
+ if pod != nil && podCtrs != nil {
if err := podCtrs.Put(ctrID, ctrName); err != nil {
return errors.Wrapf(err, "error adding container %s to pod %s", ctr.ID(), pod.ID())
}
@@ -737,7 +736,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
}
}
- if podDB != nil {
+ if podDB != nil && pod != nil {
// Check if the container is in the pod, remove it if it is
podCtrs := podDB.Bucket(containersBkt)
if podCtrs == nil {
diff --git a/libpod/common_test.go b/libpod/common_test.go
index df730098e..93ca7bc71 100644
--- a/libpod/common_test.go
+++ b/libpod/common_test.go
@@ -7,6 +7,7 @@ import (
"testing"
"time"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/lock"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/opencontainers/runtime-tools/generate"
@@ -49,7 +50,7 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error)
},
},
state: &ContainerState{
- State: ContainerStateRunning,
+ State: define.ContainerStateRunning,
ConfigPath: "/does/not/exist/specs/" + id,
RunDir: "/does/not/exist/tmp/",
Mounted: true,
@@ -88,13 +89,13 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error)
ctr.config.Labels["test"] = "testing"
- // Allocate a lock for the container
- lock, err := manager.AllocateLock()
+ // Allocate a containerLock for the container
+ containerLock, err := manager.AllocateLock()
if err != nil {
return nil, err
}
- ctr.lock = lock
- ctr.config.LockID = lock.ID()
+ ctr.lock = containerLock
+ ctr.config.LockID = containerLock.ID()
return ctr, nil
}
@@ -113,13 +114,13 @@ func getTestPod(id, name string, manager lock.Manager) (*Pod, error) {
valid: true,
}
- // Allocate a lock for the pod
- lock, err := manager.AllocateLock()
+ // Allocate a podLock for the pod
+ podLock, err := manager.AllocateLock()
if err != nil {
return nil, err
}
- pod.lock = lock
- pod.config.LockID = lock.ID()
+ pod.lock = podLock
+ pod.config.LockID = podLock.ID()
return pod, nil
}
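
The lock -> containerLock/podLock renames above are not cosmetic: the locals were shadowing the imported lock package, so any later reference to lock.Manager inside those functions would have resolved to the variable instead of the package. A self-contained illustration of the pitfall, with sync standing in for the lock package:

package main

import (
	"fmt"
	"sync" // stands in for libpod's lock package in this sketch
)

func main() {
	// Naming a local after an imported package shadows the package for the
	// rest of the scope:
	//
	//     lock, err := manager.AllocateLock()
	//     var m lock.Manager // no longer compiles: lock is now a variable
	//
	// Renaming the local (containerLock, podLock) keeps the package usable:
	containerLock := new(sync.Mutex)
	containerLock.Lock()
	defer containerLock.Unlock()

	var wg sync.WaitGroup // the package identifier is still in scope
	wg.Wait()
	fmt.Println("sync package still addressable")
}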
diff --git a/libpod/container.go b/libpod/container.go
index d05baa7e0..a9b512de9 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -21,31 +21,6 @@ import (
"github.com/pkg/errors"
)
-// ContainerStatus represents the current state of a container
-type ContainerStatus int
-
-const (
- // ContainerStateUnknown indicates that the container is in an error
- // state where information about it cannot be retrieved
- ContainerStateUnknown ContainerStatus = iota
- // ContainerStateConfigured indicates that the container has had its
- // storage configured but it has not been created in the OCI runtime
- ContainerStateConfigured ContainerStatus = iota
- // ContainerStateCreated indicates the container has been created in
- // the OCI runtime but not started
- ContainerStateCreated ContainerStatus = iota
- // ContainerStateRunning indicates the container is currently executing
- ContainerStateRunning ContainerStatus = iota
- // ContainerStateStopped indicates that the container was running but has
- // exited
- ContainerStateStopped ContainerStatus = iota
- // ContainerStatePaused indicates that the container has been paused
- ContainerStatePaused ContainerStatus = iota
- // ContainerStateExited indicates the the container has stopped and been
- // cleaned up
- ContainerStateExited ContainerStatus = iota
-)
-
// CgroupfsDefaultCgroupParent is the cgroup parent for CGroupFS in libpod
const CgroupfsDefaultCgroupParent = "/libpod_parent"
@@ -163,13 +138,16 @@ type Container struct {
// being checkpointed. If requestedIP is set it will be used instead
// of config.StaticIP.
requestedIP net.IP
+
+ // This is true if a container is restored from a checkpoint.
+ restoreFromCheckpoint bool
}
// ContainerState contains the current state of the container
// It is stored on disk in a tmpfs and recreated on reboot
type ContainerState struct {
// The current state of the running container
- State ContainerStatus `json:"state"`
+ State define.ContainerStatus `json:"state"`
// The path to the JSON OCI runtime spec for this container
ConfigPath string `json:"configPath,omitempty"`
// RunDir is a per-boot directory for container content
@@ -193,6 +171,8 @@ type ContainerState struct {
OOMKilled bool `json:"oomKilled,omitempty"`
// PID is the PID of a running container
PID int `json:"pid,omitempty"`
+ // ConmonPID is the PID of the container's conmon
+ ConmonPID int `json:"conmonPid,omitempty"`
// ExecSessions contains active exec sessions for container
// Exec session ID is mapped to PID of exec process
ExecSessions map[string]*ExecSession `json:"execSessions,omitempty"`
@@ -428,51 +408,6 @@ type ContainerNamedVolume struct {
Options []string `json:"options,omitempty"`
}
-// ContainerStatus returns a string representation for users
-// of a container state
-func (t ContainerStatus) String() string {
- switch t {
- case ContainerStateUnknown:
- return "unknown"
- case ContainerStateConfigured:
- return "configured"
- case ContainerStateCreated:
- return "created"
- case ContainerStateRunning:
- return "running"
- case ContainerStateStopped:
- return "stopped"
- case ContainerStatePaused:
- return "paused"
- case ContainerStateExited:
- return "exited"
- }
- return "bad state"
-}
-
-// StringToContainerStatus converts a string representation of a containers
-// status into an actual container status type
-func StringToContainerStatus(status string) (ContainerStatus, error) {
- switch status {
- case ContainerStateUnknown.String():
- return ContainerStateUnknown, nil
- case ContainerStateConfigured.String():
- return ContainerStateConfigured, nil
- case ContainerStateCreated.String():
- return ContainerStateCreated, nil
- case ContainerStateRunning.String():
- return ContainerStateRunning, nil
- case ContainerStateStopped.String():
- return ContainerStateStopped, nil
- case ContainerStatePaused.String():
- return ContainerStatePaused, nil
- case ContainerStateExited.String():
- return ContainerStateExited, nil
- default:
- return ContainerStateUnknown, errors.Wrapf(define.ErrInvalidArg, "unknown container state: %s", status)
- }
-}
-
// Config accessors
// Unlocked
@@ -823,13 +758,13 @@ func (c *Container) WorkingDir() string {
// Require locking
// State returns the current state of the container
-func (c *Container) State() (ContainerStatus, error) {
+func (c *Container) State() (define.ContainerStatus, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
- return ContainerStateUnknown, err
+ return define.ContainerStateUnknown, err
}
}
return c.state.State, nil
@@ -919,7 +854,7 @@ func (c *Container) OOMKilled() (bool, error) {
return c.state.OOMKilled, nil
}
-// PID returns the PID of the container
+// PID returns the PID of the container.
// If the container is not running, a pid of 0 will be returned. No error will
// occur.
func (c *Container) PID() (int, error) {
@@ -935,6 +870,22 @@ func (c *Container) PID() (int, error) {
return c.state.PID, nil
}
+// ConmonPID Returns the PID of the container's conmon process.
+// If the container is not running, a PID of 0 will be returned. No error will
+// occur.
+func (c *Container) ConmonPID() (int, error) {
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return -1, err
+ }
+ }
+
+ return c.state.ConmonPID, nil
+}
+
// ExecSessions retrieves active exec sessions running in the container
func (c *Container) ExecSessions() ([]string, error) {
if !c.batched {
@@ -1097,7 +1048,7 @@ func (c *Container) NamespacePath(ns LinuxNS) (string, error) {
}
}
- if c.state.State != ContainerStateRunning && c.state.State != ContainerStatePaused {
+ if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
return "", errors.Wrapf(define.ErrCtrStopped, "cannot get namespace path unless container %s is running", c.ID())
}
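
The new ConmonPID accessor repeats the locking discipline used throughout container.go: lock and re-sync state from the database unless the caller is batched and already holds the lock across several reads. A toy model of that batched-accessor pattern, with the real syncContainer and database plumbing elided:

package main

import (
	"fmt"
	"sync"
)

// item loosely models libpod's batched accessors: each getter locks and
// refreshes state on its own, unless the caller opted into a batch that
// holds the lock once across several reads.
type item struct {
	mu      sync.Mutex
	batched bool
	pid     int
}

func (i *item) PID() int {
	if !i.batched {
		i.mu.Lock()
		defer i.mu.Unlock()
		// a real implementation would re-sync state from the DB here
	}
	return i.pid
}

// Batch runs fn with the lock held once, letting fn call many accessors
// without repeated lock/sync round trips.
func (i *item) Batch(fn func(*item)) {
	i.mu.Lock()
	defer i.mu.Unlock()
	i.batched = true
	defer func() { i.batched = false }()
	fn(i)
}

func main() {
	it := &item{pid: 1234}
	it.Batch(func(b *item) { fmt.Println(b.PID()) })
}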
diff --git a/libpod/container.log.go b/libpod/container.log.go
new file mode 100644
index 000000000..7d0cd5bfb
--- /dev/null
+++ b/libpod/container.log.go
@@ -0,0 +1,73 @@
+package libpod
+
+import (
+ "os"
+
+ "github.com/containers/libpod/libpod/logs"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// Log is a runtime function that can read one or more container logs.
+func (r *Runtime) Log(containers []*Container, options *logs.LogOptions, logChannel chan *logs.LogLine) error {
+ for _, ctr := range containers {
+ if err := ctr.ReadLog(options, logChannel); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ReadLog reads a containers log based on the input options and returns loglines over a channel
+func (c *Container) ReadLog(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
+ // TODO Skip sending logs until journald logs can be read
+ // TODO make this not a magic string
+ if c.LogDriver() == JournaldLogging {
+ return c.readFromJournal(options, logChannel)
+ }
+ return c.readFromLogFile(options, logChannel)
+}
+
+func (c *Container) readFromLogFile(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
+ t, tailLog, err := logs.GetLogFile(c.LogPath(), options)
+ if err != nil {
+ // If the log file does not exist, this is not fatal.
+ if os.IsNotExist(errors.Cause(err)) {
+ return nil
+ }
+ return errors.Wrapf(err, "unable to read log file %s for %s ", c.ID(), c.LogPath())
+ }
+ options.WaitGroup.Add(1)
+ if len(tailLog) > 0 {
+ for _, nll := range tailLog {
+ nll.CID = c.ID()
+ if nll.Since(options.Since) {
+ logChannel <- nll
+ }
+ }
+ }
+
+ go func() {
+ var partial string
+ for line := range t.Lines {
+ nll, err := logs.NewLogLine(line.Text)
+ if err != nil {
+ logrus.Error(err)
+ continue
+ }
+ if nll.Partial() {
+ partial = partial + nll.Msg
+ continue
+ } else if !nll.Partial() && len(partial) > 1 {
+ nll.Msg = partial
+ partial = ""
+ }
+ nll.CID = c.ID()
+ if nll.Since(options.Since) {
+ logChannel <- nll
+ }
+ }
+ options.WaitGroup.Done()
+ }()
+ return nil
+}
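
ReadLog registers on options.WaitGroup before spawning its tail goroutine, so a caller can fan in log lines from several containers over one channel and close it only when every reader has finished. The shape of that fan-in, reduced to a self-contained sketch (logLine stands in for logs.LogLine):

package main

import (
	"fmt"
	"sync"
)

type logLine struct{ cid, msg string }

// readLog mimics ReadLog's contract: register on the WaitGroup before the
// producer goroutine starts, and Done when it finishes.
func readLog(cid string, wg *sync.WaitGroup, out chan<- *logLine) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 3; i++ {
			out <- &logLine{cid: cid, msg: fmt.Sprintf("line %d", i)}
		}
	}()
}

func main() {
	var wg sync.WaitGroup
	out := make(chan *logLine)
	for _, cid := range []string{"ctr1", "ctr2"} {
		readLog(cid, &wg, out)
	}
	// Close the channel once every per-container reader is done.
	go func() { wg.Wait(); close(out) }()
	for l := range out {
		fmt.Println(l.cid, l.msg)
	}
}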
diff --git a/libpod/container_api.go b/libpod/container_api.go
index b8c339a39..3dd84b02c 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -7,7 +7,6 @@ import (
"io/ioutil"
"os"
"strconv"
- "sync"
"time"
"github.com/containers/libpod/libpod/define"
@@ -37,9 +36,9 @@ func (c *Container) Init(ctx context.Context) (err error) {
}
}
- if !(c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateStopped ||
- c.state.State == ContainerStateExited) {
+ if !(c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateStopped ||
+ c.state.State == define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID())
}
@@ -55,7 +54,7 @@ func (c *Container) Init(ctx context.Context) (err error) {
return err
}
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
// Reinitialize the container
return c.reinit(ctx, false)
}
@@ -120,20 +119,24 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams,
attachChan := make(chan error)
// We need to ensure that we don't return until start() fired in attach.
- // Use a WaitGroup to sync this.
- wg := new(sync.WaitGroup)
- wg.Add(1)
+ // Use a channel to sync
+ startedChan := make(chan bool)
// Attach to the container before starting it
go func() {
- if err := c.attach(streams, keys, resize, true, wg); err != nil {
+ if err := c.attach(streams, keys, resize, true, startedChan); err != nil {
attachChan <- err
}
close(attachChan)
}()
- wg.Wait()
- c.newContainerEvent(events.Attach)
+ select {
+ case err := <-attachChan:
+ return nil, err
+ case <-startedChan:
+ c.newContainerEvent(events.Attach)
+ }
+
return attachChan, nil
}
@@ -178,14 +181,14 @@ func (c *Container) StopWithTimeout(timeout uint) error {
}
}
- if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateUnknown ||
- c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateUnknown ||
+ c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s is in state %s", c.ID(), c.state.State.String())
}
- if c.state.State == ContainerStateStopped ||
- c.state.State == ContainerStateExited {
+ if c.state.State == define.ContainerStateStopped ||
+ c.state.State == define.ContainerStateExited {
return define.ErrCtrStopped
}
defer c.newContainerEvent(events.Stop)
@@ -203,7 +206,7 @@ func (c *Container) Kill(signal uint) error {
}
}
- if c.state.State != ContainerStateRunning {
+ if c.state.State != define.ContainerStateRunning {
return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
}
@@ -241,7 +244,7 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir
conState := c.state.State
// TODO can probably relax this once we track exec sessions
- if conState != ContainerStateRunning {
+ if conState != define.ContainerStateRunning {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot exec into container that is not running")
}
if privileged || c.config.Privileged {
@@ -399,9 +402,9 @@ func (c *Container) Attach(streams *AttachStreams, keys string, resize <-chan re
c.lock.Unlock()
}
- if c.state.State != ContainerStateCreated &&
- c.state.State != ContainerStateRunning &&
- c.state.State != ContainerStateExited {
+ if c.state.State != define.ContainerStateCreated &&
+ c.state.State != define.ContainerStateRunning &&
+ c.state.State != define.ContainerStateExited {
return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
}
defer c.newContainerEvent(events.Attach)
@@ -440,7 +443,7 @@ func (c *Container) Unmount(force bool) error {
return errors.Wrapf(err, "can't determine how many times %s is mounted, refusing to unmount", c.ID())
}
if mounted == 1 {
- if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
}
if len(c.state.ExecSessions) != 0 {
@@ -464,10 +467,10 @@ func (c *Container) Pause() error {
}
}
- if c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "%q is already paused", c.ID())
}
- if c.state.State != ContainerStateRunning {
+ if c.state.State != define.ContainerStateRunning {
return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State)
}
defer c.newContainerEvent(events.Pause)
@@ -485,7 +488,7 @@ func (c *Container) Unpause() error {
}
}
- if c.state.State != ContainerStatePaused {
+ if c.state.State != define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID())
}
defer c.newContainerEvent(events.Unpause)
@@ -578,7 +581,7 @@ func (c *Container) Cleanup(ctx context.Context) error {
}
// Check if state is good
- if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
}
@@ -656,9 +659,9 @@ func (c *Container) Sync() error {
// If runtime knows about the container, update its status in runtime
// And then save back to disk
- if (c.state.State != ContainerStateUnknown) &&
- (c.state.State != ContainerStateConfigured) &&
- (c.state.State != ContainerStateExited) {
+ if (c.state.State != define.ContainerStateUnknown) &&
+ (c.state.State != define.ContainerStateConfigured) &&
+ (c.state.State != define.ContainerStateExited) {
oldState := c.state.State
if err := c.ociRuntime.updateContainerStatus(c, true); err != nil {
return err
@@ -687,27 +690,27 @@ func (c *Container) Refresh(ctx context.Context) error {
}
wasCreated := false
- if c.state.State == ContainerStateCreated {
+ if c.state.State == define.ContainerStateCreated {
wasCreated = true
}
wasRunning := false
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
wasRunning = true
}
wasPaused := false
- if c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStatePaused {
wasPaused = true
}
// First, unpause the container if it's paused
- if c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStatePaused {
if err := c.unpause(); err != nil {
return err
}
}
// Next, if the container is running, stop it
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
if err := c.stop(c.config.StopTimeout); err != nil {
return err
}
@@ -724,7 +727,7 @@ func (c *Container) Refresh(ctx context.Context) error {
// If the container is in ContainerStateStopped, we need to delete it
// from the runtime and clear conmon state
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
if err := c.delete(ctx); err != nil {
return err
}
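
In StartAndAttach, swapping the WaitGroup for startedChan plus a select fixes a potential hang: with a WaitGroup, an attach that errored before calling start() would never reach wg.Done(), leaving the caller blocked in wg.Wait() forever. With the channel form, the caller unblocks on whichever happens first. Reduced to a runnable sketch:

package main

import (
	"errors"
	"fmt"
)

// attachAndStart mirrors the shape of the rewritten StartAndAttach: the
// attach goroutine either reports an error before start fires or signals
// startedChan, and the caller selects on both so an early attach failure
// cannot leave it blocked the way wg.Wait() could.
func attachAndStart(failEarly bool) error {
	attachChan := make(chan error)
	startedChan := make(chan bool)

	go func() {
		if failEarly {
			attachChan <- errors.New("attach failed before start")
			return
		}
		startedChan <- true
		// ... the real attach loop would service I/O here until exit ...
	}()

	select {
	case err := <-attachChan:
		return err
	case <-startedChan:
		return nil
	}
}

func main() {
	fmt.Println(attachAndStart(true))  // attach failed before start
	fmt.Println(attachAndStart(false)) // <nil>
}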
diff --git a/libpod/container_attach_linux.go b/libpod/container_attach_linux.go
index 5293480f0..17b09fccc 100644
--- a/libpod/container_attach_linux.go
+++ b/libpod/container_attach_linux.go
@@ -8,7 +8,6 @@ import (
"net"
"os"
"path/filepath"
- "sync"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/kubeutils"
@@ -20,10 +19,6 @@ import (
"k8s.io/client-go/tools/remotecommand"
)
-//#include <sys/un.h>
-// extern int unix_path_length(){struct sockaddr_un addr; return sizeof(addr.sun_path) - 1;}
-import "C"
-
/* Sync with stdpipe_t in conmon.c */
const (
AttachPipeStdin = 1
@@ -33,32 +28,35 @@ const (
// Attach to the given container
// Does not check if state is appropriate
-func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, wg *sync.WaitGroup) error {
+func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool) error {
if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput {
return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
}
- // Check the validity of the provided keys first
- var err error
- detachKeys := []byte{}
- if len(keys) > 0 {
- detachKeys, err = term.ToBytes(keys)
- if err != nil {
- return errors.Wrapf(err, "invalid detach keys")
- }
- }
-
logrus.Debugf("Attaching to container %s", c.ID())
- return c.attachContainerSocket(resize, detachKeys, streams, startContainer, wg)
+ return c.attachContainerSocket(resize, keys, streams, startContainer, started)
}
// attachContainerSocket connects to the container's attach socket and deals with the IO.
-// wg is only required if startContainer is true
+// started is only required if startContainer is true
// TODO add a channel to allow interrupting
-func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSize, detachKeys []byte, streams *AttachStreams, startContainer bool, wg *sync.WaitGroup) error {
- if startContainer && wg == nil {
- return errors.Wrapf(define.ErrInternal, "wait group not passed when startContainer set")
+func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSize, keys string, streams *AttachStreams, startContainer bool, started chan bool) error {
+ if startContainer && started == nil {
+ return errors.Wrapf(define.ErrInternal, "started chan not passed when startContainer set")
+ }
+
+ // Use default detach keys when keys aren't passed or specified in libpod.conf
+ if len(keys) == 0 {
+ keys = DefaultDetachKeys
+ }
+
+ // Check the validity of the provided keys
+ detachKeys := []byte{}
+ var err error
+ detachKeys, err = term.ToBytes(keys)
+ if err != nil {
+ return errors.Wrapf(err, "invalid detach keys")
}
kubeutils.HandleResizing(resize, func(size remotecommand.TerminalSize) {
@@ -78,7 +76,7 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi
socketPath := c.AttachSocketPath()
- maxUnixLength := int(C.unix_path_length())
+ maxUnixLength := unixPathLength()
if maxUnixLength < len(socketPath) {
socketPath = socketPath[0:maxUnixLength]
}
@@ -97,7 +95,7 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi
if err := c.start(); err != nil {
return err
}
- wg.Done()
+ started <- true
}
receiveStdoutError := make(chan error)
@@ -147,7 +145,9 @@ func redirectResponseToOutputStreams(outputStream, errorStream io.Writer, writeO
default:
logrus.Infof("Received unexpected attach type %+d", buf[0])
}
-
+ if dst == nil {
+ return errors.New("output destination cannot be nil")
+ }
if doWrite {
nw, ew := dst.Write(buf[1:nr])
if ew != nil {
diff --git a/libpod/container_attach_linux_cgo.go b/libpod/container_attach_linux_cgo.go
new file mode 100644
index 000000000..d81243360
--- /dev/null
+++ b/libpod/container_attach_linux_cgo.go
@@ -0,0 +1,11 @@
+//+build linux,cgo
+
+package libpod
+
+//#include <sys/un.h>
+// extern int unix_path_length(){struct sockaddr_un addr; return sizeof(addr.sun_path) - 1;}
+import "C"
+
+func unixPathLength() int {
+ return int(C.unix_path_length())
+}
diff --git a/libpod/container_attach_linux_nocgo.go b/libpod/container_attach_linux_nocgo.go
new file mode 100644
index 000000000..a514a555d
--- /dev/null
+++ b/libpod/container_attach_linux_nocgo.go
@@ -0,0 +1,7 @@
+//+build linux,!cgo
+
+package libpod
+
+func unixPathLength() int {
+ return 107
+}
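
The cgo/nocgo split keeps the attach code buildable without cgo: the cgo variant asks the C headers for sizeof(sockaddr_un.sun_path) - 1, while the fallback hard-codes 107, which is what that expression yields on Linux (sun_path is a 108-byte array). The attach path then clamps over-long socket paths to that limit, as in the earlier hunk; a standalone sketch of the clamp, assuming the 107-byte fallback:

package main

import (
	"fmt"
	"strings"
)

// unixPathLength mirrors the !cgo fallback: Linux's sockaddr_un.sun_path is
// a 108-byte array, leaving 107 usable characters.
func unixPathLength() int { return 107 }

// truncateSocketPath applies the same clamp the attach code performs on
// c.AttachSocketPath().
func truncateSocketPath(socketPath string) string {
	if maxLen := unixPathLength(); len(socketPath) > maxLen {
		return socketPath[0:maxLen]
	}
	return socketPath
}

func main() {
	long := "/var/run/libpod/" + strings.Repeat("x", 120) + "/attach"
	fmt.Println(len(truncateSocketPath(long))) // 107
}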
diff --git a/libpod/container_attach_unsupported.go b/libpod/container_attach_unsupported.go
index 2c8718c67..c27ce0799 100644
--- a/libpod/container_attach_unsupported.go
+++ b/libpod/container_attach_unsupported.go
@@ -3,12 +3,10 @@
package libpod
import (
- "sync"
-
"github.com/containers/libpod/libpod/define"
"k8s.io/client-go/tools/remotecommand"
)
-func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, wg *sync.WaitGroup) error {
+func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool) error {
return define.ErrNotImplemented
}
diff --git a/libpod/container_commit.go b/libpod/container_commit.go
index 82115455a..17586bfad 100644
--- a/libpod/container_commit.go
+++ b/libpod/container_commit.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/buildah"
"github.com/containers/buildah/util"
is "github.com/containers/image/storage"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/libpod/image"
"github.com/pkg/errors"
@@ -48,7 +49,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
}
}
- if c.state.State == ContainerStateRunning && options.Pause {
+ if c.state.State == define.ContainerStateRunning && options.Pause {
if err := c.ociRuntime.pauseContainer(c); err != nil {
return nil, errors.Wrapf(err, "error pausing container %q", c.ID())
}
diff --git a/libpod/container_graph.go b/libpod/container_graph.go
index c266c5227..50dbdfbe4 100644
--- a/libpod/container_graph.go
+++ b/libpod/container_graph.go
@@ -244,13 +244,13 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError
// Start the container (only if it is not running)
if !ctrErrored {
- if !restart && node.container.state.State != ContainerStateRunning {
+ if !restart && node.container.state.State != define.ContainerStateRunning {
if err := node.container.initAndStart(ctx); err != nil {
ctrErrored = true
ctrErrors[node.id] = err
}
}
- if restart && node.container.state.State != ContainerStatePaused && node.container.state.State != ContainerStateUnknown {
+ if restart && node.container.state.State != define.ContainerStatePaused && node.container.state.State != define.ContainerStateUnknown {
if err := node.container.restartWithTimeout(ctx, node.container.config.StopTimeout); err != nil {
ctrErrored = true
ctrErrors[node.id] = err
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 3ac774060..2de78254c 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -5,6 +5,7 @@ import (
"time"
"github.com/containers/image/manifest"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/driver"
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -144,6 +145,7 @@ type InspectContainerState struct {
OOMKilled bool `json:"OOMKilled"`
Dead bool `json:"Dead"`
Pid int `json:"Pid"`
+ ConmonPid int `json:"ConmonPid,omitempty"`
ExitCode int32 `json:"ExitCode"`
Error string `json:"Error"` // TODO
StartedAt time.Time `json:"StartedAt"`
@@ -204,12 +206,12 @@ func (c *Container) Inspect(size bool) (*InspectContainerData, error) {
func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) (*InspectContainerData, error) {
config := c.config
runtimeInfo := c.state
- spec, err := c.specFromState()
+ stateSpec, err := c.specFromState()
if err != nil {
return nil, err
}
- // Process is allowed to be nil in the spec
+ // Process is allowed to be nil in the stateSpec
args := []string{}
if config.Spec.Process != nil {
args = config.Spec.Process.Args
@@ -242,7 +244,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
}
}
- mounts, err := c.getInspectMounts(spec)
+ mounts, err := c.getInspectMounts(stateSpec)
if err != nil {
return nil, err
}
@@ -253,13 +255,14 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
Path: path,
Args: args,
State: &InspectContainerState{
- OciVersion: spec.Version,
+ OciVersion: stateSpec.Version,
Status: runtimeInfo.State.String(),
- Running: runtimeInfo.State == ContainerStateRunning,
- Paused: runtimeInfo.State == ContainerStatePaused,
+ Running: runtimeInfo.State == define.ContainerStateRunning,
+ Paused: runtimeInfo.State == define.ContainerStatePaused,
OOMKilled: runtimeInfo.OOMKilled,
Dead: runtimeInfo.State.String() == "bad state",
Pid: runtimeInfo.PID,
+ ConmonPid: runtimeInfo.ConmonPID,
ExitCode: runtimeInfo.ExitCode,
Error: "", // can't get yet
StartedAt: runtimeInfo.StartedTime,
@@ -282,9 +285,9 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
Driver: driverData.Name,
MountLabel: config.MountLabel,
ProcessLabel: config.ProcessLabel,
- EffectiveCaps: spec.Process.Capabilities.Effective,
- BoundingCaps: spec.Process.Capabilities.Bounding,
- AppArmorProfile: spec.Process.ApparmorProfile,
+ EffectiveCaps: stateSpec.Process.Capabilities.Effective,
+ BoundingCaps: stateSpec.Process.Capabilities.Bounding,
+ AppArmorProfile: stateSpec.Process.ApparmorProfile,
ExecIDs: execIDs,
GraphDriver: driverData,
Mounts: mounts,
@@ -335,7 +338,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
// Get information on the container's network namespace (if present)
data = c.getContainerNetworkInfo(data)
- inspectConfig, err := c.generateInspectContainerConfig(spec)
+ inspectConfig, err := c.generateInspectContainerConfig(stateSpec)
if err != nil {
return nil, err
}
@@ -367,58 +370,41 @@ func (c *Container) getInspectMounts(ctrSpec *spec.Spec) ([]InspectMount, error)
return inspectMounts, nil
}
- // We need to parse all named volumes and mounts into maps, so we don't
- // end up with repeated lookups for each user volume.
- // Map destination to struct, as destination is what is stored in
- // UserVolumes.
- namedVolumes := make(map[string]*ContainerNamedVolume)
- mounts := make(map[string]spec.Mount)
- for _, namedVol := range c.config.NamedVolumes {
- namedVolumes[namedVol.Dest] = namedVol
- }
- for _, mount := range ctrSpec.Mounts {
- mounts[mount.Destination] = mount
- }
+ namedVolumes, mounts := c.sortUserVolumes(ctrSpec)
+ for _, volume := range namedVolumes {
+ mountStruct := InspectMount{}
+ mountStruct.Type = "volume"
+ mountStruct.Destination = volume.Dest
+ mountStruct.Name = volume.Name
+
+ // For src and driver, we need to look up the named
+ // volume.
+ volFromDB, err := c.runtime.state.Volume(volume.Name)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID())
+ }
+ mountStruct.Driver = volFromDB.Driver()
+ mountStruct.Source = volFromDB.MountPoint()
- for _, vol := range c.config.UserVolumes {
- // We need to look up the volumes.
- // First: is it a named volume?
- if volume, ok := namedVolumes[vol]; ok {
- mountStruct := InspectMount{}
- mountStruct.Type = "volume"
- mountStruct.Destination = volume.Dest
- mountStruct.Name = volume.Name
-
- // For src and driver, we need to look up the named
- // volume.
- volFromDB, err := c.runtime.state.Volume(volume.Name)
- if err != nil {
- return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID())
- }
- mountStruct.Driver = volFromDB.Driver()
- mountStruct.Source = volFromDB.MountPoint()
-
- parseMountOptionsForInspect(volume.Options, &mountStruct)
-
- inspectMounts = append(inspectMounts, mountStruct)
- } else if mount, ok := mounts[vol]; ok {
- // It's a mount.
- // Is it a tmpfs? If so, discard.
- if mount.Type == "tmpfs" {
- continue
- }
-
- mountStruct := InspectMount{}
- mountStruct.Type = "bind"
- mountStruct.Source = mount.Source
- mountStruct.Destination = mount.Destination
-
- parseMountOptionsForInspect(mount.Options, &mountStruct)
-
- inspectMounts = append(inspectMounts, mountStruct)
+ parseMountOptionsForInspect(volume.Options, &mountStruct)
+
+ inspectMounts = append(inspectMounts, mountStruct)
+ }
+ for _, mount := range mounts {
+ // It's a mount.
+ // Is it a tmpfs? If so, discard.
+ if mount.Type == "tmpfs" {
+ continue
}
- // We couldn't find a mount. Log a warning.
- logrus.Warnf("Could not find mount at destination %q when building inspect output for container %s", vol, c.ID())
+
+ mountStruct := InspectMount{}
+ mountStruct.Type = "bind"
+ mountStruct.Source = mount.Source
+ mountStruct.Destination = mount.Destination
+
+ parseMountOptionsForInspect(mount.Options, &mountStruct)
+
+ inspectMounts = append(inspectMounts, mountStruct)
}
return inspectMounts, nil
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index fcd6a990a..c409da96a 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -156,7 +156,7 @@ func (c *Container) waitForExitFileAndSync() error {
// Reset our state
c.state.ExitCode = -1
c.state.FinishedTime = time.Now()
- c.state.State = ContainerStateStopped
+ c.state.State = define.ContainerStateStopped
if err2 := c.save(); err2 != nil {
logrus.Errorf("Error saving container %s state: %v", c.ID(), err2)
@@ -241,9 +241,9 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
// Is the container running again?
// If so, we don't have to do anything
- if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
return false, nil
- } else if c.state.State == ContainerStateUnknown {
+ } else if c.state.State == define.ContainerStateUnknown {
return false, errors.Wrapf(define.ErrInternal, "invalid container state encountered in restart attempt!")
}
@@ -267,13 +267,13 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
return false, err
}
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, true); err != nil {
return false, err
}
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
+ } else if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateExited {
// Initialize the container
if err := c.init(ctx, true); err != nil {
return false, err
@@ -295,9 +295,9 @@ func (c *Container) syncContainer() error {
}
// If runtime knows about the container, update its status in runtime
// And then save back to disk
- if (c.state.State != ContainerStateUnknown) &&
- (c.state.State != ContainerStateConfigured) &&
- (c.state.State != ContainerStateExited) {
+ if (c.state.State != define.ContainerStateUnknown) &&
+ (c.state.State != define.ContainerStateConfigured) &&
+ (c.state.State != define.ContainerStateExited) {
oldState := c.state.State
// TODO: optionally replace this with a stat for the exit file
if err := c.ociRuntime.updateContainerStatus(c, false); err != nil {
@@ -307,8 +307,8 @@ func (c *Container) syncContainer() error {
if c.state.State != oldState {
// Check for a restart policy match
if c.config.RestartPolicy != RestartPolicyNone && c.config.RestartPolicy != RestartPolicyNo &&
- (oldState == ContainerStateRunning || oldState == ContainerStatePaused) &&
- (c.state.State == ContainerStateStopped || c.state.State == ContainerStateExited) &&
+ (oldState == define.ContainerStateRunning || oldState == define.ContainerStatePaused) &&
+ (c.state.State == define.ContainerStateStopped || c.state.State == define.ContainerStateExited) &&
!c.state.StoppedByUser {
c.state.RestartPolicyMatch = true
}
@@ -336,7 +336,7 @@ func (c *Container) setupStorage(ctx context.Context) error {
return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
}
- if c.state.State != ContainerStateConfigured {
+ if c.state.State != define.ContainerStateConfigured {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID())
}
@@ -352,6 +352,16 @@ func (c *Container) setupStorage(ctx context.Context) error {
},
LabelOpts: c.config.LabelOpts,
}
+ if c.restoreFromCheckpoint {
+ // If restoring from a checkpoint, the root file-system
+ // needs to be mounted with the same SELinux labels as
+ // it was mounted previously.
+ if options.Flags == nil {
+ options.Flags = make(map[string]interface{})
+ }
+ options.Flags["ProcessLabel"] = c.config.ProcessLabel
+ options.Flags["MountLabel"] = c.config.MountLabel
+ }
if c.config.Privileged {
privOpt := func(opt string) bool {
for _, privopt := range []string{"nodev", "nosuid", "noexec"} {
@@ -418,7 +428,7 @@ func (c *Container) setupStorage(ctx context.Context) error {
// Tear down a container's storage prior to removal
func (c *Container) teardownStorage() error {
- if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID())
}
@@ -452,10 +462,11 @@ func (c *Container) teardownStorage() error {
// It does not save the results - assumes the database will do that for us
func resetState(state *ContainerState) error {
state.PID = 0
+ state.ConmonPID = 0
state.Mountpoint = ""
state.Mounted = false
- if state.State != ContainerStateExited {
- state.State = ContainerStateConfigured
+ if state.State != define.ContainerStateExited {
+ state.State = define.ContainerStateConfigured
}
state.ExecSessions = make(map[string]*ExecSession)
state.NetworkStatus = nil
@@ -554,7 +565,7 @@ func (c *Container) removeConmonFiles() error {
if !os.IsNotExist(err) {
return errors.Wrapf(err, "error running stat on container %s exit file", c.ID())
}
- } else if err == nil {
+ } else {
// Rename should replace the old exit file (if it exists)
if err := os.Rename(exitFile, oldExitFile); err != nil {
return errors.Wrapf(err, "error renaming container %s exit file", c.ID())
@@ -567,11 +578,11 @@ func (c *Container) removeConmonFiles() error {
func (c *Container) export(path string) error {
mountPoint := c.state.Mountpoint
if !c.state.Mounted {
- mount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
+ containerMount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
if err != nil {
return errors.Wrapf(err, "error mounting container %q", c.ID())
}
- mountPoint = mount
+ mountPoint = containerMount
defer func() {
if _, err := c.runtime.store.Unmount(c.ID(), false); err != nil {
logrus.Errorf("error unmounting container %q: %v", c.ID(), err)
@@ -609,7 +620,7 @@ func (c *Container) isStopped() (bool, error) {
if err != nil {
return true, err
}
- return (c.state.State != ContainerStateRunning && c.state.State != ContainerStatePaused), nil
+ return c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused, nil
}
// save container state to the database
@@ -625,10 +636,10 @@ func (c *Container) save() error {
// Otherwise, this function will return with error if there are dependencies of this container that aren't running.
func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err error) {
// Container must be created or stopped to be started
- if !(c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateCreated ||
- c.state.State == ContainerStateStopped ||
- c.state.State == ContainerStateExited) {
+ if !(c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateCreated ||
+ c.state.State == define.ContainerStateStopped ||
+ c.state.State == define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
}
@@ -654,13 +665,13 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err
return err
}
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, false); err != nil {
return err
}
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
+ } else if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateExited {
// Or initialize it if necessary
if err := c.init(ctx, false); err != nil {
return err
@@ -763,7 +774,7 @@ func (c *Container) getAllDependencies(visited map[string]*Container) error {
}
// if the dependency is already running, we can assume its dependencies are also running
// so no need to add them to those we need to start
- if status != ContainerStateRunning {
+ if status != define.ContainerStateRunning {
visited[depID] = dep
if err := dep.getAllDependencies(visited); err != nil {
return err
@@ -795,7 +806,7 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
if err != nil {
return nil, errors.Wrapf(err, "error retrieving state of dependency %s of container %s", dep, c.ID())
}
- if state != ContainerStateRunning {
+ if state != define.ContainerStateRunning {
notRunning = append(notRunning, dep)
}
depCtrs[dep] = depCtr
@@ -824,7 +835,7 @@ func (c *Container) checkDependenciesRunningLocked(depCtrs map[string]*Container
return nil, err
}
- if depCtr.state.State != ContainerStateRunning {
+ if depCtr.state.State != define.ContainerStateRunning {
notRunning = append(notRunning, dep)
}
}
@@ -855,18 +866,18 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
span.SetTag("struct", "container")
defer span.Finish()
- // Generate the OCI spec
- spec, err := c.generateSpec(ctx)
+ // Generate the OCI newSpec
+ newSpec, err := c.generateSpec(ctx)
if err != nil {
return err
}
- // Save the OCI spec to disk
- if err := c.saveSpec(spec); err != nil {
+ // Save the OCI newSpec to disk
+ if err := c.saveSpec(newSpec); err != nil {
return err
}
- // With the spec complete, do an OCI create
+ // With the newSpec complete, do an OCI create
if err := c.ociRuntime.createContainer(c, c.config.CgroupParent, nil); err != nil {
return err
}
@@ -875,7 +886,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
c.state.ExitCode = 0
c.state.Exited = false
- c.state.State = ContainerStateCreated
+ c.state.State = define.ContainerStateCreated
c.state.StoppedByUser = false
c.state.RestartPolicyMatch = false
@@ -906,7 +917,7 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
// If the container is not ContainerStateStopped or
// ContainerStateCreated, do nothing.
- if c.state.State != ContainerStateStopped && c.state.State != ContainerStateCreated {
+ if c.state.State != define.ContainerStateStopped && c.state.State != define.ContainerStateCreated {
return nil
}
@@ -922,10 +933,10 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
// If we were Stopped, we are now Exited, as we've removed ourself
// from the runtime.
// If we were Created, we are now Configured.
- if c.state.State == ContainerStateStopped {
- c.state.State = ContainerStateExited
- } else if c.state.State == ContainerStateCreated {
- c.state.State = ContainerStateConfigured
+ if c.state.State == define.ContainerStateStopped {
+ c.state.State = define.ContainerStateExited
+ } else if c.state.State == define.ContainerStateCreated {
+ c.state.State = define.ContainerStateConfigured
}
if c.valid {
@@ -964,16 +975,16 @@ func (c *Container) reinit(ctx context.Context, retainRetries bool) error {
// Does not lock or check validity
func (c *Container) initAndStart(ctx context.Context) (err error) {
// If we are ContainerStateUnknown, throw an error
- if c.state.State == ContainerStateUnknown {
+ if c.state.State == define.ContainerStateUnknown {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in an unknown state", c.ID())
}
// If we are running, do nothing
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
return nil
}
// If we are paused, throw an error
- if c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start paused container %s", c.ID())
}
@@ -991,14 +1002,14 @@ func (c *Container) initAndStart(ctx context.Context) (err error) {
// If we are ContainerStateStopped we need to remove from runtime
// And reset to ContainerStateConfigured
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
logrus.Debugf("Recreating container %s in OCI runtime", c.ID())
if err := c.reinit(ctx, false); err != nil {
return err
}
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
+ } else if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateExited {
if err := c.init(ctx, false); err != nil {
return err
}
@@ -1019,7 +1030,7 @@ func (c *Container) start() error {
}
logrus.Debugf("Started container %s", c.ID())
- c.state.State = ContainerStateRunning
+ c.state.State = define.ContainerStateRunning
if c.config.HealthCheckConfig != nil {
if err := c.updateHealthStatus(HealthCheckStarting); err != nil {
@@ -1043,6 +1054,8 @@ func (c *Container) stop(timeout uint) error {
return err
}
+ c.state.PID = 0
+ c.state.ConmonPID = 0
c.state.StoppedByUser = true
if err := c.save(); err != nil {
return errors.Wrapf(err, "error saving container %s state after stopping", c.ID())
@@ -1060,7 +1073,7 @@ func (c *Container) pause() error {
logrus.Debugf("Paused container %s", c.ID())
- c.state.State = ContainerStatePaused
+ c.state.State = define.ContainerStatePaused
return c.save()
}
@@ -1073,20 +1086,20 @@ func (c *Container) unpause() error {
logrus.Debugf("Unpaused container %s", c.ID())
- c.state.State = ContainerStateRunning
+ c.state.State = define.ContainerStateRunning
return c.save()
}
// Internal, non-locking function to restart a container
func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err error) {
- if c.state.State == ContainerStateUnknown || c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStateUnknown || c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state")
}
c.newContainerEvent(events.Restart)
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
if err := c.stop(timeout); err != nil {
return err
}
@@ -1102,13 +1115,13 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err e
return err
}
- if c.state.State == ContainerStateStopped {
+ if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, false); err != nil {
return err
}
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
+ } else if c.state.State == define.ContainerStateConfigured ||
+ c.state.State == define.ContainerStateExited {
// Initialize the container
if err := c.init(ctx, false); err != nil {
return err
@@ -1164,8 +1177,8 @@ func (c *Container) cleanupStorage() error {
return nil
}
- for _, mount := range c.config.Mounts {
- if err := c.unmountSHM(mount); err != nil {
+ for _, containerMount := range c.config.Mounts {
+ if err := c.unmountSHM(containerMount); err != nil {
return err
}
}
@@ -1396,14 +1409,14 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (exten
}
return nil, err
}
- hooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
+ ociHooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
if err != nil {
return nil, err
}
- if len(hooks) > 0 || config.Hooks != nil {
- logrus.Warnf("implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load hooks from this directory", hDir)
+ if len(ociHooks) > 0 || config.Hooks != nil {
+ logrus.Warnf("implicit hook directories are deprecated; set --ociHooks-dir=%q explicitly to continue to load ociHooks from this directory", hDir)
}
- for i, hook := range hooks {
+ for i, hook := range ociHooks {
allHooks[i] = hook
}
}
@@ -1482,12 +1495,12 @@ func (c *Container) copyWithTarFromImage(src, dest string) error {
// If it is, we'll remove the container anyways.
// Returns nil if safe to remove, or an error describing why it's unsafe if not.
func (c *Container) checkReadyForRemoval() error {
- if c.state.State == ContainerStateUnknown {
+ if c.state.State == define.ContainerStateUnknown {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID())
}
- if c.state.State == ContainerStateRunning ||
- c.state.State == ContainerStatePaused {
+ if c.state.State == define.ContainerStateRunning ||
+ c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed", c.ID(), c.state.State.String())
}
@@ -1534,3 +1547,34 @@ func (c *Container) prepareCheckpointExport() (err error) {
return nil
}
+
+// sortUserVolumes sorts the volumes specified for a container
+// between named and normal volumes
+func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume, []spec.Mount) {
+ namedUserVolumes := []*ContainerNamedVolume{}
+ userMounts := []spec.Mount{}
+
+ // We need to parse all named volumes and mounts into maps, so we don't
+ // end up with repeated lookups for each user volume.
+ // Map destination to struct, as destination is what is stored in
+ // UserVolumes.
+ namedVolumes := make(map[string]*ContainerNamedVolume)
+ mounts := make(map[string]spec.Mount)
+ for _, namedVol := range c.config.NamedVolumes {
+ namedVolumes[namedVol.Dest] = namedVol
+ }
+ for _, mount := range ctrSpec.Mounts {
+ mounts[mount.Destination] = mount
+ }
+
+ for _, vol := range c.config.UserVolumes {
+ if volume, ok := namedVolumes[vol]; ok {
+ namedUserVolumes = append(namedUserVolumes, volume)
+ } else if mount, ok := mounts[vol]; ok {
+ userMounts = append(userMounts, mount)
+ } else {
+ logrus.Warnf("Could not find mount at destination %q when parsing user volumes for container %s", vol, c.ID())
+ }
+ }
+ return namedUserVolumes, userMounts
+}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index e93e0cad8..686a595de 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -185,9 +185,13 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// If network namespace was requested, add it now
if c.config.CreateNetNS {
if c.config.PostConfigureNetNS {
- g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, "")
+ if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, ""); err != nil {
+ return nil, err
+ }
} else {
- g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path())
+ if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path()); err != nil {
+ return nil, err
+ }
}
}
@@ -415,7 +419,9 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if rootPropagation != "" {
logrus.Debugf("set root propagation to %q", rootPropagation)
- g.SetLinuxRootPropagation(rootPropagation)
+ if err := g.SetLinuxRootPropagation(rootPropagation); err != nil {
+ return nil, err
+ }
}
// Warning: precreate hooks may alter g.Config in place.
@@ -561,7 +567,9 @@ func (c *Container) checkpointRestoreLabelLog(fileName string) (err error) {
if err != nil {
return errors.Wrapf(err, "failed to create CRIU log file %q", dumpLog)
}
- logFile.Close()
+ if err := logFile.Close(); err != nil {
+ logrus.Errorf("unable to close log file: %q", err)
+ }
if err = label.SetFileLabel(dumpLog, c.MountLabel()); err != nil {
return errors.Wrapf(err, "failed to label CRIU log file %q", dumpLog)
}
@@ -573,7 +581,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
return err
}
- if c.state.State != ContainerStateRunning {
+ if c.state.State != define.ContainerStateRunning {
return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
}
@@ -605,7 +613,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
logrus.Debugf("Checkpointed container %s", c.ID())
if !options.KeepRunning {
- c.state.State = ContainerStateStopped
+ c.state.State = define.ContainerStateStopped
// Cleanup Storage and Network
if err := c.cleanup(ctx); err != nil {
@@ -620,9 +628,11 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
"config.dump",
"spec.dump",
}
- for _, delete := range cleanup {
- file := filepath.Join(c.bundlePath(), delete)
- os.Remove(file)
+ for _, del := range cleanup {
+ file := filepath.Join(c.bundlePath(), del)
+ if err := os.Remove(file); err != nil {
+ logrus.Debugf("unable to remove file %s", file)
+ }
}
}
@@ -664,7 +674,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return err
}
- if (c.state.State != ContainerStateConfigured) && (c.state.State != ContainerStateExited) {
+ if (c.state.State != define.ContainerStateConfigured) && (c.state.State != define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
}
@@ -702,7 +712,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err != nil {
return err
}
- json.Unmarshal(networkJSON, &networkStatus)
+ if err := json.Unmarshal(networkJSON, &networkStatus); err != nil {
+ return err
+ }
// Take the first IP address
var IP net.IP
if len(networkStatus) > 0 {
@@ -744,7 +756,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// We want to have the same network namespace as before.
if c.config.CreateNetNS {
- g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path())
+ if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path()); err != nil {
+ return err
+ }
}
if err := c.makeBindMounts(); err != nil {
@@ -769,7 +783,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
}
// Cleanup for a working restore.
- c.removeConmonFiles()
+ if err := c.removeConmonFiles(); err != nil {
+ return err
+ }
// Save the OCI spec to disk
if err := c.saveSpec(g.Spec()); err != nil {
@@ -781,7 +797,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
logrus.Debugf("Restored container %s", c.ID())
- c.state.State = ContainerStateRunning
+ c.state.State = define.ContainerStateRunning
if !options.Keep {
// Delete all checkpoint related files. At this point, in theory, all files
@@ -793,8 +809,8 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
logrus.Debugf("Non-fatal: removal of checkpoint directory (%s) failed: %v", c.CheckpointPath(), err)
}
cleanup := [...]string{"restore.log", "dump.log", "stats-dump", "stats-restore", "network.status"}
- for _, delete := range cleanup {
- file := filepath.Join(c.bundlePath(), delete)
+ for _, del := range cleanup {
+ file := filepath.Join(c.bundlePath(), del)
err = os.Remove(file)
if err != nil {
logrus.Debugf("Non-fatal: removal of checkpoint file (%s) failed: %v", file, err)
@@ -824,14 +840,14 @@ func (c *Container) makeBindMounts() error {
// will recreate. Only do this if we aren't sharing them with
// another container.
if c.config.NetNsCtr == "" {
- if path, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
- if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+ if resolvePath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
+ if err := os.Remove(resolvePath); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error removing container %s resolv.conf", c.ID())
}
delete(c.state.BindMounts, "/etc/resolv.conf")
}
- if path, ok := c.state.BindMounts["/etc/hosts"]; ok {
- if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+ if hostsPath, ok := c.state.BindMounts["/etc/hosts"]; ok {
+ if err := os.Remove(hostsPath); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error removing container %s hosts", c.ID())
}
delete(c.state.BindMounts, "/etc/hosts")
@@ -968,10 +984,10 @@ func (c *Container) makeBindMounts() error {
 // generateResolvConf generates a container's resolv.conf
func (c *Container) generateResolvConf() (string, error) {
resolvConf := "/etc/resolv.conf"
- for _, ns := range c.config.Spec.Linux.Namespaces {
- if ns.Type == spec.NetworkNamespace {
- if ns.Path != "" && !strings.HasPrefix(ns.Path, "/proc/") {
- definedPath := filepath.Join("/etc/netns", filepath.Base(ns.Path), "resolv.conf")
+ for _, namespace := range c.config.Spec.Linux.Namespaces {
+ if namespace.Type == spec.NetworkNamespace {
+ if namespace.Path != "" && !strings.HasPrefix(namespace.Path, "/proc/") {
+ definedPath := filepath.Join("/etc/netns", filepath.Base(namespace.Path), "resolv.conf")
_, err := os.Stat(definedPath)
if err == nil {
resolvConf = definedPath
@@ -1096,10 +1112,10 @@ func (c *Container) generatePasswd() (string, error) {
if c.config.User == "" {
return "", nil
}
- spec := strings.SplitN(c.config.User, ":", 2)
- userspec := spec[0]
- if len(spec) > 1 {
- groupspec = spec[1]
+ splitSpec := strings.SplitN(c.config.User, ":", 2)
+ userspec := splitSpec[0]
+ if len(splitSpec) > 1 {
+ groupspec = splitSpec[1]
}
 // If a non-numeric User, then don't generate passwd
uid, err := strconv.ParseUint(userspec, 10, 32)
@@ -1137,7 +1153,7 @@ func (c *Container) generatePasswd() (string, error) {
if err != nil {
return "", errors.Wrapf(err, "failed to create temporary passwd file")
}
- if os.Chmod(passwdFile, 0644); err != nil {
+ if err := os.Chmod(passwdFile, 0644); err != nil {
return "", err
}
return passwdFile, nil
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index e549673a6..8a87a8796 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -9,6 +9,7 @@ import (
"strings"
"time"
+ "github.com/containers/libpod/libpod/logs"
journal "github.com/coreos/go-systemd/sdjournal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -27,7 +28,7 @@ const (
bufLen = 16384
)
-func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLine) error {
+func (c *Container) readFromJournal(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
var config journal.JournalReaderConfig
config.NumFromTail = options.Tail
config.Formatter = journalFormatter
@@ -79,7 +80,7 @@ func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLin
// because we are reusing bytes, we need to make
// sure the old data doesn't get into the new line
bytestr := string(bytes[:ec])
- logLine, err2 := newLogLine(bytestr)
+ logLine, err2 := logs.NewLogLine(bytestr)
if err2 != nil {
logrus.Error(err2)
continue
@@ -98,7 +99,7 @@ func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLin
func journalFormatter(entry *journal.JournalEntry) (string, error) {
usec := entry.RealtimeTimestamp
- tsString := time.Unix(0, int64(usec)*int64(time.Microsecond)).Format(logTimeFormat)
+ tsString := time.Unix(0, int64(usec)*int64(time.Microsecond)).Format(logs.LogTimeFormat)
output := fmt.Sprintf("%s ", tsString)
priority, ok := entry.Fields["PRIORITY"]
if !ok {
@@ -114,9 +115,9 @@ func journalFormatter(entry *journal.JournalEntry) (string, error) {
// if CONTAINER_PARTIAL_MESSAGE is defined, the log type is "P"
if _, ok := entry.Fields["CONTAINER_PARTIAL_MESSAGE"]; ok {
- output += fmt.Sprintf("%s ", partialLogType)
+ output += fmt.Sprintf("%s ", logs.PartialLogType)
} else {
- output += fmt.Sprintf("%s ", fullLogType)
+ output += fmt.Sprintf("%s ", logs.FullLogType)
}
// Finally, append the message
@@ -129,12 +130,12 @@ func journalFormatter(entry *journal.JournalEntry) (string, error) {
}
type FollowBuffer struct {
- logChannel chan *LogLine
+ logChannel chan *logs.LogLine
}
func (f FollowBuffer) Write(p []byte) (int, error) {
bytestr := string(p)
- logLine, err := newLogLine(bytestr)
+ logLine, err := logs.NewLogLine(bytestr)
if err != nil {
return -1, err
}
diff --git a/libpod/container_log_unsupported.go b/libpod/container_log_unsupported.go
index 380d317b5..2c4492b10 100644
--- a/libpod/container_log_unsupported.go
+++ b/libpod/container_log_unsupported.go
@@ -4,9 +4,10 @@ package libpod
import (
"github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/libpod/logs"
"github.com/pkg/errors"
)
-func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLine) error {
+func (c *Container) readFromJournal(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
return errors.Wrapf(define.ErrOSNotSupported, "Journald logging only enabled with systemd on linux")
}
diff --git a/libpod/container_top_linux.go b/libpod/container_top_linux.go
index 2e0e83c05..ce471838d 100644
--- a/libpod/container_top_linux.go
+++ b/libpod/container_top_linux.go
@@ -6,6 +6,7 @@ import (
"strconv"
"strings"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/psgo"
"github.com/pkg/errors"
@@ -18,7 +19,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) {
if err != nil {
return nil, errors.Wrapf(err, "unable to look up state for %s", c.ID())
}
- if conStat != ContainerStateRunning {
+ if conStat != define.ContainerStateRunning {
return nil, errors.Errorf("top can only be used on running containers")
}
@@ -60,9 +61,3 @@ func (c *Container) GetContainerPidInformation(descriptors []string) ([]string,
}
return res, nil
}
-
-// GetContainerPidInformationDescriptors returns a string slice of all supported
-// format descriptors of GetContainerPidInformation.
-func GetContainerPidInformationDescriptors() ([]string, error) {
- return psgo.ListDescriptors(), nil
-}
diff --git a/libpod/container_top_unsupported.go b/libpod/container_top_unsupported.go
index 2117f913d..382c98b54 100644
--- a/libpod/container_top_unsupported.go
+++ b/libpod/container_top_unsupported.go
@@ -15,9 +15,3 @@ import "github.com/containers/libpod/libpod/define"
func (c *Container) GetContainerPidInformation(descriptors []string) ([]string, error) {
return nil, define.ErrNotImplemented
}
-
-// GetContainerPidInformationDescriptors returns a string slice of all supported
-// format descriptors of GetContainerPidInformation.
-func GetContainerPidInformationDescriptors() ([]string, error) {
- return nil, define.ErrNotImplemented
-}
diff --git a/libpod/define/config.go b/libpod/define/config.go
index 256a4b21f..d8d6ccf55 100644
--- a/libpod/define/config.go
+++ b/libpod/define/config.go
@@ -3,8 +3,18 @@ package define
var (
// DefaultInitPath is the default path to the container-init binary
DefaultInitPath = "/usr/libexec/podman/catatonit"
+ // DefaultInfraImage to use for infra container
+ DefaultInfraImage = "k8s.gcr.io/pause:3.1"
+ // DefaultInfraCommand to be run in an infra container
+ DefaultInfraCommand = "/pause"
)
// CtrRemoveTimeout is the default number of seconds to wait after stopping a container
// before sending the kill signal
const CtrRemoveTimeout = 10
+
+// InfoData holds the info type, i.e. store, host, etc., and the data for each type
+type InfoData struct {
+ Type string
+ Data map[string]interface{}
+}
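InfoData above is a plain carrier struct; the following is a hypothetical sketch (not part of the diff) of populating one entry, where the "host" type string and the field values are assumed for illustration:

    // Hypothetical example: wrapping a couple of host facts in an InfoData
    // entry; real callers would fill Data from the runtime's host queries.
    hostData := define.InfoData{
        Type: "host",
        Data: map[string]interface{}{
            "os":   runtime.GOOS,   // standard library "runtime" package
            "arch": runtime.GOARCH,
        },
    }
    _ = hostData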
diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go
new file mode 100644
index 000000000..ab2527b3e
--- /dev/null
+++ b/libpod/define/containerstate.go
@@ -0,0 +1,73 @@
+package define
+
+import "github.com/pkg/errors"
+
+// ContainerStatus represents the current state of a container
+type ContainerStatus int
+
+const (
+ // ContainerStateUnknown indicates that the container is in an error
+ // state where information about it cannot be retrieved
+ ContainerStateUnknown ContainerStatus = iota
+ // ContainerStateConfigured indicates that the container has had its
+ // storage configured but it has not been created in the OCI runtime
+ ContainerStateConfigured ContainerStatus = iota
+ // ContainerStateCreated indicates the container has been created in
+ // the OCI runtime but not started
+ ContainerStateCreated ContainerStatus = iota
+ // ContainerStateRunning indicates the container is currently executing
+ ContainerStateRunning ContainerStatus = iota
+ // ContainerStateStopped indicates that the container was running but has
+ // exited
+ ContainerStateStopped ContainerStatus = iota
+ // ContainerStatePaused indicates that the container has been paused
+ ContainerStatePaused ContainerStatus = iota
+ // ContainerStateExited indicates the container has stopped and been
+ // cleaned up
+ ContainerStateExited ContainerStatus = iota
+)
+
+// String returns a string representation for users
+// of a container state
+func (t ContainerStatus) String() string {
+ switch t {
+ case ContainerStateUnknown:
+ return "unknown"
+ case ContainerStateConfigured:
+ return "configured"
+ case ContainerStateCreated:
+ return "created"
+ case ContainerStateRunning:
+ return "running"
+ case ContainerStateStopped:
+ return "stopped"
+ case ContainerStatePaused:
+ return "paused"
+ case ContainerStateExited:
+ return "exited"
+ }
+ return "bad state"
+}
+
+// StringToContainerStatus converts a string representation of a container's
+// status into an actual container status type
+func StringToContainerStatus(status string) (ContainerStatus, error) {
+ switch status {
+ case ContainerStateUnknown.String():
+ return ContainerStateUnknown, nil
+ case ContainerStateConfigured.String():
+ return ContainerStateConfigured, nil
+ case ContainerStateCreated.String():
+ return ContainerStateCreated, nil
+ case ContainerStateRunning.String():
+ return ContainerStateRunning, nil
+ case ContainerStateStopped.String():
+ return ContainerStateStopped, nil
+ case ContainerStatePaused.String():
+ return ContainerStatePaused, nil
+ case ContainerStateExited.String():
+ return ContainerStateExited, nil
+ default:
+ return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status)
+ }
+}
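The two helpers above are inverses on the known states; a minimal round-trip sketch, assuming "fmt" is imported alongside this package:

    s := define.ContainerStateRunning.String() // "running"
    status, err := define.StringToContainerStatus(s)
    if err != nil {
        // only unrecognized strings land here, wrapped around ErrInvalidArg
        panic(err)
    }
    fmt.Println(status == define.ContainerStateRunning) // true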
diff --git a/libpod/version.go b/libpod/define/version.go
index d2b99a275..0f9f49050 100644
--- a/libpod/version.go
+++ b/libpod/define/version.go
@@ -1,4 +1,4 @@
-package libpod
+package define
import (
"runtime"
diff --git a/libpod/events.go b/libpod/events.go
index 13bb5bdde..be21e510a 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -1,7 +1,10 @@
package libpod
import (
+ "fmt"
+
"github.com/containers/libpod/libpod/events"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -79,3 +82,55 @@ func (r *Runtime) Events(options events.ReadOptions) error {
}
return eventer.Read(options)
}
+
+// GetEvents reads the event log and returns events based on input filters
+func (r *Runtime) GetEvents(filters []string) ([]*events.Event, error) {
+ var (
+ logEvents []*events.Event
+ readErr error
+ )
+ eventChannel := make(chan *events.Event)
+ options := events.ReadOptions{
+ EventChannel: eventChannel,
+ Filters: filters,
+ FromStart: true,
+ Stream: false,
+ }
+ eventer, err := r.newEventer()
+ if err != nil {
+ return nil, err
+ }
+ go func() {
+ readErr = eventer.Read(options)
+ }()
+ for e := range eventChannel {
+ logEvents = append(logEvents, e)
+ }
+ // Check the reader's error only after the channel has been drained;
+ // checking it right after launching the goroutine would race with Read.
+ if readErr != nil {
+ return nil, readErr
+ }
+ return logEvents, nil
+}
+
+// GetLastContainerEvent takes a container name or ID and an event status and returns
+// the last occurrence of the container event
+func (r *Runtime) GetLastContainerEvent(nameOrID string, containerEvent events.Status) (*events.Event, error) {
+ // check to make sure the event.Status is valid
+ if _, err := events.StringToStatus(containerEvent.String()); err != nil {
+ return nil, err
+ }
+ filters := []string{
+ fmt.Sprintf("container=%s", nameOrID),
+ fmt.Sprintf("event=%s", containerEvent),
+ "type=container",
+ }
+ containerEvents, err := r.GetEvents(filters)
+ if err != nil {
+ return nil, err
+ }
+ if len(containerEvents) < 1 {
+ return nil, errors.Wrapf(events.ErrEventNotFound, "%s not found", containerEvent.String())
+ }
+ // return the last element in the slice
+ return containerEvents[len(containerEvents)-1], nil
+}
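A hedged usage sketch of the reader API added above, assuming a *Runtime r, an events.Status constant such as events.Start, and Event field names (Name, Status, Time) from the events package; the container name "web" is made up:

    ev, err := r.GetLastContainerEvent("web", events.Start)
    if err != nil {
        return err // events.ErrEventNotFound when nothing matches the filters
    }
    logrus.Infof("container %s: last %s event at %s", ev.Name, ev.Status, ev.Time)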
diff --git a/libpod/events/config.go b/libpod/events/config.go
index 810988205..b9f01f3a5 100644
--- a/libpod/events/config.go
+++ b/libpod/events/config.go
@@ -2,6 +2,8 @@ package events
import (
"time"
+
+ "github.com/pkg/errors"
)
// EventerType ...
@@ -158,3 +160,12 @@ const (
// EventFilter for filtering events
type EventFilter func(*Event) bool
+
+var (
+ // ErrEventTypeBlank indicates the event log found something done by podman
+ // but it isn't likely an event
+ ErrEventTypeBlank = errors.New("event type blank")
+
+ // ErrEventNotFound indicates that the event was not found in the event log
+ ErrEventNotFound = errors.New("unable to find event")
+)
diff --git a/libpod/events/events.go b/libpod/events/events.go
index 1ec79bcd7..2bebff162 100644
--- a/libpod/events/events.go
+++ b/libpod/events/events.go
@@ -95,6 +95,8 @@ func StringToType(name string) (Type, error) {
return System, nil
case Volume.String():
return Volume, nil
+ case "":
+ return "", ErrEventTypeBlank
}
return "", errors.Errorf("unknown event type %q", name)
}
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 78a630e9a..d5bce4334 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -101,7 +101,9 @@ func (e EventJournalD) Read(options ReadOptions) error {
// We can't decode this event.
// Don't fail hard - that would make events unusable.
// Instead, log and continue.
- logrus.Errorf("Unable to decode event: %v", err)
+ if errors.Cause(err) != ErrEventTypeBlank {
+ logrus.Errorf("Unable to decode event: %v", err)
+ }
continue
}
include := true
diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go
index 3e36a2c95..f4ea6c694 100644
--- a/libpod/healthcheck.go
+++ b/libpod/healthcheck.go
@@ -9,6 +9,7 @@ import (
"strings"
"time"
+ "github.com/containers/libpod/libpod/define"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -169,7 +170,7 @@ func checkHealthCheckCanBeRun(c *Container) (HealthCheckStatus, error) {
if err != nil {
return HealthCheckInternalError, err
}
- if cstate != ContainerStateRunning {
+ if cstate != define.ContainerStateRunning {
return HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID())
}
if !c.HasHealthCheck() {
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 89a68a1bd..76e46f74f 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -660,11 +660,7 @@ func (i *Image) Size(ctx context.Context) (*uint64, error) {
// DriverData gets the driver data from the store on a layer
func (i *Image) DriverData() (*driver.Data, error) {
- topLayer, err := i.Layer()
- if err != nil {
- return nil, err
- }
- return driver.GetDriverData(i.imageruntime.store, topLayer.ID)
+ return driver.GetDriverData(i.imageruntime.store, i.TopLayer())
}
// Layer returns the image's top layer
@@ -693,13 +689,17 @@ func (i *Image) History(ctx context.Context) ([]*History, error) {
return nil, err
}
- // Use our layers list to find images that use one of them as its
+ // Use our layers list to find images that use any of them (or no
+ // layer, since every base layer is derived from an empty layer) as its
// topmost layer.
interestingLayers := make(map[string]bool)
- layer, err := i.imageruntime.store.Layer(i.TopLayer())
- if err != nil {
- return nil, err
+ var layer *storage.Layer
+ if i.TopLayer() != "" {
+ if layer, err = i.imageruntime.store.Layer(i.TopLayer()); err != nil {
+ return nil, err
+ }
}
+ interestingLayers[""] = true
for layer != nil {
interestingLayers[layer.ID] = true
if layer.Parent == "" {
@@ -795,27 +795,6 @@ func (i *Image) History(ctx context.Context) ([]*History, error) {
return allHistory, nil
}
-// historyLayerIDs goes through the images in store and checks if the top layer of an image
-// is the same as the parent of topLayerID
-func (i *Image) historyLayerIDs(topLayerID string, images []*Image, IDs *[]string) error {
- for _, image := range images {
- // Get the layer info of topLayerID
- layer, err := i.imageruntime.store.Layer(topLayerID)
- if err != nil {
- return errors.Wrapf(err, "error getting layer info %q", topLayerID)
- }
- // Check if the parent of layer is equal to the image's top layer
- // If so add the image ID to the list of IDs and find the parent of
- // the top layer of the image ID added to the list
- // Since we are checking for parent, each top layer can only have one parent
- if layer.Parent == image.TopLayer() {
- *IDs = append(*IDs, image.ID())
- return i.historyLayerIDs(image.TopLayer(), images, IDs)
- }
- }
- return nil
-}
-
// Dangling returns a bool if the image is "dangling"
func (i *Image) Dangling() bool {
return len(i.Names()) == 0
@@ -1143,13 +1122,15 @@ func areParentAndChild(parent, child *imgspecv1.Image) bool {
// GetParent returns the image ID of the parent. Return nil if a parent is not found.
func (i *Image) GetParent(ctx context.Context) (*Image, error) {
+ var childLayer *storage.Layer
images, err := i.imageruntime.GetImages()
if err != nil {
return nil, err
}
- childLayer, err := i.imageruntime.store.Layer(i.TopLayer())
- if err != nil {
- return nil, err
+ if i.TopLayer() != "" {
+ if childLayer, err = i.imageruntime.store.Layer(i.TopLayer()); err != nil {
+ return nil, err
+ }
}
// fetch the configuration for the child image
child, err := i.ociv1Image(ctx)
@@ -1161,11 +1142,23 @@ func (i *Image) GetParent(ctx context.Context) (*Image, error) {
continue
}
candidateLayer := img.TopLayer()
- // as a child, our top layer is either the candidate parent's
- // layer, or one that's derived from it, so skip over any
- // candidate image where we know that isn't the case
- if candidateLayer != childLayer.Parent && candidateLayer != childLayer.ID {
- continue
+ // as a child, our top layer, if we have one, is either the
+ // candidate parent's layer, or one that's derived from it, so
+ // skip over any candidate image where we know that isn't the
+ // case
+ if childLayer != nil {
+ // The child has at least one layer, so a parent would
+ // have a top layer that's either the same as the child's
+ // top layer or the top layer's recorded parent layer,
+ // which could be an empty value.
+ if candidateLayer != childLayer.Parent && candidateLayer != childLayer.ID {
+ continue
+ }
+ } else {
+ // The child has no layers, but the candidate does.
+ if candidateLayer != "" {
+ continue
+ }
}
// fetch the configuration for the candidate image
candidate, err := img.ociv1Image(ctx)
@@ -1204,14 +1197,22 @@ func (i *Image) getChildren(ctx context.Context, max int) ([]string, error) {
if img.ID() == i.ID() {
continue
}
- candidateLayer, err := img.Layer()
- if err != nil {
- return nil, err
- }
- // if this image's top layer is not our top layer, and is not
- // based on our top layer, we can skip it
- if candidateLayer.Parent != parentLayer && candidateLayer.ID != parentLayer {
- continue
+ if img.TopLayer() == "" {
+ if parentLayer != "" {
+ // this image has no layers, but we do, so
+ // it can't be derived from this one
+ continue
+ }
+ } else {
+ candidateLayer, err := img.Layer()
+ if err != nil {
+ return nil, err
+ }
+ // if this image's top layer is not our top layer, and is not
+ // based on our top layer, we can skip it
+ if candidateLayer.Parent != parentLayer && candidateLayer.ID != parentLayer {
+ continue
+ }
}
// fetch the configuration for the candidate image
candidate, err := img.ociv1Image(ctx)
@@ -1443,6 +1444,7 @@ func GetLayersMapWithImageInfo(imageruntime *Runtime) (map[string]*LayerInfo, er
if err != nil {
return nil, err
}
+ layerInfoMap[""] = &LayerInfo{}
for _, img := range imgs {
e, ok := layerInfoMap[img.TopLayer]
if !ok {
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index 644a9ae86..e5765febc 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -19,8 +19,8 @@ import (
"github.com/containers/image/types"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/pkg/registries"
- multierror "github.com/hashicorp/go-multierror"
- opentracing "github.com/opentracing/opentracing-go"
+ "github.com/hashicorp/go-multierror"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
diff --git a/libpod/info.go b/libpod/info.go
index c96293e3d..4a89fa648 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -19,12 +19,6 @@ import (
"github.com/pkg/errors"
)
-// InfoData holds the info type, i.e store, host etc and the data for each type
-type InfoData struct {
- Type string
- Data map[string]interface{}
-}
-
// top-level "host" info
func (r *Runtime) hostInfo() (map[string]interface{}, error) {
 // let's say OS, arch, number of cpus, amount of memory, maybe os distribution/version, hostname, kernel version, uptime
diff --git a/libpod/kube.go b/libpod/kube.go
index 1622246d5..409937010 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -3,6 +3,7 @@ package libpod
import (
"fmt"
"math/rand"
+ "os"
"strconv"
"strings"
"time"
@@ -16,7 +17,6 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/resource"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -132,32 +132,43 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor
var (
podContainers []v1.Container
)
+ deDupPodVolumes := make(map[string]*v1.Volume)
first := true
for _, ctr := range containers {
if !ctr.IsInfra() {
- result, err := containerToV1Container(ctr)
+ kubeCtr, volumes, err := containerToV1Container(ctr)
if err != nil {
return nil, err
}
// Since port bindings for the pod are handled by the
// infra container, wipe them here.
- result.Ports = nil
+ kubeCtr.Ports = nil
// We add the original port declarations from the libpod infra container
 // to the first kubernetes container description because otherwise we lose
// the original container/port bindings.
if first && len(ports) > 0 {
- result.Ports = ports
+ kubeCtr.Ports = ports
first = false
}
- podContainers = append(podContainers, result)
+ podContainers = append(podContainers, kubeCtr)
+ // Deduplicate volumes, so if containers in the pod share a volume, it's only
+ // listed in the volumes section once
+ for _, vol := range volumes {
+ vol := vol // copy: storing &vol directly would alias the loop variable
+ deDupPodVolumes[vol.Name] = &vol
+ }
}
}
- return addContainersToPodObject(podContainers, p.Name()), nil
+ podVolumes := make([]v1.Volume, 0, len(deDupPodVolumes))
+ for _, vol := range deDupPodVolumes {
+ podVolumes = append(podVolumes, *vol)
+ }
+
+ return addContainersAndVolumesToPodObject(podContainers, podVolumes, p.Name()), nil
}
-func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod {
+func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.Volume, podName string) *v1.Pod {
tm := v12.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
@@ -177,6 +188,7 @@ func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod
}
ps := v1.PodSpec{
Containers: containers,
+ Volumes: volumes,
}
p := v1.Pod{
TypeMeta: tm,
@@ -190,56 +202,58 @@ func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod
// for a single container. we "insert" that container description in a pod.
func simplePodWithV1Container(ctr *Container) (*v1.Pod, error) {
var containers []v1.Container
- result, err := containerToV1Container(ctr)
+ kubeCtr, kubeVols, err := containerToV1Container(ctr)
if err != nil {
return nil, err
}
- containers = append(containers, result)
- return addContainersToPodObject(containers, ctr.Name()), nil
+ containers = append(containers, kubeCtr)
+ return addContainersAndVolumesToPodObject(containers, kubeVols, ctr.Name()), nil
}
// containerToV1Container converts information we know about a libpod container
// to a V1.Container specification.
-func containerToV1Container(c *Container) (v1.Container, error) {
+func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) {
kubeContainer := v1.Container{}
+ kubeVolumes := []v1.Volume{}
kubeSec, err := generateKubeSecurityContext(c)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
if len(c.config.Spec.Linux.Devices) > 0 {
// TODO Enable when we can support devices and their names
devices, err := generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
kubeContainer.VolumeDevices = devices
- return kubeContainer, errors.Wrapf(define.ErrNotImplemented, "linux devices")
+ return kubeContainer, kubeVolumes, errors.Wrapf(define.ErrNotImplemented, "linux devices")
}
if len(c.config.UserVolumes) > 0 {
 // TODO Until we can resolve what the volume name should be, this is disabled
// Volume names need to be coordinated "globally" in the kube files.
- volumes, err := libpodMountsToKubeVolumeMounts(c)
+ volumeMounts, volumes, err := libpodMountsToKubeVolumeMounts(c)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
- kubeContainer.VolumeMounts = volumes
+ kubeContainer.VolumeMounts = volumeMounts
+ kubeVolumes = append(kubeVolumes, volumes...)
}
envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
portmappings, err := c.PortMappings()
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
ports, err := ocicniPortMappingToContainerPort(portmappings)
if err != nil {
- return kubeContainer, err
+ return kubeContainer, kubeVolumes, err
}
containerCommands := c.Command()
@@ -263,7 +277,7 @@ func containerToV1Container(c *Container) (v1.Container, error) {
kubeContainer.StdinOnce = false
kubeContainer.TTY = c.config.Spec.Process.Terminal
- return kubeContainer, nil
+ return kubeContainer, kubeVolumes, nil
}
// ocicniPortMappingToContainerPort takes an ocicni portmapping and converts
@@ -309,52 +323,82 @@ func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) {
return envVars, nil
}
-// Is this worth it?
-func libpodMaxAndMinToResourceList(c *Container) (v1.ResourceList, v1.ResourceList) { //nolint
- // It does not appear we can properly calculate CPU resources from the information
- // we know in libpod. Libpod knows CPUs by time, shares, etc.
+// libpodMountsToKubeVolumeMounts converts the containers mounts to a struct kube understands
+func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume, error) {
+ var vms []v1.VolumeMount
+ var vos []v1.Volume
- // We also only know about a memory limit; no memory minimum
- maxResources := make(map[v1.ResourceName]resource.Quantity)
- minResources := make(map[v1.ResourceName]resource.Quantity)
- config := c.Config()
- maxMem := config.Spec.Linux.Resources.Memory.Limit
+ // TODO when named volumes are supported in play kube, also parse named volumes here
+ _, mounts := c.sortUserVolumes(c.config.Spec)
+ for _, m := range mounts {
+ vm, vo, err := generateKubeVolumeMount(m)
+ if err != nil {
+ return vms, vos, err
+ }
+ vms = append(vms, vm)
+ vos = append(vos, vo)
+ }
+ return vms, vos, nil
+}
- _ = maxMem
+// generateKubeVolumeMount takes a user-specified mount and returns
+// a kubernetes VolumeMount (to be added to the container) and a kubernetes Volume
+// (to be added to the pod)
+func generateKubeVolumeMount(m specs.Mount) (v1.VolumeMount, v1.Volume, error) {
+ vm := v1.VolumeMount{}
+ vo := v1.Volume{}
- return maxResources, minResources
+ name, err := convertVolumePathToName(m.Source)
+ if err != nil {
+ return vm, vo, err
+ }
+ vm.Name = name
+ vm.MountPath = m.Destination
+ if util.StringInSlice("ro", m.Options) {
+ vm.ReadOnly = true
+ }
+
+ vo.Name = name
+ vo.HostPath = &v1.HostPathVolumeSource{}
+ vo.HostPath.Path = m.Source
+ isDir, err := isHostPathDirectory(m.Source)
+ // if neither a directory nor a file lives here, default to creating a directory
+ // TODO should this be an error instead?
+ var hostPathType v1.HostPathType
+ if err != nil {
+ hostPathType = v1.HostPathDirectoryOrCreate
+ } else if isDir {
+ hostPathType = v1.HostPathDirectory
+ } else {
+ hostPathType = v1.HostPathFile
+ }
+ vo.HostPath.Type = &hostPathType
+
+ return vm, vo, nil
}
-func generateKubeVolumeMount(hostSourcePath string, mounts []specs.Mount) (v1.VolumeMount, error) {
- vm := v1.VolumeMount{}
- for _, m := range mounts {
- if m.Source == hostSourcePath {
- // TODO Name is not provided and is required by Kube; therefore, this is disabled earlier
- //vm.Name =
- vm.MountPath = m.Source
- vm.SubPath = m.Destination
- if util.StringInSlice("ro", m.Options) {
- vm.ReadOnly = true
- }
- return vm, nil
- }
+func isHostPathDirectory(hostPathSource string) (bool, error) {
+ info, err := os.Stat(hostPathSource)
+ if err != nil {
+ return false, err
}
- return vm, errors.New("unable to find mount source")
+ return info.Mode().IsDir(), nil
}
-// libpodMountsToKubeVolumeMounts converts the containers mounts to a struct kube understands
-func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, error) {
- // At this point, I dont think we can distinguish between the default
- // volume mounts and user added ones. For now, we pass them all.
- var vms []v1.VolumeMount
- for _, hostSourcePath := range c.config.UserVolumes {
- vm, err := generateKubeVolumeMount(hostSourcePath, c.config.Spec.Mounts)
- if err != nil {
- continue
+func convertVolumePathToName(hostSourcePath string) (string, error) {
+ if len(hostSourcePath) == 0 {
+ return "", errors.Errorf("hostSourcePath must be specified to generate volume name")
+ }
+ if len(hostSourcePath) == 1 {
+ if hostSourcePath != "/" {
+ return "", errors.Errorf("hostSourcePath malformatted: %s", hostSourcePath)
}
- vms = append(vms, vm)
+ // add special case name
+ return "root", nil
}
- return vms, nil
+ // First, trim leading and trailing slashes, then replace the remaining slashes with dashes.
+ // Thus, /mnt/data/ will become mnt-data
+ return strings.Replace(strings.Trim(hostSourcePath, "/"), "/", "-", -1), nil
}
func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v1.Capabilities {
@@ -366,16 +410,14 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v
// those indicate a dropped cap
for _, capability := range defaultCaps {
if !util.StringInSlice(capability, containerCaps) {
- cap := v1.Capability(capability)
- drop = append(drop, cap)
+ drop = append(drop, v1.Capability(capability))
}
}
// Find caps in the container but not in the defaults; those indicate
// an added cap
for _, capability := range containerCaps {
if !util.StringInSlice(capability, defaultCaps) {
- cap := v1.Capability(capability)
- add = append(add, cap)
+ add = append(add, v1.Capability(capability))
}
}
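The convertVolumePathToName helper added above is a deterministic mapping; its expected outputs per the code as written, shown as a small sketch:

    name, _ := convertVolumePathToName("/mnt/data/") // "mnt-data"
    root, _ := convertVolumePathToName("/")          // "root" (special case)
    _, err := convertVolumePathToName("")            // error: path must be specified
    _, _, _ = name, root, err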
diff --git a/libpod/lock/file/file_lock.go b/libpod/lock/file/file_lock.go
new file mode 100644
index 000000000..e50d67321
--- /dev/null
+++ b/libpod/lock/file/file_lock.go
@@ -0,0 +1,175 @@
+package file
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "syscall"
+
+ "github.com/containers/storage"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// FileLocks is a struct enabling POSIX file locking, backed by a directory
+// of per-lock files.
+type FileLocks struct { // nolint
+ lockPath string
+ valid bool
+}
+
+// CreateFileLock sets up a directory containing the various lock files.
+func CreateFileLock(path string) (*FileLocks, error) {
+ _, err := os.Stat(path)
+ if err == nil {
+ return nil, errors.Wrapf(syscall.EEXIST, "directory %s exists", path)
+ }
+ if err := os.MkdirAll(path, 0711); err != nil {
+ return nil, errors.Wrapf(err, "cannot create %s", path)
+ }
+
+ locks := new(FileLocks)
+ locks.lockPath = path
+ locks.valid = true
+
+ return locks, nil
+}
+
+// OpenFileLock opens an existing directory with the lock files.
+func OpenFileLock(path string) (*FileLocks, error) {
+ _, err := os.Stat(path)
+ if err != nil {
+ return nil, errors.Wrapf(err, "accessing directory %s", path)
+ }
+
+ locks := new(FileLocks)
+ locks.lockPath = path
+ locks.valid = true
+
+ return locks, nil
+}
+
+// Close closes an existing lock directory.
+// The locks will be rendered unusable after closing.
+// WARNING: If you Close() while there are still locks locked, these locks may
+// fail to release, causing a program freeze.
+// Close() is only intended to be used while testing the locks.
+func (locks *FileLocks) Close() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+ err := os.RemoveAll(locks.lockPath)
+ if err != nil {
+ return errors.Wrapf(err, "deleting directory %s", locks.lockPath)
+ }
+ return nil
+}
+
+func (locks *FileLocks) getLockPath(lck uint32) string {
+ return filepath.Join(locks.lockPath, strconv.FormatInt(int64(lck), 10))
+}
+
+// AllocateLock allocates a lock and returns the index of the lock that was allocated.
+func (locks *FileLocks) AllocateLock() (uint32, error) {
+ if !locks.valid {
+ return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ id := uint32(0)
+ for ; ; id++ {
+ path := locks.getLockPath(id)
+ f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil {
+ if os.IsExist(err) {
+ continue
+ }
+ return 0, errors.Wrapf(err, "creating lock file")
+ }
+ f.Close()
+ break
+ }
+ return id, nil
+}
+
+// AllocateGivenLock allocates the given lock from the lock directory
+// for use by a container or pod.
+// If the lock is already in use or the index is invalid an error will be
+// returned.
+func (locks *FileLocks) AllocateGivenLock(lck uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ f, err := os.OpenFile(locks.getLockPath(lck), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil {
+ return errors.Wrapf(err, "error creating lock %d", lck)
+ }
+ f.Close()
+
+ return nil
+}
+
+// DeallocateLock frees a lock in the lock directory so it can be
+// reallocated to another container or pod.
+// The given lock must be already allocated, or an error will be returned.
+func (locks *FileLocks) DeallocateLock(lck uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+ if err := os.Remove(locks.getLockPath(lck)); err != nil {
+ return errors.Wrapf(err, "deallocating lock %d", lck)
+ }
+ return nil
+}
+
+// DeallocateAllLocks frees all locks so they can be reallocated to
+// other containers and pods.
+func (locks *FileLocks) DeallocateAllLocks() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+ files, err := ioutil.ReadDir(locks.lockPath)
+ if err != nil {
+ return errors.Wrapf(err, "error reading directory %s", locks.lockPath)
+ }
+ var lastErr error
+ for _, f := range files {
+ p := filepath.Join(locks.lockPath, f.Name())
+ err := os.Remove(p)
+ if err != nil {
+ lastErr = err
+ logrus.Errorf("deallocating lock %s", p)
+ }
+ }
+ return lastErr
+}
+
+// LockFileLock locks the given lock.
+func (locks *FileLocks) LockFileLock(lck uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ l, err := storage.GetLockfile(locks.getLockPath(lck))
+ if err != nil {
+ return errors.Wrapf(err, "error acquiring lock")
+ }
+
+ l.Lock()
+ return nil
+}
+
+// UnlockFileLock unlocks the given lock.
+func (locks *FileLocks) UnlockFileLock(lck uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+ l, err := storage.GetLockfile(locks.getLockPath(lck))
+ if err != nil {
+ return errors.Wrapf(err, "error acquiring lock")
+ }
+
+ l.Unlock()
+ return nil
+}
diff --git a/libpod/lock/file/file_lock_test.go b/libpod/lock/file/file_lock_test.go
new file mode 100644
index 000000000..6320d6b70
--- /dev/null
+++ b/libpod/lock/file/file_lock_test.go
@@ -0,0 +1,74 @@
+package file
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// Test that creating and destroying locks work
+func TestCreateAndDeallocate(t *testing.T) {
+ d, err := ioutil.TempDir("", "filelock")
+ assert.NoError(t, err)
+ defer os.RemoveAll(d)
+
+ l, err := OpenFileLock(filepath.Join(d, "locks"))
+ assert.Error(t, err)
+
+ l, err = CreateFileLock(filepath.Join(d, "locks"))
+ assert.NoError(t, err)
+
+ lock, err := l.AllocateLock()
+ assert.NoError(t, err)
+
+ err = l.AllocateGivenLock(lock)
+ assert.Error(t, err)
+
+ err = l.DeallocateLock(lock)
+ assert.NoError(t, err)
+
+ err = l.AllocateGivenLock(lock)
+ assert.NoError(t, err)
+
+ err = l.DeallocateAllLocks()
+ assert.NoError(t, err)
+
+ err = l.AllocateGivenLock(lock)
+ assert.NoError(t, err)
+
+ err = l.DeallocateAllLocks()
+ assert.NoError(t, err)
+}
+
+// Test that locking and unlocking locks work
+func TestLockAndUnlock(t *testing.T) {
+ d, err := ioutil.TempDir("", "filelock")
+ assert.NoError(t, err)
+ defer os.RemoveAll(d)
+
+ l, err := CreateFileLock(filepath.Join(d, "locks"))
+ assert.NoError(t, err)
+
+ lock, err := l.AllocateLock()
+ assert.NoError(t, err)
+
+ err = l.LockFileLock(lock)
+ assert.NoError(t, err)
+
+ lslocks, err := exec.LookPath("lslocks")
+ if err == nil {
+ lockPath := l.getLockPath(lock)
+ out, err := exec.Command(lslocks, "--json", "-p", fmt.Sprintf("%d", os.Getpid())).CombinedOutput()
+ assert.NoError(t, err)
+
+ assert.Contains(t, string(out), lockPath)
+ }
+
+ err = l.UnlockFileLock(lock)
+ assert.NoError(t, err)
+}
diff --git a/libpod/lock/file_lock_manager.go b/libpod/lock/file_lock_manager.go
new file mode 100644
index 000000000..8a4d939d3
--- /dev/null
+++ b/libpod/lock/file_lock_manager.go
@@ -0,0 +1,110 @@
+package lock
+
+import (
+ "github.com/containers/libpod/libpod/lock/file"
+)
+
+// FileLockManager manages file-based locks.
+type FileLockManager struct {
+ locks *file.FileLocks
+}
+
+// NewFileLockManager makes a new FileLockManager at the specified directory.
+func NewFileLockManager(lockPath string) (Manager, error) {
+ locks, err := file.CreateFileLock(lockPath)
+ if err != nil {
+ return nil, err
+ }
+
+ manager := new(FileLockManager)
+ manager.locks = locks
+
+ return manager, nil
+}
+
+// OpenFileLockManager opens an existing FileLockManager at the specified directory.
+func OpenFileLockManager(path string) (Manager, error) {
+ locks, err := file.OpenFileLock(path)
+ if err != nil {
+ return nil, err
+ }
+
+ manager := new(FileLockManager)
+ manager.locks = locks
+
+ return manager, nil
+}
+
+// AllocateLock allocates a new lock from the manager.
+func (m *FileLockManager) AllocateLock() (Locker, error) {
+ semIndex, err := m.locks.AllocateLock()
+ if err != nil {
+ return nil, err
+ }
+
+ lock := new(FileLock)
+ lock.lockID = semIndex
+ lock.manager = m
+
+ return lock, nil
+}
+
+// AllocateAndRetrieveLock allocates the lock with the given ID and returns it.
+// If the lock is already allocated, error.
+func (m *FileLockManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
+ lock := new(FileLock)
+ lock.lockID = id
+ lock.manager = m
+
+ if err := m.locks.AllocateGivenLock(id); err != nil {
+ return nil, err
+ }
+
+ return lock, nil
+}
+
+// RetrieveLock retrieves a lock from the manager given its ID.
+func (m *FileLockManager) RetrieveLock(id uint32) (Locker, error) {
+ lock := new(FileLock)
+ lock.lockID = id
+ lock.manager = m
+
+ return lock, nil
+}
+
+// FreeAllLocks frees all locks in the manager.
+// This function is DANGEROUS. Please read the full comment in locks.go before
+// trying to use it.
+func (m *FileLockManager) FreeAllLocks() error {
+ return m.locks.DeallocateAllLocks()
+}
+
+// FileLock is an individual file-based lock.
+type FileLock struct {
+ lockID uint32
+ manager *FileLockManager
+}
+
+// ID returns the ID of the lock.
+func (l *FileLock) ID() uint32 {
+ return l.lockID
+}
+
+// Lock acquires the lock.
+func (l *FileLock) Lock() {
+ if err := l.manager.locks.LockFileLock(l.lockID); err != nil {
+ panic(err.Error())
+ }
+}
+
+// Unlock releases the lock.
+func (l *FileLock) Unlock() {
+ if err := l.manager.locks.UnlockFileLock(l.lockID); err != nil {
+ panic(err.Error())
+ }
+}
+
+// Free releases the lock, allowing it to be reused.
+func (l *FileLock) Free() error {
+ return l.manager.locks.DeallocateLock(l.lockID)
+}
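A lifecycle sketch for the manager above; the directory path is illustrative and must not already exist, since NewFileLockManager calls CreateFileLock:

    manager, err := lock.NewFileLockManager("/tmp/podman-file-locks")
    if err != nil {
        return err
    }
    l, err := manager.AllocateLock()
    if err != nil {
        return err
    }
    l.Lock()
    // ... critical section guarded by the file lock ...
    l.Unlock()
    return l.Free() // deallocate so the lock ID can be reused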
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index 76dd5729e..322e92a8f 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -1,3 +1,5 @@
+// +build linux,cgo
+
package shm
// #cgo LDFLAGS: -lrt -lpthread
@@ -20,7 +22,7 @@ var (
// BitmapSize is the size of the bitmap used when managing SHM locks.
// an SHM lock manager's max locks will be rounded up to a multiple of
// this number.
- BitmapSize uint32 = uint32(C.bitmap_size_c)
+ BitmapSize = uint32(C.bitmap_size_c)
)
// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
diff --git a/libpod/lock/shm/shm_lock_nocgo.go b/libpod/lock/shm/shm_lock_nocgo.go
new file mode 100644
index 000000000..ea1488c90
--- /dev/null
+++ b/libpod/lock/shm/shm_lock_nocgo.go
@@ -0,0 +1,102 @@
+// +build linux,!cgo
+
+package shm
+
+import (
+ "github.com/sirupsen/logrus"
+)
+
+// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
+// segment.
+type SHMLocks struct {
+}
+
+// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
+// semaphores, and returns a struct that can be used to operate on those locks.
+// numLocks must not be 0, and may be rounded up to a multiple of the bitmap
+// size used by the underlying implementation.
+func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+ logrus.Error("locks are not supported without cgo")
+ return &SHMLocks{}, nil
+}
+
+// OpenSHMLock opens an existing shared-memory segment holding a given number of
+// POSIX semaphores. numLocks must match the number of locks the shared memory
+// segment was created with.
+func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+ logrus.Error("locks are not supported without cgo")
+ return &SHMLocks{}, nil
+}
+
+// GetMaxLocks returns the maximum number of locks in the SHM
+func (locks *SHMLocks) GetMaxLocks() uint32 {
+ logrus.Error("locks are not supported without cgo")
+ return 0
+}
+
+// Close closes an existing shared-memory segment.
+// The segment will be rendered unusable after closing.
+// WARNING: If you Close() while there are still locks locked, these locks may
+// fail to release, causing a program freeze.
+// Close() is only intended to be used while testing the locks.
+func (locks *SHMLocks) Close() error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
+// by a container or pod.
+// Returns the index of the semaphore that was allocated.
+// Allocations past the maximum number of locks given when the SHM segment was
+// created will result in an error, and no semaphore will be allocated.
+func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
+ logrus.Error("locks are not supported without cgo")
+ return 0, nil
+}
+
+// AllocateGivenSemaphore allocates the given semaphore from the shared-memory
+// segment for use by a container or pod.
+// If the semaphore is already in use or the index is invalid an error will be
+// returned.
+func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
+// reallocated to another container or pod.
+// The given semaphore must be already allocated, or an error will be returned.
+func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
+// other containers and pods.
+func (locks *SHMLocks) DeallocateAllSemaphores() error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// LockSemaphore locks the given semaphore.
+// If the semaphore is already locked, LockSemaphore will block until the lock
+// can be acquired.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) LockSemaphore(sem uint32) error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
+
+// UnlockSemaphore unlocks the given semaphore.
+// Unlocking a semaphore that is already unlocked will return EBUSY.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
+ logrus.Error("locks are not supported without cgo")
+ return nil
+}
diff --git a/libpod/container_log.go b/libpod/logs/log.go
index 374e5a1fc..488291cfe 100644
--- a/libpod/container_log.go
+++ b/libpod/logs/log.go
@@ -1,31 +1,29 @@
-package libpod
+package logs
import (
"fmt"
"io/ioutil"
- "os"
"strings"
"sync"
"time"
"github.com/hpcloud/tail"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
const (
- // logTimeFormat is the time format used in the log.
+ // LogTimeFormat is the time format used in the log.
// It is a modified version of RFC3339Nano that guarantees trailing
// zeroes are not trimmed, taken from
// https://github.com/golang/go/issues/19635
- logTimeFormat = "2006-01-02T15:04:05.000000000Z07:00"
+ LogTimeFormat = "2006-01-02T15:04:05.000000000Z07:00"
- // partialLogType signifies a log line that exceeded the buffer
+ // PartialLogType signifies a log line that exceeded the buffer
// length and needed to spill into a new line
- partialLogType = "P"
+ PartialLogType = "P"
- // fullLogType signifies a log line is full
- fullLogType = "F"
+ // FullLogType signifies a log line is full
+ FullLogType = "F"
)
// LogOptions is the options you can use for logs
@@ -48,72 +46,8 @@ type LogLine struct {
CID string
}
-// Log is a runtime function that can read one or more container logs.
-func (r *Runtime) Log(containers []*Container, options *LogOptions, logChannel chan *LogLine) error {
- for _, ctr := range containers {
- if err := ctr.ReadLog(options, logChannel); err != nil {
- return err
- }
- }
- return nil
-}
-
-// ReadLog reads a containers log based on the input options and returns loglines over a channel
-func (c *Container) ReadLog(options *LogOptions, logChannel chan *LogLine) error {
- // TODO Skip sending logs until journald logs can be read
- // TODO make this not a magic string
- if c.LogDriver() == JournaldLogging {
- return c.readFromJournal(options, logChannel)
- }
- return c.readFromLogFile(options, logChannel)
-}
-
-func (c *Container) readFromLogFile(options *LogOptions, logChannel chan *LogLine) error {
- t, tailLog, err := getLogFile(c.LogPath(), options)
- if err != nil {
- // If the log file does not exist, this is not fatal.
- if os.IsNotExist(errors.Cause(err)) {
- return nil
- }
- return errors.Wrapf(err, "unable to read log file %s for %s ", c.ID(), c.LogPath())
- }
- options.WaitGroup.Add(1)
- if len(tailLog) > 0 {
- for _, nll := range tailLog {
- nll.CID = c.ID()
- if nll.Since(options.Since) {
- logChannel <- nll
- }
- }
- }
-
- go func() {
- var partial string
- for line := range t.Lines {
- nll, err := newLogLine(line.Text)
- if err != nil {
- logrus.Error(err)
- continue
- }
- if nll.Partial() {
- partial = partial + nll.Msg
- continue
- } else if !nll.Partial() && len(partial) > 1 {
- nll.Msg = partial
- partial = ""
- }
- nll.CID = c.ID()
- if nll.Since(options.Since) {
- logChannel <- nll
- }
- }
- options.WaitGroup.Done()
- }()
- return nil
-}
-
-// getLogFile returns an hp tail for a container given options
-func getLogFile(path string, options *LogOptions) (*tail.Tail, []*LogLine, error) {
+// GetLogFile returns a tail (from the hpcloud/tail package) for a container's log file, given options
+func GetLogFile(path string, options *LogOptions) (*tail.Tail, []*LogLine, error) {
var (
whence int
err error
@@ -154,7 +88,7 @@ func getTailLog(path string, tail int) ([]*LogLine, error) {
if len(splitContent[i]) == 0 {
continue
}
- nll, err := newLogLine(splitContent[i])
+ nll, err := NewLogLine(splitContent[i])
if err != nil {
return nil, err
}
@@ -191,7 +125,7 @@ func (l *LogLine) String(options *LogOptions) string {
out = fmt.Sprintf("%s ", cid)
}
if options.Timestamps {
- out = out + fmt.Sprintf("%s ", l.Time.Format(logTimeFormat))
+ out = out + fmt.Sprintf("%s ", l.Time.Format(LogTimeFormat))
}
return out + l.Msg
}
@@ -201,13 +135,13 @@ func (l *LogLine) Since(since time.Time) bool {
return l.Time.After(since)
}
-// newLogLine creates a logLine struct from a container log string
-func newLogLine(line string) (*LogLine, error) {
+// NewLogLine creates a LogLine struct from a container log string
+func NewLogLine(line string) (*LogLine, error) {
splitLine := strings.Split(line, " ")
if len(splitLine) < 4 {
return nil, errors.Errorf("'%s' is not a valid container log line", line)
}
- logTime, err := time.Parse(logTimeFormat, splitLine[0])
+ logTime, err := time.Parse(LogTimeFormat, splitLine[0])
if err != nil {
return nil, errors.Wrapf(err, "unable to convert time %s from container log", splitLine[0])
}
@@ -222,7 +156,7 @@ func newLogLine(line string) (*LogLine, error) {
 // Partial returns true if the log line is a partial log type
func (l *LogLine) Partial() bool {
- if l.ParseLogType == partialLogType {
+ if l.ParseLogType == PartialLogType {
return true
}
return false
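Log lines in this format are space-separated: timestamp, stream, a P/F partial flag, then the message. A parsing sketch using the exported helpers above; the sample line is fabricated but follows that layout, and "fmt" is assumed imported:

    nll, err := logs.NewLogLine("2019-05-09T13:20:01.123456789Z stdout F hello world")
    if err != nil {
        return err
    }
    fmt.Println(nll.Partial()) // false: "F" marks a complete line
    fmt.Println(nll.Msg)       // "hello world"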
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 93ec157c5..d978bceed 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -28,21 +28,23 @@ import (
// Get an OCICNI network config
func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP) ocicni.PodNetwork {
+ defaultNetwork := r.netPlugin.GetDefaultNetworkName()
network := ocicni.PodNetwork{
- Name: name,
- Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces
- ID: id,
- NetNS: nsPath,
- PortMappings: ports,
- Networks: networks,
+ Name: name,
+ Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces
+ ID: id,
+ NetNS: nsPath,
+ Networks: networks,
+ RuntimeConfig: map[string]ocicni.RuntimeConfig{
+ defaultNetwork: {PortMappings: ports},
+ },
}
if staticIP != nil {
- defaultNetwork := r.netPlugin.GetDefaultNetworkName()
-
network.Networks = []string{defaultNetwork}
- network.NetworkConfig = make(map[string]ocicni.NetworkConfig)
- network.NetworkConfig[defaultNetwork] = ocicni.NetworkConfig{IP: staticIP.String()}
+ network.RuntimeConfig = map[string]ocicni.RuntimeConfig{
+ defaultNetwork: {IP: staticIP.String(), PortMappings: ports},
+ }
}
return network
@@ -292,14 +294,14 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
return errors.Wrapf(err, "cannot shutdown the socket %s", apiSocket)
}
buf := make([]byte, 2048)
- len, err := conn.Read(buf)
+ readLength, err := conn.Read(buf)
if err != nil {
return errors.Wrapf(err, "cannot read from control socket %s", apiSocket)
}
// if there is no 'error' key in the received JSON data, then the operation was
// successful.
var y map[string]interface{}
- if err := json.Unmarshal(buf[0:len], &y); err != nil {
+ if err := json.Unmarshal(buf[0:readLength], &y); err != nil {
return errors.Wrapf(err, "error parsing error status from slirp4netns")
}
if e, found := y["error"]; found {
@@ -330,7 +332,9 @@ func (r *Runtime) setupNetNS(ctr *Container) (err error) {
if err != nil {
return errors.Wrapf(err, "cannot open %s", nsPath)
}
- mountPointFd.Close()
+ if err := mountPointFd.Close(); err != nil {
+ return err
+ }
if err := unix.Mount(nsProcess, nsPath, "none", unix.MS_BIND, ""); err != nil {
return errors.Wrapf(err, "cannot mount %s", nsPath)
@@ -350,12 +354,12 @@ func (r *Runtime) setupNetNS(ctr *Container) (err error) {
// Join an existing network namespace
func joinNetNS(path string) (ns.NetNS, error) {
- ns, err := ns.GetNS(path)
+ netNS, err := ns.GetNS(path)
if err != nil {
return nil, errors.Wrapf(err, "error retrieving network namespace at %s", path)
}
- return ns, nil
+ return netNS, nil
}
// Close a network namespace.
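With the change above, both the static IP and the port mappings travel through ocicni's per-network RuntimeConfig. A sketch of the structure getPodNetwork now builds in the static-IP case; the network name "podman", the addresses, and ports (assumed to be a []ocicni.PortMapping) are illustrative:

    network := ocicni.PodNetwork{
        Name:      "mypod",
        Namespace: "mypod",
        ID:        "0123456789ab",
        NetNS:     "/var/run/netns/cni-0000",
        Networks:  []string{"podman"},
        RuntimeConfig: map[string]ocicni.RuntimeConfig{
            "podman": {IP: "10.88.0.5", PortMappings: ports},
        },
    }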
diff --git a/libpod/oci.go b/libpod/oci.go
index 343738a3a..6aad79cdf 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -217,7 +217,7 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
// If not using the OCI runtime, we don't need to do most of this.
if !useRuntime {
// If the container's not running, nothing to do.
- if ctr.state.State != ContainerStateRunning && ctr.state.State != ContainerStatePaused {
+ if ctr.state.State != define.ContainerStateRunning && ctr.state.State != define.ContainerStatePaused {
return nil
}
@@ -233,7 +233,9 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
}
// Alright, it exists. Transition to Stopped state.
- ctr.state.State = ContainerStateStopped
+ ctr.state.State = define.ContainerStateStopped
+ ctr.state.PID = 0
+ ctr.state.ConmonPID = 0
// Read the exit file to get our stopped time and exit code.
return ctr.handleExitFile(exitFile, info)
@@ -261,17 +263,21 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
return errors.Wrapf(err, "error getting container %s state", ctr.ID())
}
if strings.Contains(string(out), "does not exist") {
- ctr.removeConmonFiles()
+ if err := ctr.removeConmonFiles(); err != nil {
+ logrus.Debugf("unable to remove conmon files for container %s", ctr.ID())
+ }
ctr.state.ExitCode = -1
ctr.state.FinishedTime = time.Now()
- ctr.state.State = ContainerStateExited
+ ctr.state.State = define.ContainerStateExited
return nil
}
return errors.Wrapf(err, "error getting container %s state. stderr/out: %s", ctr.ID(), out)
}
defer cmd.Wait()
- errPipe.Close()
+ if err := errPipe.Close(); err != nil {
+ return err
+ }
out, err := ioutil.ReadAll(outPipe)
if err != nil {
return errors.Wrapf(err, "error reading stdout: %s", ctr.ID())
@@ -283,13 +289,13 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
switch state.Status {
case "created":
- ctr.state.State = ContainerStateCreated
+ ctr.state.State = define.ContainerStateCreated
case "paused":
- ctr.state.State = ContainerStatePaused
+ ctr.state.State = define.ContainerStatePaused
case "running":
- ctr.state.State = ContainerStateRunning
+ ctr.state.State = define.ContainerStateRunning
case "stopped":
- ctr.state.State = ContainerStateStopped
+ ctr.state.State = define.ContainerStateStopped
default:
return errors.Wrapf(define.ErrInternal, "unrecognized status returned by runtime for container %s: %s",
ctr.ID(), state.Status)
@@ -297,7 +303,7 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
// Only grab exit status if we were not already stopped
// If we were, it should already be in the database
- if ctr.state.State == ContainerStateStopped && oldState != ContainerStateStopped {
+ if ctr.state.State == define.ContainerStateStopped && oldState != define.ContainerStateStopped {
var fi os.FileInfo
chWait := make(chan error)
defer close(chWait)
@@ -431,8 +437,8 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty
args = append(args, "--no-new-privs")
}
- for _, cap := range capAdd {
- args = append(args, "--cap", cap)
+ for _, capabilityAdd := range capAdd {
+ args = append(args, "--cap", capabilityAdd)
}
for _, envVar := range env {
@@ -473,7 +479,9 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty
for fd := 3; fd < 3+preserveFDs; fd++ {
// These fds were passed down to the runtime. Close them
// so they do not interfere
- os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close()
+ if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil {
+ logrus.Debugf("unable to close file fd-%d", fd)
+ }
}
}
@@ -482,7 +490,9 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty
// checkpointContainer checkpoints the given container
func (r *OCIRuntime) checkpointContainer(ctr *Container, options ContainerCheckpointOptions) error {
- label.SetSocketLabel(ctr.ProcessLabel())
+ if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil {
+ return err
+ }
// imagePath is used by CRIU to store the actual checkpoint files
imagePath := ctr.CheckpointPath()
// workPath will be used to store dump.log and stats-dump
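Beyond qualifying states through the define package, updateContainerStatus now also clears the stored PID and ConmonPID when a container transitions to stopped. The status mapping itself is a plain switch over the runtime's status string; a standalone sketch with an illustrative enum standing in for the real define constants:

package main

import "fmt"

type containerStatus int

const (
	stateUnknown containerStatus = iota
	stateCreated
	stateRunning
	statePaused
	stateStopped
)

// statusFromRuntime mirrors the switch in the hunk above: translate the
// OCI runtime's status string into an internal state, rejecting unknowns.
func statusFromRuntime(s string) (containerStatus, error) {
	switch s {
	case "created":
		return stateCreated, nil
	case "running":
		return stateRunning, nil
	case "paused":
		return statePaused, nil
	case "stopped":
		return stateStopped, nil
	default:
		return stateUnknown, fmt.Errorf("unrecognized status %q", s)
	}
}

func main() {
	st, err := statusFromRuntime("paused")
	fmt.Println(st, err)
}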
diff --git a/libpod/oci_linux.go b/libpod/oci_linux.go
index 7d9f47ae2..802f4311b 100644
--- a/libpod/oci_linux.go
+++ b/libpod/oci_linux.go
@@ -342,7 +342,9 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
)
plabel, err = selinux.CurrentLabel()
if err != nil {
- childPipe.Close()
+ if err := childPipe.Close(); err != nil {
+ logrus.Errorf("failed to close child pipe: %q", err)
+ }
return errors.Wrapf(err, "Failed to get current SELinux label")
}
@@ -446,6 +448,9 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
return errors.Wrapf(define.ErrInternal, "container create failed")
}
ctr.state.PID = ss.si.Pid
+ if cmd.Process != nil {
+ ctr.state.ConmonPID = cmd.Process.Pid
+ }
case <-time.After(ContainerCreateTimeout):
return errors.Wrapf(define.ErrInternal, "container creation timeout")
}
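The oci_linux.go change records conmon's PID from the exec.Cmd once the container create succeeds. cmd.Process is only populated after a successful Start(), hence the nil guard; in miniature:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("sleep", "1")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// Process is non-nil only after a successful Start; guard anyway,
	// as the hunk above does before storing ConmonPID.
	if cmd.Process != nil {
		fmt.Println("child pid:", cmd.Process.Pid)
	}
	_ = cmd.Wait()
}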
diff --git a/libpod/options.go b/libpod/options.go
index 0f23a6c97..4f8bb42df 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -300,6 +300,15 @@ func WithTmpDir(dir string) RuntimeOption {
}
}
+// WithNoStore sets a bool on the runtime indicating that no
+// containers storage is needed.
+func WithNoStore() RuntimeOption {
+ return func(rt *Runtime) error {
+ rt.noStore = true
+ return nil
+ }
+}
+
// WithMaxLogSize sets the maximum size of container logs.
// Positive sizes are limits in bytes, -1 is unlimited.
func WithMaxLogSize(limit int64) RuntimeOption {
@@ -316,7 +325,7 @@ func WithMaxLogSize(limit int64) RuntimeOption {
// WithNoPivotRoot sets the runtime to use MS_MOVE instead of PIVOT_ROOT when
// starting containers.
-func WithNoPivotRoot(noPivot bool) RuntimeOption {
+func WithNoPivotRoot() RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
return config2.ErrRuntimeFinalized
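WithNoStore and the simplified WithNoPivotRoot both follow libpod's functional-option pattern: an option is a closure that mutates the Runtime before it is finalized, and refuses to run afterwards. A reduced sketch under assumed names (runtime and newRuntime here are stand-ins, not the real types):

package main

import (
	"errors"
	"fmt"
)

type runtime struct {
	valid   bool
	noStore bool
}

// RuntimeOption is a closure applied to the runtime during construction.
type RuntimeOption func(*runtime) error

func WithNoStore() RuntimeOption {
	return func(rt *runtime) error {
		if rt.valid {
			return errors.New("runtime already finalized")
		}
		rt.noStore = true
		return nil
	}
}

func newRuntime(opts ...RuntimeOption) (*runtime, error) {
	rt := &runtime{}
	for _, opt := range opts {
		if err := opt(rt); err != nil {
			return nil, err
		}
	}
	rt.valid = true
	return rt, nil
}

func main() {
	rt, err := newRuntime(WithNoStore())
	fmt.Println(rt.noStore, err)
}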
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index 3126ced4c..c7b0353bd 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -113,7 +113,7 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
}
// Ignore containers that are not running
- if ctr.state.State != ContainerStateRunning {
+ if ctr.state.State != define.ContainerStateRunning {
ctr.lock.Unlock()
continue
}
@@ -181,7 +181,7 @@ func (p *Pod) Pause() (map[string]error, error) {
}
// Ignore containers that are not running
- if ctr.state.State != ContainerStateRunning {
+ if ctr.state.State != define.ContainerStateRunning {
ctr.lock.Unlock()
continue
}
@@ -240,7 +240,7 @@ func (p *Pod) Unpause() (map[string]error, error) {
}
// Ignore containers that are not paused
- if ctr.state.State != ContainerStatePaused {
+ if ctr.state.State != define.ContainerStatePaused {
ctr.lock.Unlock()
continue
}
@@ -353,7 +353,7 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) {
}
// Ignore containers that are not running
- if ctr.state.State != ContainerStateRunning {
+ if ctr.state.State != define.ContainerStateRunning {
ctr.lock.Unlock()
continue
}
@@ -383,7 +383,7 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) {
// Status gets the status of all containers in the pod
// Returns a map of Container ID to Container Status
-func (p *Pod) Status() (map[string]ContainerStatus, error) {
+func (p *Pod) Status() (map[string]define.ContainerStatus, error) {
p.lock.Lock()
defer p.lock.Unlock()
@@ -403,7 +403,7 @@ func (p *Pod) Status() (map[string]ContainerStatus, error) {
}
// Now that all containers are locked, get their status
- status := make(map[string]ContainerStatus, len(allCtrs))
+ status := make(map[string]define.ContainerStatus, len(allCtrs))
for _, ctr := range allCtrs {
if err := ctr.syncContainer(); err != nil {
return nil, err
diff --git a/libpod/pod_top_linux.go b/libpod/pod_top_linux.go
index e08e5e83a..80221c3a9 100644
--- a/libpod/pod_top_linux.go
+++ b/libpod/pod_top_linux.go
@@ -6,6 +6,7 @@ import (
"strconv"
"strings"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/psgo"
)
@@ -34,7 +35,7 @@ func (p *Pod) GetPodPidInformation(descriptors []string) ([]string, error) {
c.lock.Unlock()
return nil, err
}
- if c.state.State == ContainerStateRunning {
+ if c.state.State == define.ContainerStateRunning {
pid := strconv.Itoa(c.state.PID)
pids = append(pids, pid)
}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 97eb565cc..53c9a1209 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -10,6 +10,7 @@ import (
"strings"
"sync"
"syscall"
+ "time"
"github.com/BurntSushi/toml"
is "github.com/containers/image/storage"
@@ -73,14 +74,17 @@ var (
OverrideConfigPath = etcDir + "/containers/libpod.conf"
// DefaultInfraImage to use for infra container
- DefaultInfraImage = "k8s.gcr.io/pause:3.1"
+
// DefaultInfraCommand to be run in an infra container
- DefaultInfraCommand = "/pause"
// DefaultSHMLockPath is the default path for SHM locks
DefaultSHMLockPath = "/libpod_lock"
// DefaultRootlessSHMLockPath is the default path for rootless SHM locks
DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
+
+ // DefaultDetachKeys is the default keys sequence for detaching a
+ // container
+ DefaultDetachKeys = "ctrl-p,ctrl-q"
)
// A RuntimeOption is a functional option which alters the Runtime created by
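DefaultDetachKeys adopts Docker's familiar ctrl-p,ctrl-q sequence. Attach code generally converts such a comma-separated list into the raw bytes it scans for on stdin; assuming Docker's term package, which libpod vendors, the conversion looks like this:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// "ctrl-p,ctrl-q" becomes the byte sequence [16 17] (DLE, DC1).
	keys, err := term.ToBytes("ctrl-p,ctrl-q")
	if err != nil {
		panic(err)
	}
	fmt.Printf("detach sequence: %v\n", keys)
}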
@@ -121,6 +125,9 @@ type Runtime struct {
// mechanism to read and write event logs
eventer events.Eventer
+
+ // noStore indicates whether we need to interact with a store or not
+ noStore bool
}
// RuntimeConfig contains configuration options used to set up the runtime
@@ -232,10 +239,15 @@ type RuntimeConfig struct {
// pods.
NumLocks uint32 `toml:"num_locks,omitempty"`
+ // LockType is the type of locking to use.
+ LockType string `toml:"lock_type,omitempty"`
+
// EventsLogger determines where events should be logged
EventsLogger string `toml:"events_logger"`
// EventsLogFilePath is where the events log is stored.
- EventsLogFilePath string `toml:-"events_logfile_path"`
+ EventsLogFilePath string `toml:"events_logfile_path"`
+ // DetachKeys is the sequence of keys used to detach a container
+ DetachKeys string `toml:"detach_keys"`
}
// runtimeConfiguredFrom is a struct used during early runtime init to help
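These struct tags drive decoding through BurntSushi/toml, which this file already imports, and they explain why a malformed events_logfile_path tag matters: a key that does not match is silently ignored. A standalone sketch of the tag-to-key mapping:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type config struct {
	EventsLogger string `toml:"events_logger"`
	DetachKeys   string `toml:"detach_keys"`
	LockType     string `toml:"lock_type,omitempty"`
}

func main() {
	var c config
	data := `
events_logger = "journald"
detach_keys = "ctrl-p,ctrl-q"
lock_type = "shm"
`
	if _, err := toml.Decode(data, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c)
}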
@@ -302,15 +314,57 @@ func defaultRuntimeConfig() (RuntimeConfig, error) {
NoPivotRoot: false,
CNIConfigDir: etcDir + "/cni/net.d/",
CNIPluginDir: []string{"/usr/libexec/cni", "/usr/lib/cni", "/usr/local/lib/cni", "/opt/cni/bin"},
- InfraCommand: DefaultInfraCommand,
- InfraImage: DefaultInfraImage,
+ InfraCommand: define.DefaultInfraCommand,
+ InfraImage: define.DefaultInfraImage,
EnablePortReservation: true,
EnableLabeling: true,
NumLocks: 2048,
EventsLogger: events.DefaultEventerType.String(),
+ DetachKeys: DefaultDetachKeys,
+ LockType: "shm",
}, nil
}
+// SetXdgRuntimeDir ensures the XDG_RUNTIME_DIR env variable is set.
+// containers/image uses XDG_RUNTIME_DIR to locate the auth file.
+// It internally calls EnableLinger() so that the user's processes are not
+// killed once the session is terminated. EnableLinger() also attempts to
+// get the runtime directory when XDG_RUNTIME_DIR is not specified.
+func SetXdgRuntimeDir() error {
+ if !rootless.IsRootless() {
+ return nil
+ }
+
+ runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
+
+ runtimeDirLinger, err := rootless.EnableLinger()
+ if err != nil {
+ return errors.Wrapf(err, "error enabling user session")
+ }
+ if runtimeDir == "" && runtimeDirLinger != "" {
+ if _, err := os.Stat(runtimeDirLinger); err != nil && os.IsNotExist(err) {
+ chWait := make(chan error)
+ defer close(chWait)
+ if _, err := WaitForFile(runtimeDirLinger, chWait, time.Second*10); err != nil {
+ return errors.Wrapf(err, "waiting for directory '%s'", runtimeDirLinger)
+ }
+ }
+ runtimeDir = runtimeDirLinger
+ }
+
+ if runtimeDir == "" {
+ var err error
+ runtimeDir, err = util.GetRootlessRuntimeDir()
+ if err != nil {
+ return err
+ }
+ }
+ if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
+ return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
+ }
+ return nil
+}
+
func getDefaultTmpDir() (string, error) {
if !rootless.IsRootless() {
return "/var/run/libpod", nil
@@ -333,25 +387,6 @@ func getDefaultTmpDir() (string, error) {
return filepath.Join(libpodRuntimeDir, "tmp"), nil
}
-// SetXdgRuntimeDir ensures the XDG_RUNTIME_DIR env variable is set
-// containers/image uses XDG_RUNTIME_DIR to locate the auth file.
-func SetXdgRuntimeDir(val string) error {
- if !rootless.IsRootless() {
- return nil
- }
- if val == "" {
- var err error
- val, err = util.GetRootlessRuntimeDir()
- if err != nil {
- return err
- }
- }
- if err := os.Setenv("XDG_RUNTIME_DIR", val); err != nil {
- return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
- }
- return nil
-}
-
// NewRuntime creates a new container runtime
// Options can be passed to override the default configuration for the runtime
func NewRuntime(ctx context.Context, options ...RuntimeOption) (runtime *Runtime, err error) {
@@ -373,7 +408,7 @@ func NewRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
func homeDir() (string, error) {
home := os.Getenv("HOME")
if home == "" {
- usr, err := user.Current()
+ usr, err := user.LookupId(fmt.Sprintf("%d", rootless.GetRootlessUID()))
if err != nil {
return "", errors.Wrapf(err, "unable to resolve HOME directory")
}
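homeDir now resolves the home directory from the rootless UID rather than user.Current(), which is presumably less reliable inside a rootless user namespace. os/user.LookupId takes the uid as a decimal string, hence the fmt.Sprintf:

package main

import (
	"fmt"
	"os"
	"os/user"
)

func main() {
	// Look up the passwd entry for the current uid; libpod substitutes
	// the rootless uid here instead of os.Getuid().
	u, err := user.LookupId(fmt.Sprintf("%d", os.Getuid()))
	if err != nil {
		panic(err)
	}
	fmt.Println(u.HomeDir)
}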
@@ -391,28 +426,33 @@ func getRootlessConfigPath() (string, error) {
return filepath.Join(home, ".config/containers/libpod.conf"), nil
}
-func getConfigPath() string {
+func getConfigPath() (string, error) {
if rootless.IsRootless() {
- rootlessConfigPath, err := getRootlessConfigPath()
+ path, err := getRootlessConfigPath()
if err != nil {
- if _, err := os.Stat(rootlessConfigPath); err == nil {
- return rootlessConfigPath
- }
+ return "", err
+ }
+ if _, err := os.Stat(path); err == nil {
+ return path, nil
}
+ return "", err
}
if _, err := os.Stat(OverrideConfigPath); err == nil {
// Use the override configuration path
- return OverrideConfigPath
+ return OverrideConfigPath, nil
}
if _, err := os.Stat(ConfigPath); err == nil {
- return ConfigPath
+ return ConfigPath, nil
}
- return ""
+ return "", nil
}
// DefaultRuntimeConfig reads default config path and returns the RuntimeConfig
func DefaultRuntimeConfig() (*RuntimeConfig, error) {
- configPath := getConfigPath()
+ configPath, err := getConfigPath()
+ if err != nil {
+ return nil, err
+ }
contents, err := ioutil.ReadFile(configPath)
if err != nil {
@@ -460,8 +500,10 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
runtime.config.StaticDir = filepath.Join(storageConf.GraphRoot, "libpod")
runtime.config.VolumePath = filepath.Join(storageConf.GraphRoot, "volumes")
- configPath := getConfigPath()
- rootlessConfigPath := ""
+ configPath, err := getConfigPath()
+ if err != nil {
+ return nil, err
+ }
if rootless.IsRootless() {
home, err := homeDir()
if err != nil {
@@ -473,23 +515,6 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
runtime.config.SignaturePolicyPath = newPath
}
}
-
- rootlessConfigPath, err = getRootlessConfigPath()
- if err != nil {
- return nil, err
- }
-
- runtimeDir, err := util.GetRootlessRuntimeDir()
- if err != nil {
- return nil, err
- }
-
- // containers/image uses XDG_RUNTIME_DIR to locate the auth file.
- // So make sure the env variable is set.
- if err := SetXdgRuntimeDir(runtimeDir); err != nil {
- return nil, errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
- }
-
}
if userConfigPath != "" {
@@ -599,7 +624,13 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
return nil, errors.Wrapf(err, "error configuring runtime")
}
}
- if rootlessConfigPath != "" {
+
+ if rootless.IsRootless() && configPath == "" {
+ configPath, err := getRootlessConfigPath()
+ if err != nil {
+ return nil, err
+ }
+
// storage.conf
storageConfFile, err := storage.DefaultConfigFile(rootless.IsRootless())
if err != nil {
@@ -612,16 +643,20 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
}
if configPath != "" {
- os.MkdirAll(filepath.Dir(rootlessConfigPath), 0755)
- file, err := os.OpenFile(rootlessConfigPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err := os.MkdirAll(filepath.Dir(configPath), 0755); err != nil {
+ return nil, err
+ }
+ file, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
if err != nil && !os.IsExist(err) {
- return nil, errors.Wrapf(err, "cannot open file %s", rootlessConfigPath)
+ return nil, errors.Wrapf(err, "cannot open file %s", configPath)
}
if err == nil {
defer file.Close()
enc := toml.NewEncoder(file)
if err := enc.Encode(runtime.config); err != nil {
- os.Remove(rootlessConfigPath)
+ if removeErr := os.Remove(configPath); removeErr != nil {
+ logrus.Debugf("unable to remove %s: %q", configPath, err)
+ }
}
}
}
@@ -632,6 +667,62 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
return runtime, nil
}
+func getLockManager(runtime *Runtime) (lock.Manager, error) {
+ var err error
+ var manager lock.Manager
+
+ switch runtime.config.LockType {
+ case "file":
+ lockPath := filepath.Join(runtime.config.TmpDir, "locks")
+ manager, err = lock.OpenFileLockManager(lockPath)
+ if err != nil {
+ if os.IsNotExist(errors.Cause(err)) {
+ manager, err = lock.NewFileLockManager(lockPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to get new file lock manager")
+ }
+ } else {
+ return nil, err
+ }
+ }
+
+ case "", "shm":
+ lockPath := DefaultSHMLockPath
+ if rootless.IsRootless() {
+ lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
+ }
+ // Set up the lock manager
+ manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ if os.IsNotExist(errors.Cause(err)) {
+ manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to get new shm lock manager")
+ }
+ } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
+ logrus.Debugf("Number of locks does not match - removing old locks")
+
+ // ERANGE indicates a lock numbering mismatch.
+ // Since we're renumbering, this is not fatal.
+ // Remove the earlier set of locks and recreate.
+ if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
+ return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
+ }
+
+ manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+ }
+ default:
+ return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.LockType)
+ }
+ return manager, nil
+}
+
// Make a new runtime based on the given configuration
// Sets up containers/storage, state store, OCI runtime
func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
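getLockManager leans on the os.IsNotExist(errors.Cause(err)) idiom: a pkg/errors wrapper hides the underlying *PathError from os.IsNotExist, and Cause unwraps back to the original so classification still works. In isolation:

package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func open(path string) error {
	if _, err := os.Stat(path); err != nil {
		// Wrapping adds context but changes the error's concrete type.
		return errors.Wrapf(err, "checking %s", path)
	}
	return nil
}

func main() {
	err := open("/does/not/exist")
	fmt.Println(os.IsNotExist(err))               // false: the wrapper hides it
	fmt.Println(os.IsNotExist(errors.Cause(err))) // true: unwrapped
}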
@@ -760,11 +851,14 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
var store storage.Store
if os.Geteuid() != 0 {
logrus.Debug("Not configuring container store")
+ } else if runtime.noStore {
+ logrus.Debug("No store required. Not opening container store.")
} else {
store, err = storage.GetStore(runtime.config.StorageConfig)
if err != nil {
return err
}
+ err = nil
defer func() {
if err != nil && store != nil {
@@ -1014,37 +1108,10 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
}
}
- lockPath := DefaultSHMLockPath
- if rootless.IsRootless() {
- lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
- }
- // Set up the lock manager
- manager, err := lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
+ runtime.lockManager, err = getLockManager(runtime)
if err != nil {
- if os.IsNotExist(errors.Cause(err)) {
- manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
- if err != nil {
- return errors.Wrapf(err, "failed to get new shm lock manager")
- }
- } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
- logrus.Debugf("Number of locks does not match - removing old locks")
-
- // ERANGE indicates a lock numbering mismatch.
- // Since we're renumbering, this is not fatal.
- // Remove the earlier set of locks and recreate.
- if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
- return errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
- }
-
- manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
- if err != nil {
- return err
- }
- } else {
- return err
- }
+ return err
}
- runtime.lockManager = manager
// If we're renumbering locks, do it now.
// It breaks out of normal runtime init, and will not return a valid
@@ -1124,6 +1191,8 @@ func (r *Runtime) Shutdown(force bool) error {
}
var lastError error
+ // If no store was requested, it can be nil and there is no need to
+ // attempt to shut it down
if r.store != nil {
if _, err := r.store.Shutdown(force); err != nil {
lastError = errors.Wrapf(err, "Error shutting down container storage")
@@ -1191,21 +1260,21 @@ func (r *Runtime) refresh(alivePath string) error {
}
// Info returns the store and host information
-func (r *Runtime) Info() ([]InfoData, error) {
- info := []InfoData{}
+func (r *Runtime) Info() ([]define.InfoData, error) {
+ info := []define.InfoData{}
// get host information
hostInfo, err := r.hostInfo()
if err != nil {
return nil, errors.Wrapf(err, "error getting host info")
}
- info = append(info, InfoData{Type: "host", Data: hostInfo})
+ info = append(info, define.InfoData{Type: "host", Data: hostInfo})
// get store information
storeInfo, err := r.storeInfo()
if err != nil {
return nil, errors.Wrapf(err, "error getting store info")
}
- info = append(info, InfoData{Type: "store", Data: storeInfo})
+ info = append(info, define.InfoData{Type: "store", Data: storeInfo})
reg, err := sysreg.GetRegistries()
if err != nil {
@@ -1225,7 +1294,7 @@ func (r *Runtime) Info() ([]InfoData, error) {
return nil, errors.Wrapf(err, "error getting registries")
}
registries["blocked"] = breg
- info = append(info, InfoData{Type: "registries", Data: registries})
+ info = append(info, define.InfoData{Type: "registries", Data: registries})
return info, nil
}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 79e18dcd1..ae9b3e5bc 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -52,7 +52,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
if err != nil {
return nil, errors.Wrapf(err, "error initializing container variables")
}
- return r.setupContainer(ctx, ctr, true)
+ return r.setupContainer(ctx, ctr)
}
func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (c *Container, err error) {
@@ -68,6 +68,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
ctr.config.ShmSize = DefaultShmSize
} else {
// This is a restore from an imported checkpoint
+ ctr.restoreFromCheckpoint = true
if err := JSONDeepCopy(config, ctr.config); err != nil {
return nil, errors.Wrapf(err, "error copying container config for restore")
}
@@ -119,10 +120,10 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
return nil, errors.Wrapf(err, "error running container create option")
}
}
- return r.setupContainer(ctx, ctr, false)
+ return r.setupContainer(ctx, ctr)
}
-func (r *Runtime) setupContainer(ctx context.Context, ctr *Container, restore bool) (c *Container, err error) {
+func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Container, err error) {
// Allocate a lock for the container
lock, err := r.lockManager.AllocateLock()
if err != nil {
@@ -132,8 +133,16 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container, restore bo
ctr.config.LockID = ctr.lock.ID()
logrus.Debugf("Allocated lock %d for container %s", ctr.lock.ID(), ctr.ID())
+ defer func() {
+ if err != nil {
+ if err2 := ctr.lock.Free(); err2 != nil {
+ logrus.Errorf("Error freeing lock for container after creation failed: %v", err2)
+ }
+ }
+ }()
+
ctr.valid = true
- ctr.state.State = ContainerStateConfigured
+ ctr.state.State = config2.ContainerStateConfigured
ctr.runtime = r
if ctr.config.OCIRuntime == "" {
@@ -203,7 +212,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container, restore bo
return nil, errors.Wrapf(config2.ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager)
}
- if restore {
+ if ctr.restoreFromCheckpoint {
// Remove information about bind mount
// for new container from imported checkpoint
g := generate.Generator{Config: ctr.config.Spec}
@@ -228,7 +237,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container, restore bo
}
}()
- if rootless.IsRootless() && ctr.config.ConmonPidFile == "" {
+ if ctr.config.ConmonPidFile == "" {
ctr.config.ConmonPidFile = filepath.Join(ctr.state.RunDir, "conmon.pid")
}
@@ -370,7 +379,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
- if c.state.State == ContainerStatePaused {
+ if c.state.State == config2.ContainerStatePaused {
if err := c.ociRuntime.killContainer(c, 9); err != nil {
return err
}
@@ -384,7 +393,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
// Check that the container's in a good state to be removed
- if c.state.State == ContainerStateRunning {
+ if c.state.State == config2.ContainerStateRunning {
if err := c.ociRuntime.stopContainer(c, c.StopTimeout()); err != nil {
return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID())
}
@@ -422,22 +431,17 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
// If we're removing the pod, the container will be evicted
// from the state elsewhere
if !removePod {
- if err := r.state.RemoveContainerFromPod(pod, c); err != nil {
- if cleanupErr == nil {
- cleanupErr = err
- } else {
- logrus.Errorf("removing container from pod: %v", err)
- }
- }
- }
- } else {
- if err := r.state.RemoveContainer(c); err != nil {
if cleanupErr == nil {
cleanupErr = err
} else {
- logrus.Errorf("removing container: %v", err)
+ logrus.Errorf("removing container from pod: %v", err)
}
}
+ } else {
+ if err := r.state.RemoveContainer(c); err != nil {
+ cleanupErr = err
+ logrus.Errorf("removing container: %v", err)
+ }
}
// Set container as invalid so it can no longer be used
@@ -464,8 +468,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
// Delete the container.
// Not needed in Configured and Exited states, where the container
// doesn't exist in the runtime
- if c.state.State != ContainerStateConfigured &&
- c.state.State != ContainerStateExited {
+ if c.state.State != config2.ContainerStateConfigured &&
+ c.state.State != config2.ContainerStateExited {
if err := c.delete(ctx); err != nil {
if cleanupErr == nil {
cleanupErr = err
@@ -582,7 +586,7 @@ func (r *Runtime) GetAllContainers() ([]*Container, error) {
func (r *Runtime) GetRunningContainers() ([]*Container, error) {
running := func(c *Container) bool {
state, _ := c.State()
- return state == ContainerStateRunning
+ return state == config2.ContainerStateRunning
}
return r.GetContainers(running)
}
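setupContainer above (and NewPod in runtime_pod_linux.go below) both use the same recovery idiom: a named error return inspected inside a defer, so a lock allocated early is freed whenever a later step fails. Reduced to a sketch:

package main

import (
	"errors"
	"fmt"
)

type lock struct{ id int }

func (l *lock) Free() error {
	fmt.Println("freed lock", l.id)
	return nil
}

// setup allocates a lock first; the deferred closure observes the named
// return err and releases the lock only on failure.
func setup(fail bool) (err error) {
	l := &lock{id: 42}
	defer func() {
		if err != nil {
			if err2 := l.Free(); err2 != nil {
				fmt.Println("error freeing lock:", err2)
			}
		}
	}()
	if fail {
		return errors.New("later setup step failed")
	}
	return nil
}

func main() {
	fmt.Println(setup(true))
	fmt.Println(setup(false))
}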
diff --git a/libpod/runtime_migrate.go b/libpod/runtime_migrate.go
index e32e6edf6..c363991e6 100644
--- a/libpod/runtime_migrate.go
+++ b/libpod/runtime_migrate.go
@@ -5,6 +5,7 @@ package libpod
import (
"context"
"fmt"
+ "github.com/containers/libpod/pkg/util"
"io/ioutil"
"os"
"path/filepath"
@@ -12,7 +13,6 @@ import (
"syscall"
"github.com/containers/libpod/pkg/rootless"
- "github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -37,7 +37,9 @@ func stopPauseProcess() error {
if err := os.Remove(pausePidPath); err != nil {
return errors.Wrapf(err, "cannot delete pause pid file %s", pausePidPath)
}
- syscall.Kill(pausePid, syscall.SIGKILL)
+ if err := syscall.Kill(pausePid, syscall.SIGKILL); err != nil {
+ return err
+ }
}
return nil
}
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index e9ce130da..d667d3a25 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -19,7 +19,7 @@ import (
)
// NewPod makes a new, empty pod
-func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod, error) {
+func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Pod, Err error) {
r.lock.Lock()
defer r.lock.Unlock()
@@ -60,6 +60,14 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
pod.lock = lock
pod.config.LockID = pod.lock.ID()
+ defer func() {
+ if Err != nil {
+ if err := pod.lock.Free(); err != nil {
+ logrus.Errorf("Error freeing pod lock after failed creation: %v", err)
+ }
+ }
+ }()
+
pod.valid = true
// Check CGroup parent sanity, and set it if it was not set
@@ -113,15 +121,17 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
if err := r.state.AddPod(pod); err != nil {
return nil, errors.Wrapf(err, "error adding pod to state")
}
+ defer func() {
+ if Err != nil {
+ if err := r.removePod(ctx, pod, true, true); err != nil {
+ logrus.Errorf("Error removing pod after pause container creation failure: %v", err)
+ }
+ }
+ }()
if pod.HasInfraContainer() {
ctr, err := r.createInfraContainer(ctx, pod)
if err != nil {
- // Tear down pod, as it is assumed a the pod will contain
- // a pause container, and it does not.
- if err2 := r.removePod(ctx, pod, true, true); err2 != nil {
- logrus.Errorf("Error removing pod after pause container creation failure: %v", err2)
- }
return nil, errors.Wrapf(err, "error adding Infra Container")
}
pod.state.InfraContainerID = ctr.ID()
diff --git a/libpod/state_test.go b/libpod/state_test.go
index be68a2d69..26a1dee7d 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -8,6 +8,7 @@ import (
"testing"
"time"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/lock"
"github.com/containers/storage"
"github.com/stretchr/testify/assert"
@@ -700,7 +701,7 @@ func TestSaveAndUpdateContainer(t *testing.T) {
retrievedCtr, err := state.Container(testCtr.ID())
require.NoError(t, err)
- retrievedCtr.state.State = ContainerStateStopped
+ retrievedCtr.state.State = define.ContainerStateStopped
retrievedCtr.state.ExitCode = 127
retrievedCtr.state.FinishedTime = time.Now()
@@ -729,7 +730,7 @@ func TestSaveAndUpdateContainerSameNamespaceSucceeds(t *testing.T) {
retrievedCtr, err := state.Container(testCtr.ID())
assert.NoError(t, err)
- retrievedCtr.state.State = ContainerStateStopped
+ retrievedCtr.state.State = define.ContainerStateStopped
retrievedCtr.state.ExitCode = 127
retrievedCtr.state.FinishedTime = time.Now()
diff --git a/libpod/stats.go b/libpod/stats.go
index e003f145b..52af824bb 100644
--- a/libpod/stats.go
+++ b/libpod/stats.go
@@ -3,6 +3,7 @@
package libpod
import (
+ "runtime"
"strings"
"syscall"
"time"
@@ -26,7 +27,7 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
}
}
- if c.state.State != ContainerStateRunning {
+ if c.state.State != define.ContainerStateRunning {
return stats, define.ErrCtrStateInvalid
}
@@ -45,10 +46,6 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
return stats, errors.Wrapf(err, "unable to obtain cgroup stats")
}
conState := c.state.State
- if err != nil {
- return stats, errors.Wrapf(err, "unable to determine container state")
- }
-
netStats, err := getContainerNetIO(c)
if err != nil {
return nil, err
@@ -61,7 +58,7 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
stats.MemLimit = getMemLimit(cgroupStats.Memory.Usage.Limit)
stats.MemPerc = (float64(stats.MemUsage) / float64(stats.MemLimit)) * 100
stats.PIDs = 0
- if conState == ContainerStateRunning {
+ if conState == define.ContainerStateRunning {
stats.PIDs = cgroupStats.Pids.Current
}
stats.BlockInput, stats.BlockOutput = calculateBlockIO(cgroupStats)
@@ -105,7 +102,11 @@ func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, previousSystem uin
if systemDelta > 0.0 && cpuDelta > 0.0 {
// gets a ratio of container cpu usage total, multiplies it by the number of cores (4 cores running
// at 100% utilization should be 400% utilization), and multiplies that by 100 to get a percentage
- cpuPercent = (cpuDelta / systemDelta) * float64(len(stats.CPU.Usage.PerCPU)) * 100
+ nCPUS := len(stats.CPU.Usage.PerCPU)
+ if nCPUS == 0 {
+ nCPUS = runtime.NumCPU()
+ }
+ cpuPercent = (cpuDelta / systemDelta) * float64(nCPUS) * 100
}
return cpuPercent
}
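The stats fix falls back to runtime.NumCPU() when the cgroup reports no per-CPU usage entries, a case that previously zeroed the percentage. The same arithmetic in isolation, with a worked call:

package main

import (
	"fmt"
	"runtime"
)

// cpuPercent reproduces the calculation above: the container's share of
// system CPU time over the interval, scaled by the core count.
func cpuPercent(cpuDelta, systemDelta float64, perCPU []uint64) float64 {
	if systemDelta <= 0.0 || cpuDelta <= 0.0 {
		return 0
	}
	nCPUs := len(perCPU)
	if nCPUs == 0 {
		nCPUs = runtime.NumCPU()
	}
	return (cpuDelta / systemDelta) * float64(nCPUs) * 100
}

func main() {
	// 2 CPUs, container consumed half of total system time: 100%.
	fmt.Printf("%.1f%%\n", cpuPercent(5e8, 1e9, make([]uint64, 2)))
}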
diff --git a/libpod/util.go b/libpod/util.go
index 7b3a03785..b60575264 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -9,8 +9,6 @@ import (
"strings"
"time"
- "github.com/containers/image/signature"
- "github.com/containers/image/types"
"github.com/containers/libpod/libpod/define"
"github.com/fsnotify/fsnotify"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -24,17 +22,6 @@ const (
DefaultTransport = "docker://"
)
-// OpenExclusiveFile opens a file for writing and ensure it doesn't already exist
-func OpenExclusiveFile(path string) (*os.File, error) {
- baseDir := filepath.Dir(path)
- if baseDir != "" {
- if _, err := os.Stat(baseDir); err != nil {
- return nil, err
- }
- }
- return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
-}
-
// FuncTimer helps measure the execution time of a function
// For debug purposes, do not leave in code
// used like defer FuncTimer("foo")
@@ -43,24 +30,6 @@ func FuncTimer(funcName string) {
fmt.Printf("%s executed in %d ms\n", funcName, elapsed)
}
-// CopyStringStringMap deep copies a map[string]string and returns the result
-func CopyStringStringMap(m map[string]string) map[string]string {
- n := map[string]string{}
- for k, v := range m {
- n[k] = v
- }
- return n
-}
-
-// GetPolicyContext creates a signature policy context for the given signature policy path
-func GetPolicyContext(path string) (*signature.PolicyContext, error) {
- policy, err := signature.DefaultPolicy(&types.SystemContext{SignaturePolicyPath: path})
- if err != nil {
- return nil, err
- }
- return signature.NewPolicyContext(policy)
-}
-
// RemoveScientificNotationFromFloat returns a float without any
// scientific notation if the number has any.
// golang does not handle conversion of float64s that have scientific