Diffstat (limited to 'libpod')
-rw-r--r-- | libpod/boltdb_state.go | 14
-rw-r--r-- | libpod/container.go | 20
-rw-r--r-- | libpod/container_attach_linux.go | 6
-rw-r--r-- | libpod/container_attach_linux_cgo.go | 11
-rw-r--r-- | libpod/container_attach_linux_nocgo.go | 7
-rw-r--r-- | libpod/container_inspect.go | 2
-rw-r--r-- | libpod/container_internal.go | 5
-rw-r--r-- | libpod/image/pull.go | 4
-rw-r--r-- | libpod/lock/file/file_lock.go | 175
-rw-r--r-- | libpod/lock/file/file_lock_test.go | 74
-rw-r--r-- | libpod/lock/file_lock_manager.go | 110
-rw-r--r-- | libpod/lock/shm/shm_lock.go | 4
-rw-r--r-- | libpod/lock/shm/shm_lock_nocgo.go | 102
-rw-r--r-- | libpod/networking_linux.go | 22
-rw-r--r-- | libpod/oci.go | 2
-rw-r--r-- | libpod/oci_linux.go | 3
-rw-r--r-- | libpod/options.go | 9
-rw-r--r-- | libpod/runtime.go | 99
-rw-r--r-- | libpod/runtime_ctr.go | 8
-rw-r--r-- | libpod/runtime_pod_linux.go | 22
-rw-r--r-- | libpod/stats.go | 7
21 files changed, 638 insertions(+), 68 deletions(-)
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index b047c9fa0..4dda3a7f0 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -381,11 +381,6 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
 	defer s.closeDBCon(db)

 	err = db.View(func(tx *bolt.Tx) error {
-		idBucket, err := getIDBucket(tx)
-		if err != nil {
-			return err
-		}
-
 		ctrBucket, err := getCtrBucket(tx)
 		if err != nil {
 			return err
@@ -436,7 +431,7 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
 			// We were not given a full container ID or name.
 			// Search for partial ID matches.
 			exists := false
-			err = idBucket.ForEach(func(checkID, checkName []byte) error {
+			err = ctrBucket.ForEach(func(checkID, checkName []byte) error {
 				// If the container isn't in our namespace, we
 				// can't match it
 				if s.namespaceBytes != nil {
@@ -963,11 +958,6 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
 	defer s.closeDBCon(db)

 	err = db.View(func(tx *bolt.Tx) error {
-		idBucket, err := getIDBucket(tx)
-		if err != nil {
-			return err
-		}
-
 		podBkt, err := getPodBucket(tx)
 		if err != nil {
 			return err
@@ -1015,7 +1005,7 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
 			// They did not give us a full pod name or ID.
 			// Search for partial ID matches.
 			exists := false
-			err = idBucket.ForEach(func(checkID, checkName []byte) error {
+			err = podBkt.ForEach(func(checkID, checkName []byte) error {
 				// If the pod isn't in our namespace, we
 				// can't match it
 				if s.namespaceBytes != nil {
diff --git a/libpod/container.go b/libpod/container.go
index 713386477..bfbc47d76 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -168,6 +168,8 @@ type ContainerState struct {
 	OOMKilled bool `json:"oomKilled,omitempty"`
 	// PID is the PID of a running container
 	PID int `json:"pid,omitempty"`
+	// ConmonPID is the PID of the container's conmon
+	ConmonPID int `json:"conmonPid,omitempty"`
 	// ExecSessions contains active exec sessions for container
 	// Exec session ID is mapped to PID of exec process
 	ExecSessions map[string]*ExecSession `json:"execSessions,omitempty"`
@@ -849,7 +851,7 @@ func (c *Container) OOMKilled() (bool, error) {
 	return c.state.OOMKilled, nil
 }

-// PID returns the PID of the container
+// PID returns the PID of the container.
 // If the container is not running, a pid of 0 will be returned. No error will
 // occur.
 func (c *Container) PID() (int, error) {
@@ -865,6 +867,22 @@ func (c *Container) PID() (int, error) {
 	return c.state.PID, nil
 }

+// ConmonPID returns the PID of the container's conmon process.
+// If the container is not running, a PID of 0 will be returned. No error will
+// occur.
+func (c *Container) ConmonPID() (int, error) {
+	if !c.batched {
+		c.lock.Lock()
+		defer c.lock.Unlock()
+
+		if err := c.syncContainer(); err != nil {
+			return -1, err
+		}
+	}
+
+	return c.state.ConmonPID, nil
+}
+
 // ExecSessions retrieves active exec sessions running in the container
 func (c *Container) ExecSessions() ([]string, error) {
 	if !c.batched {
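The new accessor mirrors the existing PID() getter, taking the same lock-and-sync path before reading state. A minimal sketch of a caller (printPIDs is a hypothetical helper, and obtaining the *libpod.Container from a Runtime is elided):

package example

import (
	"fmt"

	"github.com/containers/libpod/libpod"
)

// printPIDs reads the container's own PID and the PID of its conmon
// monitor. Both accessors return 0 without error when the container
// is not running.
func printPIDs(ctr *libpod.Container) error {
	pid, err := ctr.PID()
	if err != nil {
		return err
	}
	conmonPID, err := ctr.ConmonPID()
	if err != nil {
		return err
	}
	fmt.Printf("container %s: pid=%d conmon-pid=%d\n", ctr.ID(), pid, conmonPID)
	return nil
}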
diff --git a/libpod/container_attach_linux.go b/libpod/container_attach_linux.go
index f5aac5794..fc53268c3 100644
--- a/libpod/container_attach_linux.go
+++ b/libpod/container_attach_linux.go
@@ -19,10 +19,6 @@ import (
 	"k8s.io/client-go/tools/remotecommand"
 )

-//#include <sys/un.h>
-// extern int unix_path_length(){struct sockaddr_un addr; return sizeof(addr.sun_path) - 1;}
-import "C"
-
 /* Sync with stdpipe_t in conmon.c */
 const (
 	AttachPipeStdin = 1
@@ -80,7 +76,7 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi
 	socketPath := c.AttachSocketPath()

-	maxUnixLength := int(C.unix_path_length())
+	maxUnixLength := unixPathLength()
 	if maxUnixLength < len(socketPath) {
 		socketPath = socketPath[0:maxUnixLength]
 	}
diff --git a/libpod/container_attach_linux_cgo.go b/libpod/container_attach_linux_cgo.go
new file mode 100644
index 000000000..d81243360
--- /dev/null
+++ b/libpod/container_attach_linux_cgo.go
@@ -0,0 +1,11 @@
+//+build linux,cgo
+
+package libpod
+
+//#include <sys/un.h>
+// extern int unix_path_length(){struct sockaddr_un addr; return sizeof(addr.sun_path) - 1;}
+import "C"
+
+func unixPathLength() int {
+	return int(C.unix_path_length())
+}
diff --git a/libpod/container_attach_linux_nocgo.go b/libpod/container_attach_linux_nocgo.go
new file mode 100644
index 000000000..a514a555d
--- /dev/null
+++ b/libpod/container_attach_linux_nocgo.go
@@ -0,0 +1,7 @@
+//+build linux,!cgo
+
+package libpod
+
+func unixPathLength() int {
+	return 107
+}
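The hardcoded 107 in the nocgo variant is sizeof(sun_path) - 1: on Linux, sockaddr_un.sun_path is a 108-byte array, and one byte is reserved for the terminating NUL. A standalone way to confirm the constant without cgo, using golang.org/x/sys/unix (an import this commit does not itself add):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// RawSockaddrUnix mirrors the kernel's sockaddr_un layout.
	var sa unix.RawSockaddrUnix
	fmt.Println(len(sa.Path) - 1) // prints 107 on Linux
}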
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 0a4d7016f..18e84e1f1 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -145,6 +145,7 @@ type InspectContainerState struct {
 	OOMKilled bool `json:"OOMKilled"`
 	Dead bool `json:"Dead"`
 	Pid int `json:"Pid"`
+	ConmonPid int `json:"ConmonPid,omitempty"`
 	ExitCode int32 `json:"ExitCode"`
 	Error string `json:"Error"` // TODO
 	StartedAt time.Time `json:"StartedAt"`
@@ -261,6 +262,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
 		OOMKilled: runtimeInfo.OOMKilled,
 		Dead: runtimeInfo.State.String() == "bad state",
 		Pid: runtimeInfo.PID,
+		ConmonPid: runtimeInfo.ConmonPID,
 		ExitCode: runtimeInfo.ExitCode,
 		Error: "", // can't get yet
 		StartedAt: runtimeInfo.StartedTime,
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 884f5a0a9..611fa9800 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -452,6 +452,7 @@ func (c *Container) teardownStorage() error {
 // It does not save the results - assumes the database will do that for us
 func resetState(state *ContainerState) error {
 	state.PID = 0
+	state.ConmonPID = 0
 	state.Mountpoint = ""
 	state.Mounted = false
 	if state.State != define.ContainerStateExited {
@@ -609,7 +610,7 @@ func (c *Container) isStopped() (bool, error) {
 	if err != nil {
 		return true, err
 	}
-	return (c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused), nil
+	return c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused, nil
 }

 // save container state to the database
@@ -1043,6 +1044,8 @@ func (c *Container) stop(timeout uint) error {
 		return err
 	}

+	c.state.PID = 0
+	c.state.ConmonPID = 0
 	c.state.StoppedByUser = true
 	if err := c.save(); err != nil {
 		return errors.Wrapf(err, "error saving container %s state after stopping", c.ID())
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index 644a9ae86..e5765febc 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -19,8 +19,8 @@ import (
 	"github.com/containers/image/types"
 	"github.com/containers/libpod/libpod/events"
 	"github.com/containers/libpod/pkg/registries"
-	multierror "github.com/hashicorp/go-multierror"
-	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/hashicorp/go-multierror"
+	"github.com/opentracing/opentracing-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
diff --git a/libpod/lock/file/file_lock.go b/libpod/lock/file/file_lock.go
new file mode 100644
index 000000000..e50d67321
--- /dev/null
+++ b/libpod/lock/file/file_lock.go
@@ -0,0 +1,175 @@
+package file
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"syscall"
+
+	"github.com/containers/storage"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// FileLocks is a struct enabling POSIX-style locking backed by files in a
+// directory.
+type FileLocks struct { // nolint
+	lockPath string
+	valid    bool
+}
+
+// CreateFileLock sets up a directory containing the various lock files.
+func CreateFileLock(path string) (*FileLocks, error) {
+	_, err := os.Stat(path)
+	if err == nil {
+		return nil, errors.Wrapf(syscall.EEXIST, "directory %s exists", path)
+	}
+	if err := os.MkdirAll(path, 0711); err != nil {
+		return nil, errors.Wrapf(err, "cannot create %s", path)
+	}
+
+	locks := new(FileLocks)
+	locks.lockPath = path
+	locks.valid = true
+
+	return locks, nil
+}
+
+// OpenFileLock opens an existing directory with the lock files.
+func OpenFileLock(path string) (*FileLocks, error) {
+	_, err := os.Stat(path)
+	if err != nil {
+		return nil, errors.Wrapf(err, "accessing directory %s", path)
+	}
+
+	locks := new(FileLocks)
+	locks.lockPath = path
+	locks.valid = true
+
+	return locks, nil
+}
+
+// Close closes an existing lock directory.
+// The locks will be rendered unusable after closing.
+// WARNING: If you Close() while there are still locks locked, these locks may
+// fail to release, causing a program freeze.
+// Close() is only intended to be used while testing the locks.
+func (locks *FileLocks) Close() error {
+	if !locks.valid {
+		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+	err := os.RemoveAll(locks.lockPath)
+	if err != nil {
+		return errors.Wrapf(err, "deleting directory %s", locks.lockPath)
+	}
+	return nil
+}
+
+func (locks *FileLocks) getLockPath(lck uint32) string {
+	return filepath.Join(locks.lockPath, strconv.FormatInt(int64(lck), 10))
+}
+
+// AllocateLock allocates a lock and returns the index of the lock that was allocated.
+func (locks *FileLocks) AllocateLock() (uint32, error) {
+	if !locks.valid {
+		return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+
+	id := uint32(0)
+	for ; ; id++ {
+		path := locks.getLockPath(id)
+		f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+		if err != nil {
+			if os.IsExist(err) {
+				continue
+			}
+			return 0, errors.Wrapf(err, "creating lock file")
+		}
+		f.Close()
+		break
+	}
+	return id, nil
+}
+
+// AllocateGivenLock allocates the given lock from the lock directory for
+// use by a container or pod.
+// If the lock is already in use or the index is invalid an error will be
+// returned.
+func (locks *FileLocks) AllocateGivenLock(lck uint32) error {
+	if !locks.valid {
+		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+
+	f, err := os.OpenFile(locks.getLockPath(lck), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+	if err != nil {
+		return errors.Wrapf(err, "error creating lock %d", lck)
+	}
+	f.Close()
+
+	return nil
+}
+
+// DeallocateLock frees a lock so it can be reallocated to another container
+// or pod.
+// The given lock must be already allocated, or an error will be returned.
+func (locks *FileLocks) DeallocateLock(lck uint32) error {
+	if !locks.valid {
+		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+	if err := os.Remove(locks.getLockPath(lck)); err != nil {
+		return errors.Wrapf(err, "deallocating lock %d", lck)
+	}
+	return nil
+}
+
+// DeallocateAllLocks frees all locks so they can be reallocated to
+// other containers and pods.
+func (locks *FileLocks) DeallocateAllLocks() error {
+	if !locks.valid {
+		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+	files, err := ioutil.ReadDir(locks.lockPath)
+	if err != nil {
+		return errors.Wrapf(err, "error reading directory %s", locks.lockPath)
+	}
+	var lastErr error
+	for _, f := range files {
+		p := filepath.Join(locks.lockPath, f.Name())
+		err := os.Remove(p)
+		if err != nil {
+			lastErr = err
+			logrus.Errorf("deallocating lock %s", p)
+		}
+	}
+	return lastErr
+}
+
+// LockFileLock locks the given lock.
+func (locks *FileLocks) LockFileLock(lck uint32) error {
+	if !locks.valid {
+		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+
+	l, err := storage.GetLockfile(locks.getLockPath(lck))
+	if err != nil {
+		return errors.Wrapf(err, "error acquiring lock")
+	}
+
+	l.Lock()
+	return nil
+}
+
+// UnlockFileLock unlocks the given lock.
+func (locks *FileLocks) UnlockFileLock(lck uint32) error {
+	if !locks.valid {
+		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+	}
+	l, err := storage.GetLockfile(locks.getLockPath(lck))
+	if err != nil {
+		return errors.Wrapf(err, "error acquiring lock")
+	}
+
+	l.Unlock()
+	return nil
+}
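AllocateLock scans numeric file names upward and relies on O_CREATE|O_EXCL being atomic: creation either succeeds (the slot is claimed) or fails with EEXIST (the slot is taken). A self-contained sketch of that allocation idea, outside of libpod:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
)

// allocate claims the first free numeric slot in dir. O_EXCL guarantees
// that two concurrent callers can never both claim the same slot.
func allocate(dir string) (uint32, error) {
	for id := uint32(0); ; id++ {
		path := filepath.Join(dir, strconv.FormatUint(uint64(id), 10))
		f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
		if err != nil {
			if os.IsExist(err) {
				continue // slot already taken, try the next one
			}
			return 0, err
		}
		f.Close()
		return id, nil
	}
}

func main() {
	dir, err := ioutil.TempDir("", "locks")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	for i := 0; i < 3; i++ {
		id, err := allocate(dir)
		if err != nil {
			panic(err)
		}
		fmt.Println("allocated lock", id) // 0, then 1, then 2
	}
}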
diff --git a/libpod/lock/file/file_lock_test.go b/libpod/lock/file/file_lock_test.go
new file mode 100644
index 000000000..6320d6b70
--- /dev/null
+++ b/libpod/lock/file/file_lock_test.go
@@ -0,0 +1,74 @@
+package file
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// Test that creating and destroying locks work
+func TestCreateAndDeallocate(t *testing.T) {
+	d, err := ioutil.TempDir("", "filelock")
+	assert.NoError(t, err)
+	defer os.RemoveAll(d)
+
+	l, err := OpenFileLock(filepath.Join(d, "locks"))
+	assert.Error(t, err)
+
+	l, err = CreateFileLock(filepath.Join(d, "locks"))
+	assert.NoError(t, err)
+
+	lock, err := l.AllocateLock()
+	assert.NoError(t, err)
+
+	err = l.AllocateGivenLock(lock)
+	assert.Error(t, err)
+
+	err = l.DeallocateLock(lock)
+	assert.NoError(t, err)
+
+	err = l.AllocateGivenLock(lock)
+	assert.NoError(t, err)
+
+	err = l.DeallocateAllLocks()
+	assert.NoError(t, err)
+
+	err = l.AllocateGivenLock(lock)
+	assert.NoError(t, err)
+
+	err = l.DeallocateAllLocks()
+	assert.NoError(t, err)
+}
+
+// Test that locking and unlocking locks work
+func TestLockAndUnlock(t *testing.T) {
+	d, err := ioutil.TempDir("", "filelock")
+	assert.NoError(t, err)
+	defer os.RemoveAll(d)
+
+	l, err := CreateFileLock(filepath.Join(d, "locks"))
+	assert.NoError(t, err)
+
+	lock, err := l.AllocateLock()
+	assert.NoError(t, err)
+
+	err = l.LockFileLock(lock)
+	assert.NoError(t, err)
+
+	lslocks, err := exec.LookPath("lslocks")
+	if err == nil {
+		lockPath := l.getLockPath(lock)
+		out, err := exec.Command(lslocks, "--json", "-p", fmt.Sprintf("%d", os.Getpid())).CombinedOutput()
+		assert.NoError(t, err)
+
+		assert.Contains(t, string(out), lockPath)
+	}
+
+	err = l.UnlockFileLock(lock)
+	assert.NoError(t, err)
+}
diff --git a/libpod/lock/file_lock_manager.go b/libpod/lock/file_lock_manager.go
new file mode 100644
index 000000000..8a4d939d3
--- /dev/null
+++ b/libpod/lock/file_lock_manager.go
@@ -0,0 +1,110 @@
+package lock
+
+import (
+	"github.com/containers/libpod/libpod/lock/file"
+)
+
+// FileLockManager manages file-based locks.
+type FileLockManager struct {
+	locks *file.FileLocks
+}
+
+// NewFileLockManager makes a new FileLockManager at the specified directory.
+func NewFileLockManager(lockPath string) (Manager, error) {
+	locks, err := file.CreateFileLock(lockPath)
+	if err != nil {
+		return nil, err
+	}
+
+	manager := new(FileLockManager)
+	manager.locks = locks
+
+	return manager, nil
+}
+
+// OpenFileLockManager opens an existing FileLockManager at the specified directory.
+func OpenFileLockManager(path string) (Manager, error) {
+	locks, err := file.OpenFileLock(path)
+	if err != nil {
+		return nil, err
+	}
+
+	manager := new(FileLockManager)
+	manager.locks = locks
+
+	return manager, nil
+}
+
+// AllocateLock allocates a new lock from the manager.
+func (m *FileLockManager) AllocateLock() (Locker, error) {
+	semIndex, err := m.locks.AllocateLock()
+	if err != nil {
+		return nil, err
+	}
+
+	lock := new(FileLock)
+	lock.lockID = semIndex
+	lock.manager = m
+
+	return lock, nil
+}
+
+// AllocateAndRetrieveLock allocates the lock with the given ID and returns it.
+// If the lock is already allocated, error.
+func (m *FileLockManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
+	lock := new(FileLock)
+	lock.lockID = id
+	lock.manager = m
+
+	if err := m.locks.AllocateGivenLock(id); err != nil {
+		return nil, err
+	}
+
+	return lock, nil
+}
+
+// RetrieveLock retrieves a lock from the manager given its ID.
+func (m *FileLockManager) RetrieveLock(id uint32) (Locker, error) {
+	lock := new(FileLock)
+	lock.lockID = id
+	lock.manager = m
+
+	return lock, nil
+}
+
+// FreeAllLocks frees all locks in the manager.
+// This function is DANGEROUS. Please read the full comment in locks.go before
+// trying to use it.
+func (m *FileLockManager) FreeAllLocks() error {
+	return m.locks.DeallocateAllLocks()
+}
+
+// FileLock is an individual file-based lock.
+type FileLock struct {
+	lockID  uint32
+	manager *FileLockManager
+}
+
+// ID returns the ID of the lock.
+func (l *FileLock) ID() uint32 {
+	return l.lockID
+}
+
+// Lock acquires the lock.
+func (l *FileLock) Lock() {
+	if err := l.manager.locks.LockFileLock(l.lockID); err != nil {
+		panic(err.Error())
+	}
+}
+
+// Unlock releases the lock.
+func (l *FileLock) Unlock() {
+	if err := l.manager.locks.UnlockFileLock(l.lockID); err != nil {
+		panic(err.Error())
+	}
+}
+
+// Free releases the lock, allowing it to be reused.
+func (l *FileLock) Free() error {
+	return l.manager.locks.DeallocateLock(l.lockID)
+}
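End-to-end, the manager hands out Locker values whose Lock/Unlock calls map onto the lock files above. A sketch of the round trip (the temporary directory is only for illustration; libpod derives the real path from the runtime's TmpDir):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/containers/libpod/libpod/lock"
)

func main() {
	dir, err := ioutil.TempDir("", "filelock")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// NewFileLockManager expects the lock directory to not exist yet.
	manager, err := lock.NewFileLockManager(filepath.Join(dir, "locks"))
	if err != nil {
		panic(err)
	}

	l, err := manager.AllocateLock()
	if err != nil {
		panic(err)
	}

	l.Lock()
	fmt.Println("holding lock", l.ID())
	l.Unlock()

	if err := l.Free(); err != nil {
		panic(err)
	}
}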
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index 76dd5729e..322e92a8f 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -1,3 +1,5 @@
+// +build linux,cgo
+
 package shm

 // #cgo LDFLAGS: -lrt -lpthread
@@ -20,7 +22,7 @@ var (
 	// BitmapSize is the size of the bitmap used when managing SHM locks.
 	// an SHM lock manager's max locks will be rounded up to a multiple of
 	// this number.
-	BitmapSize uint32 = uint32(C.bitmap_size_c)
+	BitmapSize = uint32(C.bitmap_size_c)
 )

 // SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
diff --git a/libpod/lock/shm/shm_lock_nocgo.go b/libpod/lock/shm/shm_lock_nocgo.go
new file mode 100644
index 000000000..ea1488c90
--- /dev/null
+++ b/libpod/lock/shm/shm_lock_nocgo.go
@@ -0,0 +1,102 @@
+// +build linux,!cgo
+
+package shm
+
+import (
+	"github.com/sirupsen/logrus"
+)
+
+// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
+// segment.
+type SHMLocks struct {
+}
+
+// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
+// semaphores, and returns a struct that can be used to operate on those locks.
+// numLocks must not be 0, and may be rounded up to a multiple of the bitmap
+// size used by the underlying implementation.
+func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+	logrus.Error("locks are not supported without cgo")
+	return &SHMLocks{}, nil
+}
+
+// OpenSHMLock opens an existing shared-memory segment holding a given number of
+// POSIX semaphores. numLocks must match the number of locks the shared memory
+// segment was created with.
+func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+	logrus.Error("locks are not supported without cgo")
+	return &SHMLocks{}, nil
+}
+
+// GetMaxLocks returns the maximum number of locks in the SHM
+func (locks *SHMLocks) GetMaxLocks() uint32 {
+	logrus.Error("locks are not supported without cgo")
+	return 0
+}
+
+// Close closes an existing shared-memory segment.
+// The segment will be rendered unusable after closing.
+// WARNING: If you Close() while there are still locks locked, these locks may
+// fail to release, causing a program freeze.
+// Close() is only intended to be used while testing the locks.
+func (locks *SHMLocks) Close() error {
+	logrus.Error("locks are not supported without cgo")
+	return nil
+}
+
+// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
+// by a container or pod.
+// Returns the index of the semaphore that was allocated.
+// Allocations past the maximum number of locks given when the SHM segment was
+// created will result in an error, and no semaphore will be allocated.
+func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
+	logrus.Error("locks are not supported without cgo")
+	return 0, nil
+}
+
+// AllocateGivenSemaphore allocates the given semaphore from the shared-memory
+// segment for use by a container or pod.
+// If the semaphore is already in use or the index is invalid an error will be
+// returned.
+func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
+	logrus.Error("locks are not supported without cgo")
+	return nil
+}
+
+// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
+// reallocated to another container or pod.
+// The given semaphore must be already allocated, or an error will be returned.
+func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
+	logrus.Error("locks are not supported without cgo")
+	return nil
+}
+
+// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
+// other containers and pods.
+func (locks *SHMLocks) DeallocateAllSemaphores() error {
+	logrus.Error("locks are not supported without cgo")
+	return nil
+}
+
+// LockSemaphore locks the given semaphore.
+// If the semaphore is already locked, LockSemaphore will block until the lock
+// can be acquired.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) LockSemaphore(sem uint32) error {
+	logrus.Error("locks are not supported without cgo")
+	return nil
+}
+
+// UnlockSemaphore unlocks the given semaphore.
+// Unlocking a semaphore that is already unlocked will return EBUSY.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
+	logrus.Error("locks are not supported without cgo")
+	return nil
+}
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 93ec157c5..27585b8d5 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -28,21 +28,23 @@ import (

 // Get an OCICNI network config
 func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP) ocicni.PodNetwork {
+	defaultNetwork := r.netPlugin.GetDefaultNetworkName()
 	network := ocicni.PodNetwork{
-		Name:         name,
-		Namespace:    name, // TODO is there something else we should put here? We don't know about Kube namespaces
-		ID:           id,
-		NetNS:        nsPath,
-		PortMappings: ports,
-		Networks:     networks,
+		Name:      name,
+		Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces
+		ID:        id,
+		NetNS:     nsPath,
+		Networks:  networks,
+		RuntimeConfig: map[string]ocicni.RuntimeConfig{
+			defaultNetwork: {PortMappings: ports},
+		},
 	}

 	if staticIP != nil {
-		defaultNetwork := r.netPlugin.GetDefaultNetworkName()
-		network.Networks = []string{defaultNetwork}
-		network.NetworkConfig = make(map[string]ocicni.NetworkConfig)
-		network.NetworkConfig[defaultNetwork] = ocicni.NetworkConfig{IP: staticIP.String()}
+		network.RuntimeConfig = map[string]ocicni.RuntimeConfig{
+			defaultNetwork: {IP: staticIP.String(), PortMappings: ports},
+		}
 	}

 	return network
diff --git a/libpod/oci.go b/libpod/oci.go
index efb5e42cc..fdd783100 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -234,6 +234,8 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro

 		// Alright, it exists. Transition to Stopped state.
 		ctr.state.State = define.ContainerStateStopped
+		ctr.state.PID = 0
+		ctr.state.ConmonPID = 0

 		// Read the exit file to get our stopped time and exit code.
 		return ctr.handleExitFile(exitFile, info)
diff --git a/libpod/oci_linux.go b/libpod/oci_linux.go
index 7d9f47ae2..24502ef4f 100644
--- a/libpod/oci_linux.go
+++ b/libpod/oci_linux.go
@@ -446,6 +446,9 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
 			return errors.Wrapf(define.ErrInternal, "container create failed")
 		}
 		ctr.state.PID = ss.si.Pid
+		if cmd.Process != nil {
+			ctr.state.ConmonPID = cmd.Process.Pid
+		}
 	case <-time.After(ContainerCreateTimeout):
 		return errors.Wrapf(define.ErrInternal, "container creation timeout")
 	}
diff --git a/libpod/options.go b/libpod/options.go
index 0f23a6c97..78634e953 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -300,6 +300,15 @@ func WithTmpDir(dir string) RuntimeOption {
 	}
 }

+// WithNoStore sets a bool on the runtime that we do not need
+// any containers storage.
+func WithNoStore() RuntimeOption {
+	return func(rt *Runtime) error {
+		rt.noStore = true
+		return nil
+	}
+}
+
 // WithMaxLogSize sets the maximum size of container logs.
 // Positive sizes are limits in bytes, -1 is unlimited.
 func WithMaxLogSize(limit int64) RuntimeOption {
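WithNoStore pairs with the runtime changes below: when set, makeRuntime skips opening containers/storage entirely. A hypothetical caller that only needs runtime services (assuming the usual NewRuntime(ctx, options...) constructor):

package example

import (
	"context"

	"github.com/containers/libpod/libpod"
)

// newStorelessRuntime builds a runtime without opening the container
// store, e.g. for consumers that only read events.
func newStorelessRuntime(ctx context.Context) (*libpod.Runtime, error) {
	return libpod.NewRuntime(ctx, libpod.WithNoStore())
}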
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 02aa76731..6c61e15d3 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -125,6 +125,9 @@ type Runtime struct {

 	// mechanism to read and write even logs
 	eventer events.Eventer
+
+	// noStore indicates whether we need to interact with a store or not
+	noStore bool
 }

 // RuntimeConfig contains configuration options used to set up the runtime
@@ -236,6 +239,9 @@ type RuntimeConfig struct {
 	// pods.
 	NumLocks uint32 `toml:"num_locks,omitempty"`

+	// LockType is the type of locking to use.
+	LockType string `toml:"lock_type,omitempty"`
+
 	// EventsLogger determines where events should be logged
 	EventsLogger string `toml:"events_logger"`
 	// EventsLogFilePath is where the events log is stored.
@@ -315,6 +321,7 @@ func defaultRuntimeConfig() (RuntimeConfig, error) {
 		NumLocks:     2048,
 		EventsLogger: events.DefaultEventerType.String(),
 		DetachKeys:   DefaultDetachKeys,
+		LockType:     "shm",
 	}, nil
 }

@@ -656,6 +663,62 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
 	return runtime, nil
 }

+func getLockManager(runtime *Runtime) (lock.Manager, error) {
+	var err error
+	var manager lock.Manager
+
+	switch runtime.config.LockType {
+	case "file":
+		lockPath := filepath.Join(runtime.config.TmpDir, "locks")
+		manager, err = lock.OpenFileLockManager(lockPath)
+		if err != nil {
+			if os.IsNotExist(errors.Cause(err)) {
+				manager, err = lock.NewFileLockManager(lockPath)
+				if err != nil {
+					return nil, errors.Wrapf(err, "failed to get new file lock manager")
+				}
+			} else {
+				return nil, err
+			}
+		}
+
+	case "", "shm":
+		lockPath := DefaultSHMLockPath
+		if rootless.IsRootless() {
+			lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
+		}
+		// Set up the lock manager
+		manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
+		if err != nil {
+			if os.IsNotExist(errors.Cause(err)) {
+				manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+				if err != nil {
+					return nil, errors.Wrapf(err, "failed to get new shm lock manager")
+				}
+			} else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
+				logrus.Debugf("Number of locks does not match - removing old locks")
+
+				// ERANGE indicates a lock numbering mismatch.
+				// Since we're renumbering, this is not fatal.
+				// Remove the earlier set of locks and recreate.
+				if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
+					return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
+				}
+
+				manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+				if err != nil {
+					return nil, err
+				}
+			} else {
+				return nil, err
+			}
+		}
+	default:
+		return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.LockType)
+	}
+	return manager, nil
+}
+
 // Make a new runtime based on the given configuration
 // Sets up containers/storage, state store, OCI runtime
 func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
@@ -784,11 +847,14 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
 	var store storage.Store
 	if os.Geteuid() != 0 {
 		logrus.Debug("Not configuring container store")
+	} else if runtime.noStore {
+		logrus.Debug("No store required. Not opening container store.")
 	} else {
 		store, err = storage.GetStore(runtime.config.StorageConfig)
 		if err != nil {
 			return err
 		}
+		err = nil

 		defer func() {
 			if err != nil && store != nil {
@@ -1038,37 +1104,10 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
 		}
 	}

-	lockPath := DefaultSHMLockPath
-	if rootless.IsRootless() {
-		lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
-	}
-	// Set up the lock manager
-	manager, err := lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
+	runtime.lockManager, err = getLockManager(runtime)
 	if err != nil {
-		if os.IsNotExist(errors.Cause(err)) {
-			manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
-			if err != nil {
-				return errors.Wrapf(err, "failed to get new shm lock manager")
-			}
-		} else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
-			logrus.Debugf("Number of locks does not match - removing old locks")
-
-			// ERANGE indicates a lock numbering mismatch.
-			// Since we're renumbering, this is not fatal.
-			// Remove the earlier set of locks and recreate.
-			if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
-				return errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
-			}
-
-			manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
-			if err != nil {
-				return err
-			}
-		} else {
-			return err
-		}
+		return err
 	}
-	runtime.lockManager = manager

 	// If we're renumbering locks, do it now.
 	// It breaks out of normal runtime init, and will not return a valid
@@ -1148,6 +1187,8 @@ func (r *Runtime) Shutdown(force bool) error {
 	}

 	var lastError error
+	// If no store was requested, it can be nil and there is no need to
+	// attempt to shut it down
 	if r.store != nil {
 		if _, err := r.store.Shutdown(force); err != nil {
 			lastError = errors.Wrapf(err, "Error shutting down container storage")
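getLockManager keeps "shm" (and the empty string) as the default and treats "file" as opt-in; any other value is rejected with ErrInvalidArg. Illustrative only — in practice the value comes from the lock_type key in libpod.conf rather than a literal:

package example

import "github.com/containers/libpod/libpod"

// Equivalent of setting lock_type = "file" in libpod.conf.
var cfg = libpod.RuntimeConfig{
	NumLocks: 2048,
	LockType: "file",
}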
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 0d0f700a6..9daac161c 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -132,6 +132,14 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container, restore bo
 	ctr.config.LockID = ctr.lock.ID()
 	logrus.Debugf("Allocated lock %d for container %s", ctr.lock.ID(), ctr.ID())

+	defer func() {
+		if err != nil {
+			if err2 := ctr.lock.Free(); err2 != nil {
+				logrus.Errorf("Error freeing lock for container after creation failed: %v", err2)
+			}
+		}
+	}()
+
 	ctr.valid = true
 	ctr.state.State = config2.ContainerStateConfigured
 	ctr.runtime = r
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index e9ce130da..d667d3a25 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -19,7 +19,7 @@ import (
 )

 // NewPod makes a new, empty pod
-func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod, error) {
+func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Pod, Err error) {
 	r.lock.Lock()
 	defer r.lock.Unlock()

@@ -60,6 +60,14 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
 	pod.lock = lock
 	pod.config.LockID = pod.lock.ID()

+	defer func() {
+		if Err != nil {
+			if err := pod.lock.Free(); err != nil {
+				logrus.Errorf("Error freeing pod lock after failed creation: %v", err)
+			}
+		}
+	}()
+
 	pod.valid = true

 	// Check CGroup parent sanity, and set it if it was not set
@@ -113,15 +121,17 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
 	if err := r.state.AddPod(pod); err != nil {
 		return nil, errors.Wrapf(err, "error adding pod to state")
 	}
+	defer func() {
+		if Err != nil {
+			if err := r.removePod(ctx, pod, true, true); err != nil {
+				logrus.Errorf("Error removing pod after pause container creation failure: %v", err)
+			}
+		}
+	}()

 	if pod.HasInfraContainer() {
 		ctr, err := r.createInfraContainer(ctx, pod)
 		if err != nil {
-			// Tear down pod, as it is assumed a the pod will contain
-			// a pause container, and it does not.
-			if err2 := r.removePod(ctx, pod, true, true); err2 != nil {
-				logrus.Errorf("Error removing pod after pause container creation failure: %v", err2)
-			}
 			return nil, errors.Wrapf(err, "error adding Infra Container")
 		}
 		pod.state.InfraContainerID = ctr.ID()
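Both call sites now unwind with the same pattern: a named error return plus deferred handlers, so every allocation made before a failure is released on the way out. A distilled, runnable version of the idiom (names are invented for the sketch):

package main

import (
	"errors"
	"fmt"
)

// create mimics NewPod: because Err is a named return, the deferred
// function can observe whether we are exiting with an error and roll
// back the earlier allocation.
func create() (_ string, Err error) {
	fmt.Println("allocate lock")
	defer func() {
		if Err != nil {
			fmt.Println("rollback: free lock")
		}
	}()

	if err := infraStep(); err != nil {
		return "", err
	}
	return "pod-id", nil
}

func infraStep() error { return errors.New("infra container creation failed") }

func main() {
	if _, err := create(); err != nil {
		fmt.Println("create failed:", err)
	}
}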
diff --git a/libpod/stats.go b/libpod/stats.go
index da383e9d9..eb5ed95c4 100644
--- a/libpod/stats.go
+++ b/libpod/stats.go
@@ -3,6 +3,7 @@ package libpod

 import (
+	"runtime"
 	"strings"
 	"syscall"
 	"time"
@@ -105,7 +106,11 @@ func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, previousSystem uin
 	if systemDelta > 0.0 && cpuDelta > 0.0 {
 		// gets a ratio of container cpu usage total, multiplies it by the number of cores (4 cores running
 		// at 100% utilization should be 400% utilization), and multiplies that by 100 to get a percentage
-		cpuPercent = (cpuDelta / systemDelta) * float64(len(stats.CPU.Usage.PerCPU)) * 100
+		nCPUS := len(stats.CPU.Usage.PerCPU)
+		if nCPUS == 0 {
+			nCPUS = runtime.NumCPU()
+		}
+		cpuPercent = (cpuDelta / systemDelta) * float64(nCPUS) * 100
 	}
 	return cpuPercent
 }
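With an empty PerCPU slice, the old code multiplied by zero and always reported 0%. A worked version of the corrected formula with plain numbers:

package main

import (
	"fmt"
	"runtime"
)

// cpuPercent mirrors the fixed calculation above: fall back to the host
// CPU count when the cgroup reports no per-CPU usage entries.
func cpuPercent(cpuDelta, systemDelta float64, perCPUEntries int) float64 {
	nCPUs := perCPUEntries
	if nCPUs == 0 {
		nCPUs = runtime.NumCPU()
	}
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		return (cpuDelta / systemDelta) * float64(nCPUs) * 100
	}
	return 0
}

func main() {
	// 0.5s of container CPU time in a window where the whole 4-CPU
	// system accumulated 4s: (0.5/4) * 4 * 100 = 50%.
	fmt.Println(cpuPercent(0.5e9, 4e9, 4))
}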