aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatthew Heon <matthew.heon@gmail.com>2018-08-09 13:44:52 -0400
committerAtomic Bot <atomic-devel@projectatomic.io>2018-08-10 18:14:31 +0000
commitccbaa15de15390d0fb0a19aec51c2dbeb02771c3 (patch)
tree41dbf021ed91e3c807c4738b2d303f816d3262d6
parent7366697175f64b734c9a6a652c3af511772b44d6 (diff)
downloadpodman-ccbaa15de15390d0fb0a19aec51c2dbeb02771c3.tar.gz
podman-ccbaa15de15390d0fb0a19aec51c2dbeb02771c3.tar.bz2
podman-ccbaa15de15390d0fb0a19aec51c2dbeb02771c3.zip
Split pod.go into 3 files
This removes anything but structs and simple accessors from pod.go itself, which is a target file for FFJSON generation. This should reduce the number of times FFJSON needs to run. Signed-off-by: Matthew Heon <matthew.heon@gmail.com> Closes: #1247 Approved by: rhatdan
-rw-r--r--libpod/pod.go590
-rw-r--r--libpod/pod_api.go428
-rw-r--r--libpod/pod_ffjson.go617
-rw-r--r--libpod/pod_internal.go177
4 files changed, 1222 insertions, 590 deletions
diff --git a/libpod/pod.go b/libpod/pod.go
index 46182680a..24469fea8 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -1,16 +1,9 @@
package libpod
import (
- "context"
- "path/filepath"
- "strings"
"time"
"github.com/containers/storage"
- "github.com/docker/docker/pkg/stringid"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "github.com/ulule/deepcopier"
)
// Pod represents a group of containers that are managed together.
@@ -127,520 +120,6 @@ func (p *Pod) CgroupPath() (string, error) {
return p.state.CgroupPath, nil
}
-// Creates a new, empty pod
-func newPod(lockDir string, runtime *Runtime) (*Pod, error) {
- pod := new(Pod)
- pod.config = new(PodConfig)
- pod.config.ID = stringid.GenerateNonCryptoID()
- pod.config.Labels = make(map[string]string)
- pod.config.CreatedTime = time.Now()
- pod.state = new(podState)
- pod.runtime = runtime
-
- // Path our lock file will reside at
- lockPath := filepath.Join(lockDir, pod.config.ID)
- // Grab a lockfile at the given path
- lock, err := storage.GetLockfile(lockPath)
- if err != nil {
- return nil, errors.Wrapf(err, "error creating lockfile for new pod")
- }
- pod.lock = lock
-
- return pod, nil
-}
-
-// Update pod state from database
-func (p *Pod) updatePod() error {
- if err := p.runtime.state.UpdatePod(p); err != nil {
- return err
- }
-
- return nil
-}
-
-// Save pod state to database
-func (p *Pod) save() error {
- if err := p.runtime.state.SavePod(p); err != nil {
- return errors.Wrapf(err, "error saving pod %s state")
- }
-
- return nil
-}
-
-// Refresh a pod's state after restart
-func (p *Pod) refresh() error {
- // Need to to an update from the DB to pull potentially-missing state
- if err := p.runtime.state.UpdatePod(p); err != nil {
- return err
- }
-
- if !p.valid {
- return ErrPodRemoved
- }
-
- // We need to recreate the pod's cgroup
- if p.config.UsePodCgroup {
- switch p.runtime.config.CgroupManager {
- case SystemdCgroupsManager:
- // NOOP for now, until proper systemd cgroup management
- // is implemented
- case CgroupfsCgroupsManager:
- p.state.CgroupPath = filepath.Join(p.config.CgroupParent, p.ID())
-
- logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath)
- default:
- return errors.Wrapf(ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.CgroupManager)
- }
- }
-
- // Save changes
- return p.save()
-}
-
-// Start starts all containers within a pod
-// It combines the effects of Init() and Start() on a container
-// If a container has already been initialized it will be started,
-// otherwise it will be initialized then started.
-// Containers that are already running or have been paused are ignored
-// All containers are started independently, in order dictated by their
-// dependencies.
-// An error and a map[string]error are returned
-// If the error is not nil and the map is nil, an error was encountered before
-// any containers were started
-// If map is not nil, an error was encountered when starting one or more
-// containers. The container ID is mapped to the error encountered. The error is
-// set to ErrCtrExists
-// If both error and the map are nil, all containers were started successfully
-func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- // Build a dependency graph of containers in the pod
- graph, err := buildContainerGraph(allCtrs)
- if err != nil {
- return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
- }
-
- ctrErrors := make(map[string]error)
- ctrsVisited := make(map[string]bool)
-
- // If there are no containers without dependencies, we can't start
- // Error out
- if len(graph.noDepNodes) == 0 {
- return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
- }
-
- // Traverse the graph beginning at nodes with no dependencies
- for _, node := range graph.noDepNodes {
- startNode(ctx, node, false, ctrErrors, ctrsVisited, false)
- }
-
- return ctrErrors, nil
-}
-
-// Visit a node on a container graph and start the container, or set an error if
-// a dependency failed to start. if restart is true, startNode will restart the node instead of starting it.
-func startNode(ctx context.Context, node *containerNode, setError bool, ctrErrors map[string]error, ctrsVisited map[string]bool, restart bool) {
- // First, check if we have already visited the node
- if ctrsVisited[node.id] {
- return
- }
-
- // If setError is true, a dependency of us failed
- // Mark us as failed and recurse
- if setError {
- // Mark us as visited, and set an error
- ctrsVisited[node.id] = true
- ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id)
-
- // Hit anyone who depends on us, and set errors on them too
- for _, successor := range node.dependedOn {
- startNode(ctx, successor, true, ctrErrors, ctrsVisited, restart)
- }
-
- return
- }
-
- // Have all our dependencies started?
- // If not, don't visit the node yet
- depsVisited := true
- for _, dep := range node.dependsOn {
- depsVisited = depsVisited && ctrsVisited[dep.id]
- }
- if !depsVisited {
- // Don't visit us yet, all dependencies are not up
- // We'll hit the dependencies eventually, and when we do it will
- // recurse here
- return
- }
-
- // Going to try to start the container, mark us as visited
- ctrsVisited[node.id] = true
-
- ctrErrored := false
-
- // Check if dependencies are running
- // Graph traversal means we should have started them
- // But they could have died before we got here
- // Does not require that the container be locked, we only need to lock
- // the dependencies
- depsStopped, err := node.container.checkDependenciesRunning()
- if err != nil {
- ctrErrors[node.id] = err
- ctrErrored = true
- } else if len(depsStopped) > 0 {
- // Our dependencies are not running
- depsList := strings.Join(depsStopped, ",")
- ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
- ctrErrored = true
- }
-
- // Lock before we start
- node.container.lock.Lock()
-
- // Sync the container to pick up current state
- if !ctrErrored {
- if err := node.container.syncContainer(); err != nil {
- ctrErrored = true
- ctrErrors[node.id] = err
- }
- }
-
- // Start the container (only if it is not running)
- if !ctrErrored {
- if !restart && node.container.state.State != ContainerStateRunning {
- if err := node.container.initAndStart(ctx); err != nil {
- ctrErrored = true
- ctrErrors[node.id] = err
- }
- }
- if restart && node.container.state.State != ContainerStatePaused && node.container.state.State != ContainerStateUnknown {
- if err := node.container.restartWithTimeout(ctx, node.container.config.StopTimeout); err != nil {
- ctrErrored = true
- ctrErrors[node.id] = err
- }
- }
- }
-
- node.container.lock.Unlock()
-
- // Recurse to anyone who depends on us and start them
- for _, successor := range node.dependedOn {
- startNode(ctx, successor, ctrErrored, ctrErrors, ctrsVisited, restart)
- }
-
- return
-}
-
-// Stop stops all containers within a pod that are not already stopped
-// Each container will use its own stop timeout
-// Only running containers will be stopped. Paused, stopped, or created
-// containers will be ignored.
-// If cleanup is true, mounts and network namespaces will be cleaned up after
-// the container is stopped.
-// All containers are stopped independently. An error stopping one container
-// will not prevent other containers being stopped.
-// An error and a map[string]error are returned
-// If the error is not nil and the map is nil, an error was encountered before
-// any containers were stopped
-// If map is not nil, an error was encountered when stopping one or more
-// containers. The container ID is mapped to the error encountered. The error is
-// set to ErrCtrExists
-// If both error and the map are nil, all containers were stopped without error
-func (p *Pod) Stop(cleanup bool) (map[string]error, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- ctrErrors := make(map[string]error)
-
- // TODO: There may be cases where it makes sense to order stops based on
- // dependencies. Should we bother with this?
-
- // Stop to all containers
- for _, ctr := range allCtrs {
- ctr.lock.Lock()
-
- if err := ctr.syncContainer(); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- // Ignore containers that are not running
- if ctr.state.State != ContainerStateRunning {
- ctr.lock.Unlock()
- continue
- }
-
- if err := ctr.stop(ctr.config.StopTimeout); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- if cleanup {
- if err := ctr.cleanup(); err != nil {
- ctrErrors[ctr.ID()] = err
- }
- }
-
- ctr.lock.Unlock()
- }
-
- if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error stopping some containers")
- }
-
- return nil, nil
-}
-
-// Pause pauses all containers within a pod that are running.
-// Only running containers will be paused. Paused, stopped, or created
-// containers will be ignored.
-// All containers are paused independently. An error pausing one container
-// will not prevent other containers being paused.
-// An error and a map[string]error are returned
-// If the error is not nil and the map is nil, an error was encountered before
-// any containers were paused
-// If map is not nil, an error was encountered when pausing one or more
-// containers. The container ID is mapped to the error encountered. The error is
-// set to ErrCtrExists
-// If both error and the map are nil, all containers were paused without error
-func (p *Pod) Pause() (map[string]error, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- ctrErrors := make(map[string]error)
-
- // Pause to all containers
- for _, ctr := range allCtrs {
- ctr.lock.Lock()
-
- if err := ctr.syncContainer(); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- // Ignore containers that are not running
- if ctr.state.State != ContainerStateRunning {
- ctr.lock.Unlock()
- continue
- }
-
- if err := ctr.pause(); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- ctr.lock.Unlock()
- }
-
- if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error pausing some containers")
- }
-
- return nil, nil
-}
-
-// Unpause unpauses all containers within a pod that are running.
-// Only paused containers will be unpaused. Running, stopped, or created
-// containers will be ignored.
-// All containers are unpaused independently. An error unpausing one container
-// will not prevent other containers being unpaused.
-// An error and a map[string]error are returned
-// If the error is not nil and the map is nil, an error was encountered before
-// any containers were unpaused
-// If map is not nil, an error was encountered when unpausing one or more
-// containers. The container ID is mapped to the error encountered. The error is
-// set to ErrCtrExists
-// If both error and the map are nil, all containers were unpaused without error
-func (p *Pod) Unpause() (map[string]error, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- ctrErrors := make(map[string]error)
-
- // Pause to all containers
- for _, ctr := range allCtrs {
- ctr.lock.Lock()
-
- if err := ctr.syncContainer(); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- // Ignore containers that are not paused
- if ctr.state.State != ContainerStatePaused {
- ctr.lock.Unlock()
- continue
- }
-
- if err := ctr.unpause(); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- ctr.lock.Unlock()
- }
-
- if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error unpausing some containers")
- }
-
- return nil, nil
-}
-
-// Restart restarts all containers within a pod that are not paused or in an error state.
-// It combines the effects of Stop() and Start() on a container
-// Each container will use its own stop timeout.
-// All containers are started independently, in order dictated by their
-// dependencies. An error restarting one container
-// will not prevent other containers being restarted.
-// An error and a map[string]error are returned
-// If the error is not nil and the map is nil, an error was encountered before
-// any containers were restarted
-// If map is not nil, an error was encountered when restarting one or more
-// containers. The container ID is mapped to the error encountered. The error is
-// set to ErrCtrExists
-// If both error and the map are nil, all containers were restarted without error
-func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- // Build a dependency graph of containers in the pod
- graph, err := buildContainerGraph(allCtrs)
- if err != nil {
- return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
- }
-
- ctrErrors := make(map[string]error)
- ctrsVisited := make(map[string]bool)
-
- // If there are no containers without dependencies, we can't start
- // Error out
- if len(graph.noDepNodes) == 0 {
- return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
- }
-
- // Traverse the graph beginning at nodes with no dependencies
- for _, node := range graph.noDepNodes {
- startNode(ctx, node, false, ctrErrors, ctrsVisited, true)
- }
-
- if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error stopping some containers")
- }
-
- return nil, nil
-}
-
-// Kill sends a signal to all running containers within a pod
-// Signals will only be sent to running containers. Containers that are not
-// running will be ignored. All signals are sent independently, and sending will
-// continue even if some containers encounter errors.
-// An error and a map[string]error are returned
-// If the error is not nil and the map is nil, an error was encountered before
-// any containers were signalled
-// If map is not nil, an error was encountered when signalling one or more
-// containers. The container ID is mapped to the error encountered. The error is
-// set to ErrCtrExists
-// If both error and the map are nil, all containers were signalled successfully
-func (p *Pod) Kill(signal uint) (map[string]error, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- ctrErrors := make(map[string]error)
-
- // Send a signal to all containers
- for _, ctr := range allCtrs {
- ctr.lock.Lock()
-
- if err := ctr.syncContainer(); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- // Ignore containers that are not running
- if ctr.state.State != ContainerStateRunning {
- ctr.lock.Unlock()
- continue
- }
-
- if err := ctr.runtime.ociRuntime.killContainer(ctr, signal); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- logrus.Debugf("Killed container %s with signal %d", ctr.ID(), signal)
- }
-
- if len(ctrErrors) > 0 {
- return ctrErrors, nil
- }
-
- return nil, nil
-}
-
// HasContainer checks if a container is present in the pod
func (p *Pod) HasContainer(id string) (bool, error) {
if !p.valid {
@@ -674,75 +153,6 @@ func (p *Pod) AllContainers() ([]*Container, error) {
return p.runtime.state.PodContainers(p)
}
-// Status gets the status of all containers in the pod
-// Returns a map of Container ID to Container Status
-func (p *Pod) Status() (map[string]ContainerStatus, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- // We need to lock all the containers
- for _, ctr := range allCtrs {
- ctr.lock.Lock()
- defer ctr.lock.Unlock()
- }
-
- // Now that all containers are locked, get their status
- status := make(map[string]ContainerStatus, len(allCtrs))
- for _, ctr := range allCtrs {
- if err := ctr.syncContainer(); err != nil {
- return nil, err
- }
-
- status[ctr.ID()] = ctr.state.State
- }
-
- return status, nil
-}
-
// TODO add pod batching
// Lock pod to avoid lock contention
// Store and lock all containers (no RemoveContainer in batch guarantees cache will not become stale)
-
-// Inspect returns a PodInspect struct to describe the pod
-func (p *Pod) Inspect() (*PodInspect, error) {
- var (
- podContainers []PodContainerInfo
- )
-
- containers, err := p.AllContainers()
- if err != nil {
- return &PodInspect{}, err
- }
- for _, c := range containers {
- containerStatus := "unknown"
- // Ignoring possible errors here because we dont want this to be
- // catastrophic in nature
- containerState, err := c.State()
- if err == nil {
- containerStatus = containerState.String()
- }
- pc := PodContainerInfo{
- ID: c.ID(),
- State: containerStatus,
- }
- podContainers = append(podContainers, pc)
- }
-
- config := new(PodConfig)
- deepcopier.Copy(p.config).To(config)
- inspectData := PodInspect{
- Config: config,
- State: p.state,
- Containers: podContainers,
- }
- return &inspectData, nil
-}
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
new file mode 100644
index 000000000..82c19e2b5
--- /dev/null
+++ b/libpod/pod_api.go
@@ -0,0 +1,428 @@
+package libpod
+
+import (
+ "context"
+
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/ulule/deepcopier"
+)
+
+// Start starts all containers within a pod
+// It combines the effects of Init() and Start() on a container
+// If a container has already been initialized it will be started,
+// otherwise it will be initialized then started.
+// Containers that are already running or have been paused are ignored
+// All containers are started independently, in order dictated by their
+// dependencies.
+// An error and a map[string]error are returned
+// If the error is not nil and the map is nil, an error was encountered before
+// any containers were started
+// If map is not nil, an error was encountered when starting one or more
+// containers. The container ID is mapped to the error encountered. The error is
+// set to ErrCtrExists
+// If both error and the map are nil, all containers were started successfully
+func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ // Build a dependency graph of containers in the pod
+ graph, err := buildContainerGraph(allCtrs)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
+ }
+
+ ctrErrors := make(map[string]error)
+ ctrsVisited := make(map[string]bool)
+
+ // If there are no containers without dependencies, we can't start
+ // Error out
+ if len(graph.noDepNodes) == 0 {
+ return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
+ }
+
+ // Traverse the graph beginning at nodes with no dependencies
+ for _, node := range graph.noDepNodes {
+ startNode(ctx, node, false, ctrErrors, ctrsVisited, false)
+ }
+
+ return ctrErrors, nil
+}
+
+// Stop stops all containers within a pod that are not already stopped
+// Each container will use its own stop timeout
+// Only running containers will be stopped. Paused, stopped, or created
+// containers will be ignored.
+// If cleanup is true, mounts and network namespaces will be cleaned up after
+// the container is stopped.
+// All containers are stopped independently. An error stopping one container
+// will not prevent other containers being stopped.
+// An error and a map[string]error are returned
+// If the error is not nil and the map is nil, an error was encountered before
+// any containers were stopped
+// If map is not nil, an error was encountered when stopping one or more
+// containers. The container ID is mapped to the error encountered. The error is
+// set to ErrCtrExists
+// If both error and the map are nil, all containers were stopped without error
+func (p *Pod) Stop(cleanup bool) (map[string]error, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ ctrErrors := make(map[string]error)
+
+ // TODO: There may be cases where it makes sense to order stops based on
+ // dependencies. Should we bother with this?
+
+ // Stop all containers
+ for _, ctr := range allCtrs {
+ ctr.lock.Lock()
+
+ if err := ctr.syncContainer(); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ // Ignore containers that are not running
+ if ctr.state.State != ContainerStateRunning {
+ ctr.lock.Unlock()
+ continue
+ }
+
+ if err := ctr.stop(ctr.config.StopTimeout); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ if cleanup {
+ if err := ctr.cleanup(); err != nil {
+ ctrErrors[ctr.ID()] = err
+ }
+ }
+
+ ctr.lock.Unlock()
+ }
+
+ if len(ctrErrors) > 0 {
+ return ctrErrors, errors.Wrapf(ErrCtrExists, "error stopping some containers")
+ }
+
+ return nil, nil
+}
+
+// Pause pauses all containers within a pod that are running.
+// Only running containers will be paused. Paused, stopped, or created
+// containers will be ignored.
+// All containers are paused independently. An error pausing one container
+// will not prevent other containers being paused.
+// An error and a map[string]error are returned
+// If the error is not nil and the map is nil, an error was encountered before
+// any containers were paused
+// If map is not nil, an error was encountered when pausing one or more
+// containers. The container ID is mapped to the error encountered. The error is
+// set to ErrCtrExists
+// If both error and the map are nil, all containers were paused without error
+func (p *Pod) Pause() (map[string]error, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ ctrErrors := make(map[string]error)
+
+ // Pause all containers
+ for _, ctr := range allCtrs {
+ ctr.lock.Lock()
+
+ if err := ctr.syncContainer(); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ // Ignore containers that are not running
+ if ctr.state.State != ContainerStateRunning {
+ ctr.lock.Unlock()
+ continue
+ }
+
+ if err := ctr.pause(); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ ctr.lock.Unlock()
+ }
+
+ if len(ctrErrors) > 0 {
+ return ctrErrors, errors.Wrapf(ErrCtrExists, "error pausing some containers")
+ }
+
+ return nil, nil
+}
+
+// Unpause unpauses all containers within a pod that are running.
+// Only paused containers will be unpaused. Running, stopped, or created
+// containers will be ignored.
+// All containers are unpaused independently. An error unpausing one container
+// will not prevent other containers being unpaused.
+// An error and a map[string]error are returned
+// If the error is not nil and the map is nil, an error was encountered before
+// any containers were unpaused
+// If map is not nil, an error was encountered when unpausing one or more
+// containers. The container ID is mapped to the error encountered. The error is
+// set to ErrCtrExists
+// If both error and the map are nil, all containers were unpaused without error
+func (p *Pod) Unpause() (map[string]error, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ ctrErrors := make(map[string]error)
+
+ // Unpause all containers
+ for _, ctr := range allCtrs {
+ ctr.lock.Lock()
+
+ if err := ctr.syncContainer(); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ // Ignore containers that are not paused
+ if ctr.state.State != ContainerStatePaused {
+ ctr.lock.Unlock()
+ continue
+ }
+
+ if err := ctr.unpause(); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ ctr.lock.Unlock()
+ }
+
+ if len(ctrErrors) > 0 {
+ return ctrErrors, errors.Wrapf(ErrCtrExists, "error unpausing some containers")
+ }
+
+ return nil, nil
+}
+
+// Restart restarts all containers within a pod that are not paused or in an error state.
+// It combines the effects of Stop() and Start() on a container
+// Each container will use its own stop timeout.
+// All containers are started independently, in order dictated by their
+// dependencies. An error restarting one container
+// will not prevent other containers being restarted.
+// An error and a map[string]error are returned
+// If the error is not nil and the map is nil, an error was encountered before
+// any containers were restarted
+// If map is not nil, an error was encountered when restarting one or more
+// containers. The container ID is mapped to the error encountered. The error is
+// set to ErrCtrExists
+// If both error and the map are nil, all containers were restarted without error
+func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ // Build a dependency graph of containers in the pod
+ graph, err := buildContainerGraph(allCtrs)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
+ }
+
+ ctrErrors := make(map[string]error)
+ ctrsVisited := make(map[string]bool)
+
+ // If there are no containers without dependencies, we can't start
+ // Error out
+ if len(graph.noDepNodes) == 0 {
+ return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
+ }
+
+ // Traverse the graph beginning at nodes with no dependencies
+ for _, node := range graph.noDepNodes {
+ startNode(ctx, node, false, ctrErrors, ctrsVisited, true)
+ }
+
+ if len(ctrErrors) > 0 {
+ return ctrErrors, errors.Wrapf(ErrCtrExists, "error stopping some containers")
+ }
+
+ return nil, nil
+}
+
+// Kill sends a signal to all running containers within a pod
+// Signals will only be sent to running containers. Containers that are not
+// running will be ignored. All signals are sent independently, and sending will
+// continue even if some containers encounter errors.
+// An error and a map[string]error are returned
+// If the error is not nil and the map is nil, an error was encountered before
+// any containers were signalled
+// If map is not nil, an error was encountered when signalling one or more
+// containers. The container ID is mapped to the error encountered. The error is
+// set to ErrCtrExists
+// If both error and the map are nil, all containers were signalled successfully
+func (p *Pod) Kill(signal uint) (map[string]error, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ ctrErrors := make(map[string]error)
+
+ // Send a signal to all containers
+ for _, ctr := range allCtrs {
+ ctr.lock.Lock()
+
+ if err := ctr.syncContainer(); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ // Ignore containers that are not running
+ if ctr.state.State != ContainerStateRunning {
+ ctr.lock.Unlock()
+ continue
+ }
+
+ if err := ctr.runtime.ociRuntime.killContainer(ctr, signal); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ logrus.Debugf("Killed container %s with signal %d", ctr.ID(), signal)
+ }
+
+ if len(ctrErrors) > 0 {
+ return ctrErrors, nil
+ }
+
+ return nil, nil
+}
+
+// Status gets the status of all containers in the pod
+// Returns a map of Container ID to Container Status
+func (p *Pod) Status() (map[string]ContainerStatus, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ // We need to lock all the containers
+ for _, ctr := range allCtrs {
+ ctr.lock.Lock()
+ defer ctr.lock.Unlock()
+ }
+
+ // Now that all containers are locked, get their status
+ status := make(map[string]ContainerStatus, len(allCtrs))
+ for _, ctr := range allCtrs {
+ if err := ctr.syncContainer(); err != nil {
+ return nil, err
+ }
+
+ status[ctr.ID()] = ctr.state.State
+ }
+
+ return status, nil
+}
+
+// Inspect returns a PodInspect struct to describe the pod
+func (p *Pod) Inspect() (*PodInspect, error) {
+ var (
+ podContainers []PodContainerInfo
+ )
+
+ containers, err := p.AllContainers()
+ if err != nil {
+ return &PodInspect{}, err
+ }
+ for _, c := range containers {
+ containerStatus := "unknown"
+ // Ignoring possible errors here because we don't want this to be
+ // catastrophic in nature
+ containerState, err := c.State()
+ if err == nil {
+ containerStatus = containerState.String()
+ }
+ pc := PodContainerInfo{
+ ID: c.ID(),
+ State: containerStatus,
+ }
+ podContainers = append(podContainers, pc)
+ }
+
+ config := new(PodConfig)
+ deepcopier.Copy(p.config).To(config)
+ inspectData := PodInspect{
+ Config: config,
+ State: p.state,
+ Containers: podContainers,
+ }
+ return &inspectData, nil
+}
diff --git a/libpod/pod_ffjson.go b/libpod/pod_ffjson.go
index b3012bf5f..3d7140b68 100644
--- a/libpod/pod_ffjson.go
+++ b/libpod/pod_ffjson.go
@@ -607,6 +607,623 @@ done:
}
// MarshalJSON marshal bytes to json - template
+func (j *PodContainerInfo) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *PodContainerInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"id":`)
+ fflib.WriteJsonString(buf, string(j.ID))
+ buf.WriteString(`,"state":`)
+ fflib.WriteJsonString(buf, string(j.State))
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtPodContainerInfobase = iota
+ ffjtPodContainerInfonosuchkey
+
+ ffjtPodContainerInfoID
+
+ ffjtPodContainerInfoState
+)
+
+var ffjKeyPodContainerInfoID = []byte("id")
+
+var ffjKeyPodContainerInfoState = []byte("state")
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *PodContainerInfo) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *PodContainerInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtPodContainerInfobase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtPodContainerInfonosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'i':
+
+ if bytes.Equal(ffjKeyPodContainerInfoID, kn) {
+ currentKey = ffjtPodContainerInfoID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 's':
+
+ if bytes.Equal(ffjKeyPodContainerInfoState, kn) {
+ currentKey = ffjtPodContainerInfoState
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffjKeyPodContainerInfoState, kn) {
+ currentKey = ffjtPodContainerInfoState
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyPodContainerInfoID, kn) {
+ currentKey = ffjtPodContainerInfoID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtPodContainerInfonosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtPodContainerInfoID:
+ goto handle_ID
+
+ case ffjtPodContainerInfoState:
+ goto handle_State
+
+ case ffjtPodContainerInfonosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ID:
+
+ /* handler: j.ID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.ID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_State:
+
+ /* handler: j.State type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.State = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *PodInspect) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *PodInspect) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ if j.Config != nil {
+ buf.WriteString(`{"Config":`)
+
+ {
+
+ err = j.Config.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ } else {
+ buf.WriteString(`{"Config":null`)
+ }
+ if j.State != nil {
+ buf.WriteString(`,"State":`)
+
+ {
+
+ err = j.State.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ } else {
+ buf.WriteString(`,"State":null`)
+ }
+ buf.WriteString(`,"Containers":`)
+ if j.Containers != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.Containers {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+
+ {
+
+ err = v.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtPodInspectbase = iota
+ ffjtPodInspectnosuchkey
+
+ ffjtPodInspectConfig
+
+ ffjtPodInspectState
+
+ ffjtPodInspectContainers
+)
+
+var ffjKeyPodInspectConfig = []byte("Config")
+
+var ffjKeyPodInspectState = []byte("State")
+
+var ffjKeyPodInspectContainers = []byte("Containers")
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *PodInspect) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *PodInspect) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtPodInspectbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtPodInspectnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'C':
+
+ if bytes.Equal(ffjKeyPodInspectConfig, kn) {
+ currentKey = ffjtPodInspectConfig
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyPodInspectContainers, kn) {
+ currentKey = ffjtPodInspectContainers
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'S':
+
+ if bytes.Equal(ffjKeyPodInspectState, kn) {
+ currentKey = ffjtPodInspectState
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffjKeyPodInspectContainers, kn) {
+ currentKey = ffjtPodInspectContainers
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyPodInspectState, kn) {
+ currentKey = ffjtPodInspectState
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyPodInspectConfig, kn) {
+ currentKey = ffjtPodInspectConfig
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtPodInspectnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtPodInspectConfig:
+ goto handle_Config
+
+ case ffjtPodInspectState:
+ goto handle_State
+
+ case ffjtPodInspectContainers:
+ goto handle_Containers
+
+ case ffjtPodInspectnosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_Config:
+
+ /* handler: j.Config type=libpod.PodConfig kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ j.Config = nil
+
+ } else {
+
+ if j.Config == nil {
+ j.Config = new(PodConfig)
+ }
+
+ err = j.Config.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_State:
+
+ /* handler: j.State type=libpod.podState kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ j.State = nil
+
+ } else {
+
+ if j.State == nil {
+ j.State = new(podState)
+ }
+
+ err = j.State.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Containers:
+
+ /* handler: j.Containers type=[]libpod.PodContainerInfo kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Containers = nil
+ } else {
+
+ j.Containers = []PodContainerInfo{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJContainers PodContainerInfo
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJContainers type=libpod.PodContainerInfo kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ err = tmpJContainers.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ j.Containers = append(j.Containers, tmpJContainers)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
func (j *podState) MarshalJSON() ([]byte, error) {
var buf fflib.Buffer
if j == nil {
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
new file mode 100644
index 000000000..9102ae28a
--- /dev/null
+++ b/libpod/pod_internal.go
@@ -0,0 +1,177 @@
+package libpod
+
+import (
+ "context"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containers/storage"
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// newPod creates a new, empty pod with a generated ID and its own lockfile
+func newPod(lockDir string, runtime *Runtime) (*Pod, error) {
+	pod := new(Pod)
+	pod.config = new(PodConfig)
+	pod.config.ID = stringid.GenerateNonCryptoID()
+	pod.config.Labels = make(map[string]string)
+	pod.config.CreatedTime = time.Now()
+	pod.state = new(podState)
+	pod.runtime = runtime
+
+	// Path our lock file will reside at
+	lockPath := filepath.Join(lockDir, pod.config.ID)
+	// Grab a lockfile at the given path
+	lock, err := storage.GetLockfile(lockPath)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating lockfile for new pod")
+	}
+	pod.lock = lock
+
+	return pod, nil
+}
+
+// updatePod refreshes the pod's in-memory state from the database
+func (p *Pod) updatePod() error {
+	if err := p.runtime.state.UpdatePod(p); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// save writes the pod's current state to the database
+func (p *Pod) save() error {
+	if err := p.runtime.state.SavePod(p); err != nil {
+		// Include the pod ID: the original call supplied no argument for
+		// the %s verb, which go vet flags and which rendered %!s(MISSING)
+		return errors.Wrapf(err, "error saving pod %s state", p.ID())
+	}
+
+	return nil
+}
+
+// refresh repairs a pod's state after a runtime restart
+func (p *Pod) refresh() error {
+	// Need to do an update from the DB to pull potentially-missing state
+	if err := p.runtime.state.UpdatePod(p); err != nil {
+		return err
+	}
+
+	if !p.valid {
+		return ErrPodRemoved
+	}
+
+	// We need to recreate the pod's cgroup
+	if p.config.UsePodCgroup {
+		switch p.runtime.config.CgroupManager {
+		case SystemdCgroupsManager:
+			// NOOP for now, until proper systemd cgroup management
+			// is implemented
+		case CgroupfsCgroupsManager:
+			p.state.CgroupPath = filepath.Join(p.config.CgroupParent, p.ID())
+
+			logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath)
+		default:
+			return errors.Wrapf(ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.CgroupManager)
+		}
+	}
+
+	// Save changes
+	return p.save()
+}
+
+// Visit a node on a container graph and start the container, or set an error if
+// a dependency failed to start. If restart is true, startNode restarts the node instead of starting it.
+func startNode(ctx context.Context, node *containerNode, setError bool, ctrErrors map[string]error, ctrsVisited map[string]bool, restart bool) {
+	// First, check if we have already visited the node
+	if ctrsVisited[node.id] {
+		return
+	}
+
+	// If setError is true, a dependency of us failed
+	// Mark us as failed and recurse
+	if setError {
+		// Mark us as visited, and set an error
+		ctrsVisited[node.id] = true
+		ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id)
+
+		// Hit anyone who depends on us, and set errors on them too
+		for _, successor := range node.dependedOn {
+			startNode(ctx, successor, true, ctrErrors, ctrsVisited, restart)
+		}
+
+		return
+	}
+
+	// Have all our dependencies started?
+	// If not, don't visit the node yet
+	depsVisited := true
+	for _, dep := range node.dependsOn {
+		depsVisited = depsVisited && ctrsVisited[dep.id]
+	}
+	if !depsVisited {
+		// Don't visit us yet, all dependencies are not up
+		// We'll hit the dependencies eventually, and when we do it will
+		// recurse here
+		return
+	}
+
+	// Going to try to start the container, mark us as visited
+	ctrsVisited[node.id] = true
+
+	ctrErrored := false
+
+	// Check if dependencies are running
+	// Graph traversal means we should have started them
+	// But they could have died before we got here
+	// Does not require that the container be locked, we only need to lock
+	// the dependencies
+	depsStopped, err := node.container.checkDependenciesRunning()
+	if err != nil {
+		ctrErrors[node.id] = err
+		ctrErrored = true
+	} else if len(depsStopped) > 0 {
+		// Our dependencies are not running
+		depsList := strings.Join(depsStopped, ",")
+		ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
+		ctrErrored = true
+	}
+
+	// Lock before we start
+	node.container.lock.Lock()
+
+	// Sync the container to pick up current state
+	if !ctrErrored {
+		if err := node.container.syncContainer(); err != nil {
+			ctrErrored = true
+			ctrErrors[node.id] = err
+		}
+	}
+
+	// Start the container (only if it is not running)
+	if !ctrErrored {
+		if !restart && node.container.state.State != ContainerStateRunning {
+			if err := node.container.initAndStart(ctx); err != nil {
+				ctrErrored = true
+				ctrErrors[node.id] = err
+			}
+		}
+		if restart && node.container.state.State != ContainerStatePaused && node.container.state.State != ContainerStateUnknown {
+			if err := node.container.restartWithTimeout(ctx, node.container.config.StopTimeout); err != nil {
+				ctrErrored = true
+				ctrErrors[node.id] = err
+			}
+		}
+	}
+
+	node.container.lock.Unlock()
+
+	// Recurse to anyone who depends on us and start them
+	for _, successor := range node.dependedOn {
+		startNode(ctx, successor, ctrErrored, ctrErrors, ctrsVisited, restart)
+	}
+
+	return
+}