-rw-r--r--   Dockerfile                                                       6
-rw-r--r--   Dockerfile.CentOS                                                6
-rw-r--r--   Dockerfile.Fedora                                                6
-rw-r--r--   Makefile                                                         2
-rw-r--r--   changelog.txt                                                   42
-rw-r--r--   cmd/podman/search.go                                             2
-rw-r--r--   contrib/spec/podman.spec.in                                      2
-rw-r--r--   libpod/container_api.go                                          2
-rw-r--r--   libpod/container_internal.go                                     7
-rw-r--r--   libpod/container_internal_linux.go                               6
-rw-r--r--   libpod/in_memory_state.go                                        2
-rw-r--r--   libpod/info.go                                                   2
-rw-r--r--   libpod/pod.go                                                  601
-rw-r--r--   libpod/pod_api.go                                              436
-rw-r--r--   libpod/pod_ffjson.go                                           839
-rw-r--r--   libpod/pod_internal.go                                         177
-rw-r--r--   libpod/runtime.go                                                6
-rw-r--r--   libpod/runtime_ctr.go                                            2
-rw-r--r--   libpod/stats.go                                                 12
-rw-r--r--   libpod/util.go                                                  24
-rw-r--r--   vendor/github.com/docker/docker/pkg/stringid/README.md          1
-rw-r--r--   vendor/github.com/docker/docker/pkg/stringid/stringid.go       99
-rw-r--r--   vendor/github.com/docker/docker/pkg/truncindex/truncindex.go  139
-rw-r--r--   version/version.go                                               2
24 files changed, 1557 insertions, 866 deletions
diff --git a/Dockerfile b/Dockerfile
index fcbf44296..24be23e46 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -104,6 +104,12 @@ RUN set -x \
&& export GOPATH=/go \
&& go get github.com/onsi/gomega/...
+# Install ffjson
+RUN set -x \
+ && export GOPATH=/go \
+ && go get github.com/pquerna/ffjson \
+ && install -D -m 755 "$GOPATH"/bin/ffjson /usr/bin/
+
# Install cni config
#RUN make install.cni
RUN mkdir -p /etc/cni/net.d/
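
The ffjson binary installed above is the code generator behind the regenerated libpod/pod_ffjson.go further down in this diff. A minimal sketch of how a file is fed to it (the go:generate wiring here is illustrative, not taken from the repository): running "ffjson pod.go" emits pod_ffjson.go with reflection-free MarshalJSON/UnmarshalJSON methods for the structs in that file.

package libpod

//go:generate ffjson pod.go

// PodContainerInfo mirrors the struct introduced later in this commit;
// ffjson generates marshalers for every struct in the file it is given.
type PodContainerInfo struct {
	ID    string `json:"id"`
	State string `json:"state"`
}
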
diff --git a/Dockerfile.CentOS b/Dockerfile.CentOS
index 5eea63e68..ccd4fd154 100644
--- a/Dockerfile.CentOS
+++ b/Dockerfile.CentOS
@@ -61,6 +61,12 @@ RUN set -x \
&& export GOPATH=/go \
&& go get github.com/onsi/gomega/...
+# Install ffjson
+RUN set -x \
+ && export GOPATH=/go \
+ && go get github.com/pquerna/ffjson \
+ && install -D -m 755 "$GOPATH"/bin/ffjson /usr/bin/
+
# Install conmon
ENV CRIO_COMMIT 662dbb31b5d4f5ed54511a47cde7190c61c28677
RUN set -x \
diff --git a/Dockerfile.Fedora b/Dockerfile.Fedora
index 1bbcbdbf7..983b754b9 100644
--- a/Dockerfile.Fedora
+++ b/Dockerfile.Fedora
@@ -63,6 +63,12 @@ RUN set -x \
&& export GOPATH=/go \
&& go get github.com/onsi/gomega/...
+# Install ffjson
+RUN set -x \
+ && export GOPATH=/go \
+ && go get github.com/pquerna/ffjson \
+ && install -D -m 755 "$GOPATH"/bin/ffjson /usr/bin/
+
# Install conmon
ENV CRIO_COMMIT 662dbb31b5d4f5ed54511a47cde7190c61c28677
RUN set -x \
diff --git a/Makefile b/Makefile
index 2a33d77b1..c87d09a3b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
GO ?= go
DESTDIR ?= /
-EPOCH_TEST_COMMIT ?= 7462ebe830b256e9e145d133c824de5dfd23045d
+EPOCH_TEST_COMMIT ?= 92e9d7891e2d68b119936509e780f3a3d93d8780
HEAD ?= HEAD
CHANGELOG_BASE ?= HEAD~
CHANGELOG_TARGET ?= HEAD
diff --git a/changelog.txt b/changelog.txt
index c9b384012..75ff286f4 100644
--- a/changelog.txt
+++ b/changelog.txt
@@ -1,3 +1,45 @@
+- Changelog for v0.8.2 (2018-08-10)
+ * We need to sort mounts so that one mount does not over mount another.
+ * search name should include registry
+ * Split pod.go into 3 files
+ * Make errors during refresh nonfatal
+ * Add batch check to container stats lock
+ * removeContainer: fix deadlock
+ * Add FFJSON to build container
+ * Don't require .gopathok for individual FFJSON targets
+ * Add FFJSON generation to makefile
+ * Re-add FFJSON for container and pod structs
+ * Fixed a bug setting dependencies on the wrong container
+ * Always connect to the stdout and stderr of stream
+ * apparmor: respect "unconfined" setting
+ * oci.go: syslog: fix debug formatting
+ * add podman pod inspect
+ * Fix ambiguity in adding localhost to podman save
+ * Fix CGroupFS cgroup manager cgroup creation for pods
+ * Update Conmon commit for testing
+ * Pass newly-added --log-level flag to Conmon
+ * Cleanup man pages
+ * Improve ps handling of container start/stop time
+ * rootless: fix user lookup if USER= is not set
+ * enabled copr epel builds again
+ * Handle yum and dnf
+ * Test regressions against the RPM spec file
+ * Pass DESTDIR down to python Makefile
+ * Add dpkg support for returning oci/conmon versions
+ * Have info print conmon/oci runtime information
+ * Better pull error for fully-qualified images
+ * Stub varlink pod methods.
+ * Remove inotify work around
+ * Rename varlink socket and interface
+ * Change tarball filename in copr prepare and match short-commit length
+ * Add Runc and Conmon versions to Podman Version
+ * update copr spec, lets get it building again
+ * Add missing dependencies to build system
+ * Port to MacOS
+ * Make one runtime for the varlink service
+ * Bump gitvalidation epoch
+ * Bump to v0.8.2-dev
+
- Changelog for v0.8.1 (2018-08-03)
* Vendor in latest github.com/projectatomic/buildah
* Update gitvalidation epoch
diff --git a/cmd/podman/search.go b/cmd/podman/search.go
index b165411ee..d71432fc6 100644
--- a/cmd/podman/search.go
+++ b/cmd/podman/search.go
@@ -264,7 +264,7 @@ func getSearchOutput(term string, regAndSkipTLS map[string]bool, opts searchOpts
if len(description) > 44 && !opts.noTrunc {
description = description[:descriptionTruncLength] + "..."
}
- name := index + "/" + results[i].Name
+ name := reg + "/" + results[i].Name
if index == "docker.io" && !strings.Contains(results[i].Name, "/") {
name = index + "/library/" + results[i].Name
}
diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in
index 403379340..31a70fbde 100644
--- a/contrib/spec/podman.spec.in
+++ b/contrib/spec/podman.spec.in
@@ -33,7 +33,7 @@
%global shortcommit0 %(c=%{commit0}; echo ${c:0:8})
Name: podman
-Version: 0.8.2
+Version: 0.8.3
Release: #COMMITDATE#.git%{shortcommit0}%{?dist}
Summary: Manage Pods, Containers and Container Images
License: ASL 2.0
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 73fd96960..62281218f 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -8,8 +8,8 @@ import (
"strings"
"time"
+ "github.com/containers/storage/pkg/stringid"
"github.com/docker/docker/daemon/caps"
- "github.com/docker/docker/pkg/stringid"
"github.com/pkg/errors"
"github.com/projectatomic/libpod/libpod/driver"
"github.com/projectatomic/libpod/pkg/inspect"
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 7b5932541..535f34200 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -16,8 +16,8 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
- "github.com/docker/docker/pkg/mount"
- "github.com/docker/docker/pkg/stringid"
+ "github.com/containers/storage/pkg/mount"
+ "github.com/containers/storage/pkg/stringid"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -829,7 +829,6 @@ func (c *Container) cleanupStorage() error {
logrus.Debugf("Storage is already unmounted, skipping...")
return nil
}
-
for _, mount := range c.config.Mounts {
if err := c.unmountSHM(mount); err != nil {
return err
@@ -1178,7 +1177,7 @@ func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator)
mount := spec.Mount{
Destination: k,
Type: "bind",
- Options: []string{"rbind", "rw"},
+ Options: []string{"private", "bind", "rw"},
}
if MountExists(g.Mounts(), k) {
continue
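
The options change drops the recursive rbind in favor of a plain bind plus private propagation for local volumes. A hypothetical example of the mount entry generateSpec would now build (the destination path is made up):

package libpod

import spec "github.com/opencontainers/runtime-spec/specs-go"

// Illustrative only: "bind" is non-recursive and "private" keeps host
// mount propagation from leaking into the volume, complementing the
// mount sorting added in container_internal_linux.go below.
var exampleVolumeMount = spec.Mount{
	Destination: "/data", // hypothetical volume destination
	Type:        "bind",
	Options:     []string{"private", "bind", "rw"},
}
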
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index e7e3b6ce9..59fb6af87 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -248,6 +248,12 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
g.SetLinuxCgroupsPath(cgroupPath)
}
+ // Mounts need to be sorted so paths will not cover other paths
+ mounts := sortMounts(g.Mounts())
+ g.ClearMounts()
+ for _, m := range mounts {
+ g.AddMount(m)
+ }
return g.Config, nil
}
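
sortMounts itself lives in libpod/util.go (changed by this commit but not shown here). A minimal sketch of the idea, assuming a shallow-first sort by destination depth so a parent path is always mounted before anything nested under it:

package libpod

import (
	"sort"
	"strings"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// sortMountsSketch orders mounts so that /a is mounted before /a/b;
// mounting /a after /a/b would cover the deeper mount. Sketch only,
// the real sortMounts may differ in detail.
func sortMountsSketch(m []spec.Mount) []spec.Mount {
	sort.SliceStable(m, func(i, j int) bool {
		return strings.Count(m[i].Destination, "/") < strings.Count(m[j].Destination, "/")
	})
	return m
}
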
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 8bdd0881c..0327b331e 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -3,7 +3,7 @@ package libpod
import (
"strings"
- "github.com/docker/docker/pkg/truncindex"
+ "github.com/containers/storage/pkg/truncindex"
"github.com/pkg/errors"
"github.com/projectatomic/libpod/pkg/registrar"
)
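
The in-memory state resolves abbreviated container and pod IDs through truncindex, which now also comes from containers/storage rather than the docker vendor tree (the docker copy is removed later in this diff). A short usage sketch, assuming the API is unchanged from the docker original:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/truncindex"
)

func main() {
	idx := truncindex.NewTruncIndex([]string{})
	// Example ID; any unambiguous prefix resolves back to the full string.
	_ = idx.Add("0f5dc8a7681f6dbbc9afb8e8d9f21e4e249b1c3b9b5b7a6f2f3a4b5c6d7e8f90")
	full, err := idx.Get("0f5dc8")
	fmt.Println(full, err)
}
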
diff --git a/libpod/info.go b/libpod/info.go
index 5bb77f447..1108845ea 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -10,7 +10,7 @@ import (
"strings"
"time"
- "github.com/docker/docker/pkg/system"
+ "github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
"github.com/projectatomic/libpod/utils"
)
diff --git a/libpod/pod.go b/libpod/pod.go
index 46182680a..5d762190b 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -1,16 +1,9 @@
package libpod
import (
- "context"
- "path/filepath"
- "strings"
"time"
"github.com/containers/storage"
- "github.com/docker/docker/pkg/stringid"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "github.com/ulule/deepcopier"
)
// Pod represents a group of containers that are managed together.
@@ -48,7 +41,7 @@ type PodConfig struct {
// join containers to it.
// If true, all containers joined to the pod will use the pod cgroup as
// their cgroup parent, and cannot set a different cgroup parent
- UsePodCgroup bool
+ UsePodCgroup bool `json:"usePodCgroup"`
// Time pod was created
CreatedTime time.Time `json:"created"`
@@ -57,17 +50,22 @@ type PodConfig struct {
// podState represents a pod's state
type podState struct {
// CgroupPath is the path to the pod's CGroup
- CgroupPath string
+ CgroupPath string `json:"cgroupPath"`
}
// PodInspect represents the data we want to display for
// podman pod inspect
type PodInspect struct {
Config *PodConfig
- State *podState
+ State *PodInspectState
Containers []PodContainerInfo
}
+// PodInspectState contains inspect data on the pod's state
+type PodInspectState struct {
+ CgroupPath string `json:"cgroupPath"`
+}
+
// PodContainerInfo keeps information on a container in a pod
type PodContainerInfo struct {
ID string `json:"id"`
@@ -127,520 +125,6 @@ func (p *Pod) CgroupPath() (string, error) {
return p.state.CgroupPath, nil
}
-// Creates a new, empty pod
-func newPod(lockDir string, runtime *Runtime) (*Pod, error) {
- pod := new(Pod)
- pod.config = new(PodConfig)
- pod.config.ID = stringid.GenerateNonCryptoID()
- pod.config.Labels = make(map[string]string)
- pod.config.CreatedTime = time.Now()
- pod.state = new(podState)
- pod.runtime = runtime
-
- // Path our lock file will reside at
- lockPath := filepath.Join(lockDir, pod.config.ID)
- // Grab a lockfile at the given path
- lock, err := storage.GetLockfile(lockPath)
- if err != nil {
- return nil, errors.Wrapf(err, "error creating lockfile for new pod")
- }
- pod.lock = lock
-
- return pod, nil
-}
-
-// Update pod state from database
-func (p *Pod) updatePod() error {
- if err := p.runtime.state.UpdatePod(p); err != nil {
- return err
- }
-
- return nil
-}
-
-// Save pod state to database
-func (p *Pod) save() error {
- if err := p.runtime.state.SavePod(p); err != nil {
- return errors.Wrapf(err, "error saving pod %s state")
- }
-
- return nil
-}
-
-// Refresh a pod's state after restart
-func (p *Pod) refresh() error {
- // Need to to an update from the DB to pull potentially-missing state
- if err := p.runtime.state.UpdatePod(p); err != nil {
- return err
- }
-
- if !p.valid {
- return ErrPodRemoved
- }
-
- // We need to recreate the pod's cgroup
- if p.config.UsePodCgroup {
- switch p.runtime.config.CgroupManager {
- case SystemdCgroupsManager:
- // NOOP for now, until proper systemd cgroup management
- // is implemented
- case CgroupfsCgroupsManager:
- p.state.CgroupPath = filepath.Join(p.config.CgroupParent, p.ID())
-
- logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath)
- default:
- return errors.Wrapf(ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.CgroupManager)
- }
- }
-
- // Save changes
- return p.save()
-}
-
-// Start starts all containers within a pod
-// It combines the effects of Init() and Start() on a container
-// If a container has already been initialized it will be started,
-// otherwise it will be initialized then started.
-// Containers that are already running or have been paused are ignored
-// All containers are started independently, in order dictated by their
-// dependencies.
-// An error and a map[string]error are returned
-// If the error is not nil and the map is nil, an error was encountered before
-// any containers were started
-// If map is not nil, an error was encountered when starting one or more
-// containers. The container ID is mapped to the error encountered. The error is
-// set to ErrCtrExists
-// If both error and the map are nil, all containers were started successfully
-func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- // Build a dependency graph of containers in the pod
- graph, err := buildContainerGraph(allCtrs)
- if err != nil {
- return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
- }
-
- ctrErrors := make(map[string]error)
- ctrsVisited := make(map[string]bool)
-
- // If there are no containers without dependencies, we can't start
- // Error out
- if len(graph.noDepNodes) == 0 {
- return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
- }
-
- // Traverse the graph beginning at nodes with no dependencies
- for _, node := range graph.noDepNodes {
- startNode(ctx, node, false, ctrErrors, ctrsVisited, false)
- }
-
- return ctrErrors, nil
-}
-
-// Visit a node on a container graph and start the container, or set an error if
-// a dependency failed to start. if restart is true, startNode will restart the node instead of starting it.
-func startNode(ctx context.Context, node *containerNode, setError bool, ctrErrors map[string]error, ctrsVisited map[string]bool, restart bool) {
- // First, check if we have already visited the node
- if ctrsVisited[node.id] {
- return
- }
-
- // If setError is true, a dependency of us failed
- // Mark us as failed and recurse
- if setError {
- // Mark us as visited, and set an error
- ctrsVisited[node.id] = true
- ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id)
-
- // Hit anyone who depends on us, and set errors on them too
- for _, successor := range node.dependedOn {
- startNode(ctx, successor, true, ctrErrors, ctrsVisited, restart)
- }
-
- return
- }
-
- // Have all our dependencies started?
- // If not, don't visit the node yet
- depsVisited := true
- for _, dep := range node.dependsOn {
- depsVisited = depsVisited && ctrsVisited[dep.id]
- }
- if !depsVisited {
- // Don't visit us yet, all dependencies are not up
- // We'll hit the dependencies eventually, and when we do it will
- // recurse here
- return
- }
-
- // Going to try to start the container, mark us as visited
- ctrsVisited[node.id] = true
-
- ctrErrored := false
-
- // Check if dependencies are running
- // Graph traversal means we should have started them
- // But they could have died before we got here
- // Does not require that the container be locked, we only need to lock
- // the dependencies
- depsStopped, err := node.container.checkDependenciesRunning()
- if err != nil {
- ctrErrors[node.id] = err
- ctrErrored = true
- } else if len(depsStopped) > 0 {
- // Our dependencies are not running
- depsList := strings.Join(depsStopped, ",")
- ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
- ctrErrored = true
- }
-
- // Lock before we start
- node.container.lock.Lock()
-
- // Sync the container to pick up current state
- if !ctrErrored {
- if err := node.container.syncContainer(); err != nil {
- ctrErrored = true
- ctrErrors[node.id] = err
- }
- }
-
- // Start the container (only if it is not running)
- if !ctrErrored {
- if !restart && node.container.state.State != ContainerStateRunning {
- if err := node.container.initAndStart(ctx); err != nil {
- ctrErrored = true
- ctrErrors[node.id] = err
- }
- }
- if restart && node.container.state.State != ContainerStatePaused && node.container.state.State != ContainerStateUnknown {
- if err := node.container.restartWithTimeout(ctx, node.container.config.StopTimeout); err != nil {
- ctrErrored = true
- ctrErrors[node.id] = err
- }
- }
- }
-
- node.container.lock.Unlock()
-
- // Recurse to anyone who depends on us and start them
- for _, successor := range node.dependedOn {
- startNode(ctx, successor, ctrErrored, ctrErrors, ctrsVisited, restart)
- }
-
- return
-}
-
-// Stop stops all containers within a pod that are not already stopped
-// Each container will use its own stop timeout
-// Only running containers will be stopped. Paused, stopped, or created
-// containers will be ignored.
-// If cleanup is true, mounts and network namespaces will be cleaned up after
-// the container is stopped.
-// All containers are stopped independently. An error stopping one container
-// will not prevent other containers being stopped.
-// An error and a map[string]error are returned
-// If the error is not nil and the map is nil, an error was encountered before
-// any containers were stopped
-// If map is not nil, an error was encountered when stopping one or more
-// containers. The container ID is mapped to the error encountered. The error is
-// set to ErrCtrExists
-// If both error and the map are nil, all containers were stopped without error
-func (p *Pod) Stop(cleanup bool) (map[string]error, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- ctrErrors := make(map[string]error)
-
- // TODO: There may be cases where it makes sense to order stops based on
- // dependencies. Should we bother with this?
-
- // Stop to all containers
- for _, ctr := range allCtrs {
- ctr.lock.Lock()
-
- if err := ctr.syncContainer(); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- // Ignore containers that are not running
- if ctr.state.State != ContainerStateRunning {
- ctr.lock.Unlock()
- continue
- }
-
- if err := ctr.stop(ctr.config.StopTimeout); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- if cleanup {
- if err := ctr.cleanup(); err != nil {
- ctrErrors[ctr.ID()] = err
- }
- }
-
- ctr.lock.Unlock()
- }
-
- if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error stopping some containers")
- }
-
- return nil, nil
-}
-
-// Pause pauses all containers within a pod that are running.
-// Only running containers will be paused. Paused, stopped, or created
-// containers will be ignored.
-// All containers are paused independently. An error pausing one container
-// will not prevent other containers being paused.
-// An error and a map[string]error are returned
-// If the error is not nil and the map is nil, an error was encountered before
-// any containers were paused
-// If map is not nil, an error was encountered when pausing one or more
-// containers. The container ID is mapped to the error encountered. The error is
-// set to ErrCtrExists
-// If both error and the map are nil, all containers were paused without error
-func (p *Pod) Pause() (map[string]error, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- ctrErrors := make(map[string]error)
-
- // Pause to all containers
- for _, ctr := range allCtrs {
- ctr.lock.Lock()
-
- if err := ctr.syncContainer(); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- // Ignore containers that are not running
- if ctr.state.State != ContainerStateRunning {
- ctr.lock.Unlock()
- continue
- }
-
- if err := ctr.pause(); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- ctr.lock.Unlock()
- }
-
- if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error pausing some containers")
- }
-
- return nil, nil
-}
-
-// Unpause unpauses all containers within a pod that are running.
-// Only paused containers will be unpaused. Running, stopped, or created
-// containers will be ignored.
-// All containers are unpaused independently. An error unpausing one container
-// will not prevent other containers being unpaused.
-// An error and a map[string]error are returned
-// If the error is not nil and the map is nil, an error was encountered before
-// any containers were unpaused
-// If map is not nil, an error was encountered when unpausing one or more
-// containers. The container ID is mapped to the error encountered. The error is
-// set to ErrCtrExists
-// If both error and the map are nil, all containers were unpaused without error
-func (p *Pod) Unpause() (map[string]error, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- ctrErrors := make(map[string]error)
-
- // Pause to all containers
- for _, ctr := range allCtrs {
- ctr.lock.Lock()
-
- if err := ctr.syncContainer(); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- // Ignore containers that are not paused
- if ctr.state.State != ContainerStatePaused {
- ctr.lock.Unlock()
- continue
- }
-
- if err := ctr.unpause(); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- ctr.lock.Unlock()
- }
-
- if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error unpausing some containers")
- }
-
- return nil, nil
-}
-
-// Restart restarts all containers within a pod that are not paused or in an error state.
-// It combines the effects of Stop() and Start() on a container
-// Each container will use its own stop timeout.
-// All containers are started independently, in order dictated by their
-// dependencies. An error restarting one container
-// will not prevent other containers being restarted.
-// An error and a map[string]error are returned
-// If the error is not nil and the map is nil, an error was encountered before
-// any containers were restarted
-// If map is not nil, an error was encountered when restarting one or more
-// containers. The container ID is mapped to the error encountered. The error is
-// set to ErrCtrExists
-// If both error and the map are nil, all containers were restarted without error
-func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- // Build a dependency graph of containers in the pod
- graph, err := buildContainerGraph(allCtrs)
- if err != nil {
- return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
- }
-
- ctrErrors := make(map[string]error)
- ctrsVisited := make(map[string]bool)
-
- // If there are no containers without dependencies, we can't start
- // Error out
- if len(graph.noDepNodes) == 0 {
- return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
- }
-
- // Traverse the graph beginning at nodes with no dependencies
- for _, node := range graph.noDepNodes {
- startNode(ctx, node, false, ctrErrors, ctrsVisited, true)
- }
-
- if len(ctrErrors) > 0 {
- return ctrErrors, errors.Wrapf(ErrCtrExists, "error stopping some containers")
- }
-
- return nil, nil
-}
-
-// Kill sends a signal to all running containers within a pod
-// Signals will only be sent to running containers. Containers that are not
-// running will be ignored. All signals are sent independently, and sending will
-// continue even if some containers encounter errors.
-// An error and a map[string]error are returned
-// If the error is not nil and the map is nil, an error was encountered before
-// any containers were signalled
-// If map is not nil, an error was encountered when signalling one or more
-// containers. The container ID is mapped to the error encountered. The error is
-// set to ErrCtrExists
-// If both error and the map are nil, all containers were signalled successfully
-func (p *Pod) Kill(signal uint) (map[string]error, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- ctrErrors := make(map[string]error)
-
- // Send a signal to all containers
- for _, ctr := range allCtrs {
- ctr.lock.Lock()
-
- if err := ctr.syncContainer(); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- // Ignore containers that are not running
- if ctr.state.State != ContainerStateRunning {
- ctr.lock.Unlock()
- continue
- }
-
- if err := ctr.runtime.ociRuntime.killContainer(ctr, signal); err != nil {
- ctr.lock.Unlock()
- ctrErrors[ctr.ID()] = err
- continue
- }
-
- logrus.Debugf("Killed container %s with signal %d", ctr.ID(), signal)
- }
-
- if len(ctrErrors) > 0 {
- return ctrErrors, nil
- }
-
- return nil, nil
-}
-
// HasContainer checks if a container is present in the pod
func (p *Pod) HasContainer(id string) (bool, error) {
if !p.valid {
@@ -674,75 +158,6 @@ func (p *Pod) AllContainers() ([]*Container, error) {
return p.runtime.state.PodContainers(p)
}
-// Status gets the status of all containers in the pod
-// Returns a map of Container ID to Container Status
-func (p *Pod) Status() (map[string]ContainerStatus, error) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if !p.valid {
- return nil, ErrPodRemoved
- }
-
- allCtrs, err := p.runtime.state.PodContainers(p)
- if err != nil {
- return nil, err
- }
-
- // We need to lock all the containers
- for _, ctr := range allCtrs {
- ctr.lock.Lock()
- defer ctr.lock.Unlock()
- }
-
- // Now that all containers are locked, get their status
- status := make(map[string]ContainerStatus, len(allCtrs))
- for _, ctr := range allCtrs {
- if err := ctr.syncContainer(); err != nil {
- return nil, err
- }
-
- status[ctr.ID()] = ctr.state.State
- }
-
- return status, nil
-}
-
// TODO add pod batching
// Lock pod to avoid lock contention
// Store and lock all containers (no RemoveContainer in batch guarantees cache will not become stale)
-
-// Inspect returns a PodInspect struct to describe the pod
-func (p *Pod) Inspect() (*PodInspect, error) {
- var (
- podContainers []PodContainerInfo
- )
-
- containers, err := p.AllContainers()
- if err != nil {
- return &PodInspect{}, err
- }
- for _, c := range containers {
- containerStatus := "unknown"
- // Ignoring possible errors here because we dont want this to be
- // catastrophic in nature
- containerState, err := c.State()
- if err == nil {
- containerStatus = containerState.String()
- }
- pc := PodContainerInfo{
- ID: c.ID(),
- State: containerStatus,
- }
- podContainers = append(podContainers, pc)
- }
-
- config := new(PodConfig)
- deepcopier.Copy(p.config).To(config)
- inspectData := PodInspect{
- Config: config,
- State: p.state,
- Containers: podContainers,
- }
- return &inspectData, nil
-}
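
The struct changes at the top of this file lowercase the serialized keys (usePodCgroup, cgroupPath) and introduce PodInspectState so pod inspect no longer exposes the internal podState directly. A standalone sketch of the resulting wire format (the cgroup path value is only an example):

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the tag added to podState above; not the libpod type itself.
type podStateSketch struct {
	CgroupPath string `json:"cgroupPath"`
}

func main() {
	b, _ := json.Marshal(podStateSketch{CgroupPath: "/libpod_parent/123abc"})
	fmt.Println(string(b)) // {"cgroupPath":"/libpod_parent/123abc"}
}
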
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
new file mode 100644
index 000000000..82cf7b727
--- /dev/null
+++ b/libpod/pod_api.go
@@ -0,0 +1,436 @@
+package libpod
+
+import (
+ "context"
+
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/ulule/deepcopier"
+)
+
+// Start starts all containers within a pod
+// It combines the effects of Init() and Start() on a container
+// If a container has already been initialized it will be started,
+// otherwise it will be initialized then started.
+// Containers that are already running or have been paused are ignored
+// All containers are started independently, in order dictated by their
+// dependencies.
+// An error and a map[string]error are returned
+// If the error is not nil and the map is nil, an error was encountered before
+// any containers were started
+// If map is not nil, an error was encountered when starting one or more
+// containers. The container ID is mapped to the error encountered. The error is
+// set to ErrCtrExists
+// If both error and the map are nil, all containers were started successfully
+func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ // Build a dependency graph of containers in the pod
+ graph, err := buildContainerGraph(allCtrs)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
+ }
+
+ ctrErrors := make(map[string]error)
+ ctrsVisited := make(map[string]bool)
+
+ // If there are no containers without dependencies, we can't start
+ // Error out
+ if len(graph.noDepNodes) == 0 {
+ return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
+ }
+
+ // Traverse the graph beginning at nodes with no dependencies
+ for _, node := range graph.noDepNodes {
+ startNode(ctx, node, false, ctrErrors, ctrsVisited, false)
+ }
+
+ return ctrErrors, nil
+}
+
+// Stop stops all containers within a pod that are not already stopped
+// Each container will use its own stop timeout
+// Only running containers will be stopped. Paused, stopped, or created
+// containers will be ignored.
+// If cleanup is true, mounts and network namespaces will be cleaned up after
+// the container is stopped.
+// All containers are stopped independently. An error stopping one container
+// will not prevent other containers being stopped.
+// An error and a map[string]error are returned
+// If the error is not nil and the map is nil, an error was encountered before
+// any containers were stopped
+// If map is not nil, an error was encountered when stopping one or more
+// containers. The container ID is mapped to the error encountered. The error is
+// set to ErrCtrExists
+// If both error and the map are nil, all containers were stopped without error
+func (p *Pod) Stop(cleanup bool) (map[string]error, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ ctrErrors := make(map[string]error)
+
+ // TODO: There may be cases where it makes sense to order stops based on
+ // dependencies. Should we bother with this?
+
+	// Stop all containers
+ for _, ctr := range allCtrs {
+ ctr.lock.Lock()
+
+ if err := ctr.syncContainer(); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ // Ignore containers that are not running
+ if ctr.state.State != ContainerStateRunning {
+ ctr.lock.Unlock()
+ continue
+ }
+
+ if err := ctr.stop(ctr.config.StopTimeout); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ if cleanup {
+ if err := ctr.cleanup(); err != nil {
+ ctrErrors[ctr.ID()] = err
+ }
+ }
+
+ ctr.lock.Unlock()
+ }
+
+ if len(ctrErrors) > 0 {
+ return ctrErrors, errors.Wrapf(ErrCtrExists, "error stopping some containers")
+ }
+
+ return nil, nil
+}
+
+// Pause pauses all containers within a pod that are running.
+// Only running containers will be paused. Paused, stopped, or created
+// containers will be ignored.
+// All containers are paused independently. An error pausing one container
+// will not prevent other containers being paused.
+// An error and a map[string]error are returned
+// If the error is not nil and the map is nil, an error was encountered before
+// any containers were paused
+// If map is not nil, an error was encountered when pausing one or more
+// containers. The container ID is mapped to the error encountered. The error is
+// set to ErrCtrExists
+// If both error and the map are nil, all containers were paused without error
+func (p *Pod) Pause() (map[string]error, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ ctrErrors := make(map[string]error)
+
+	// Pause all containers
+ for _, ctr := range allCtrs {
+ ctr.lock.Lock()
+
+ if err := ctr.syncContainer(); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ // Ignore containers that are not running
+ if ctr.state.State != ContainerStateRunning {
+ ctr.lock.Unlock()
+ continue
+ }
+
+ if err := ctr.pause(); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ ctr.lock.Unlock()
+ }
+
+ if len(ctrErrors) > 0 {
+ return ctrErrors, errors.Wrapf(ErrCtrExists, "error pausing some containers")
+ }
+
+ return nil, nil
+}
+
+// Unpause unpauses all containers within a pod that are running.
+// Only paused containers will be unpaused. Running, stopped, or created
+// containers will be ignored.
+// All containers are unpaused independently. An error unpausing one container
+// will not prevent other containers being unpaused.
+// An error and a map[string]error are returned
+// If the error is not nil and the map is nil, an error was encountered before
+// any containers were unpaused
+// If map is not nil, an error was encountered when unpausing one or more
+// containers. The container ID is mapped to the error encountered. The error is
+// set to ErrCtrExists
+// If both error and the map are nil, all containers were unpaused without error
+func (p *Pod) Unpause() (map[string]error, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ ctrErrors := make(map[string]error)
+
+	// Unpause all containers
+ for _, ctr := range allCtrs {
+ ctr.lock.Lock()
+
+ if err := ctr.syncContainer(); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ // Ignore containers that are not paused
+ if ctr.state.State != ContainerStatePaused {
+ ctr.lock.Unlock()
+ continue
+ }
+
+ if err := ctr.unpause(); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ ctr.lock.Unlock()
+ }
+
+ if len(ctrErrors) > 0 {
+ return ctrErrors, errors.Wrapf(ErrCtrExists, "error unpausing some containers")
+ }
+
+ return nil, nil
+}
+
+// Restart restarts all containers within a pod that are not paused or in an error state.
+// It combines the effects of Stop() and Start() on a container
+// Each container will use its own stop timeout.
+// All containers are started independently, in order dictated by their
+// dependencies. An error restarting one container
+// will not prevent other containers being restarted.
+// An error and a map[string]error are returned
+// If the error is not nil and the map is nil, an error was encountered before
+// any containers were restarted
+// If map is not nil, an error was encountered when restarting one or more
+// containers. The container ID is mapped to the error encountered. The error is
+// set to ErrCtrExists
+// If both error and the map are nil, all containers were restarted without error
+func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ // Build a dependency graph of containers in the pod
+ graph, err := buildContainerGraph(allCtrs)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
+ }
+
+ ctrErrors := make(map[string]error)
+ ctrsVisited := make(map[string]bool)
+
+ // If there are no containers without dependencies, we can't start
+ // Error out
+ if len(graph.noDepNodes) == 0 {
+ return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
+ }
+
+ // Traverse the graph beginning at nodes with no dependencies
+ for _, node := range graph.noDepNodes {
+ startNode(ctx, node, false, ctrErrors, ctrsVisited, true)
+ }
+
+ if len(ctrErrors) > 0 {
+ return ctrErrors, errors.Wrapf(ErrCtrExists, "error stopping some containers")
+ }
+
+ return nil, nil
+}
+
+// Kill sends a signal to all running containers within a pod
+// Signals will only be sent to running containers. Containers that are not
+// running will be ignored. All signals are sent independently, and sending will
+// continue even if some containers encounter errors.
+// An error and a map[string]error are returned
+// If the error is not nil and the map is nil, an error was encountered before
+// any containers were signalled
+// If map is not nil, an error was encountered when signalling one or more
+// containers. The container ID is mapped to the error encountered. The error is
+// set to ErrCtrExists
+// If both error and the map are nil, all containers were signalled successfully
+func (p *Pod) Kill(signal uint) (map[string]error, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ ctrErrors := make(map[string]error)
+
+ // Send a signal to all containers
+ for _, ctr := range allCtrs {
+ ctr.lock.Lock()
+
+ if err := ctr.syncContainer(); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ // Ignore containers that are not running
+ if ctr.state.State != ContainerStateRunning {
+ ctr.lock.Unlock()
+ continue
+ }
+
+ if err := ctr.runtime.ociRuntime.killContainer(ctr, signal); err != nil {
+ ctr.lock.Unlock()
+ ctrErrors[ctr.ID()] = err
+ continue
+ }
+
+ logrus.Debugf("Killed container %s with signal %d", ctr.ID(), signal)
+ }
+
+ if len(ctrErrors) > 0 {
+ return ctrErrors, nil
+ }
+
+ return nil, nil
+}
+
+// Status gets the status of all containers in the pod
+// Returns a map of Container ID to Container Status
+func (p *Pod) Status() (map[string]ContainerStatus, error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if !p.valid {
+ return nil, ErrPodRemoved
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return nil, err
+ }
+
+ // We need to lock all the containers
+ for _, ctr := range allCtrs {
+ ctr.lock.Lock()
+ defer ctr.lock.Unlock()
+ }
+
+ // Now that all containers are locked, get their status
+ status := make(map[string]ContainerStatus, len(allCtrs))
+ for _, ctr := range allCtrs {
+ if err := ctr.syncContainer(); err != nil {
+ return nil, err
+ }
+
+ status[ctr.ID()] = ctr.state.State
+ }
+
+ return status, nil
+}
+
+// Inspect returns a PodInspect struct to describe the pod
+func (p *Pod) Inspect() (*PodInspect, error) {
+ var (
+ podContainers []PodContainerInfo
+ )
+
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ if err := p.updatePod(); err != nil {
+ return nil, err
+ }
+
+ containers, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return &PodInspect{}, err
+ }
+ for _, c := range containers {
+ containerStatus := "unknown"
+	// Ignoring possible errors here because we don't want this to be
+ // catastrophic in nature
+ containerState, err := c.State()
+ if err == nil {
+ containerStatus = containerState.String()
+ }
+ pc := PodContainerInfo{
+ ID: c.ID(),
+ State: containerStatus,
+ }
+ podContainers = append(podContainers, pc)
+ }
+
+ config := new(PodConfig)
+ deepcopier.Copy(p.config).To(config)
+ inspectData := PodInspect{
+ Config: config,
+ State: &PodInspectState{
+ CgroupPath: p.state.CgroupPath,
+ },
+ Containers: podContainers,
+ }
+ return &inspectData, nil
+}
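
The doc comments on Start and Restart above describe a dependency-ordered traversal: nodes with no dependencies start first, each node then visits its dependents, and a failure marks every transitive dependent as failed. Stripped of locking and state syncing, the pattern reduces to roughly this self-contained sketch (names simplified from startNode):

package main

import "fmt"

type node struct {
	id         string
	dependsOn  []*node
	dependedOn []*node
}

func startNode(n *node, depFailed bool, errs map[string]error, visited map[string]bool) {
	if visited[n.id] {
		return
	}
	if depFailed {
		// Propagate the failure to everything that depends on us.
		visited[n.id] = true
		errs[n.id] = fmt.Errorf("a dependency of %s failed to start", n.id)
		for _, succ := range n.dependedOn {
			startNode(succ, true, errs, visited)
		}
		return
	}
	for _, dep := range n.dependsOn {
		if !visited[dep.id] {
			return // an unvisited dependency will revisit us later
		}
	}
	visited[n.id] = true
	err := start(n) // stands in for initAndStart / restartWithTimeout
	if err != nil {
		errs[n.id] = err
	}
	for _, succ := range n.dependedOn {
		startNode(succ, err != nil, errs, visited)
	}
}

func start(n *node) error {
	fmt.Println("starting", n.id)
	return nil
}

func main() {
	a := &node{id: "a"}
	b := &node{id: "b", dependsOn: []*node{a}}
	a.dependedOn = []*node{b}
	startNode(a, false, map[string]error{}, map[string]bool{}) // starts a, then b
}
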
diff --git a/libpod/pod_ffjson.go b/libpod/pod_ffjson.go
index b3012bf5f..58b08d61b 100644
--- a/libpod/pod_ffjson.go
+++ b/libpod/pod_ffjson.go
@@ -60,9 +60,9 @@ func (j *PodConfig) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
buf.WriteString(`,"cgroupParent":`)
fflib.WriteJsonString(buf, string(j.CgroupParent))
if j.UsePodCgroup {
- buf.WriteString(`,"UsePodCgroup":true`)
+ buf.WriteString(`,"usePodCgroup":true`)
} else {
- buf.WriteString(`,"UsePodCgroup":false`)
+ buf.WriteString(`,"usePodCgroup":false`)
}
buf.WriteString(`,"created":`)
@@ -108,7 +108,7 @@ var ffjKeyPodConfigLabels = []byte("labels")
var ffjKeyPodConfigCgroupParent = []byte("cgroupParent")
-var ffjKeyPodConfigUsePodCgroup = []byte("UsePodCgroup")
+var ffjKeyPodConfigUsePodCgroup = []byte("usePodCgroup")
var ffjKeyPodConfigCreatedTime = []byte("created")
@@ -173,14 +173,6 @@ mainparse:
} else {
switch kn[0] {
- case 'U':
-
- if bytes.Equal(ffjKeyPodConfigUsePodCgroup, kn) {
- currentKey = ffjtPodConfigUsePodCgroup
- state = fflib.FFParse_want_colon
- goto mainparse
- }
-
case 'c':
if bytes.Equal(ffjKeyPodConfigCgroupParent, kn) {
@@ -223,6 +215,14 @@ mainparse:
goto mainparse
}
+ case 'u':
+
+ if bytes.Equal(ffjKeyPodConfigUsePodCgroup, kn) {
+ currentKey = ffjtPodConfigUsePodCgroup
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
}
if fflib.SimpleLetterEqualFold(ffjKeyPodConfigCreatedTime, kn) {
@@ -607,6 +607,817 @@ done:
}
// MarshalJSON marshal bytes to json - template
+func (j *PodContainerInfo) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *PodContainerInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"id":`)
+ fflib.WriteJsonString(buf, string(j.ID))
+ buf.WriteString(`,"state":`)
+ fflib.WriteJsonString(buf, string(j.State))
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtPodContainerInfobase = iota
+ ffjtPodContainerInfonosuchkey
+
+ ffjtPodContainerInfoID
+
+ ffjtPodContainerInfoState
+)
+
+var ffjKeyPodContainerInfoID = []byte("id")
+
+var ffjKeyPodContainerInfoState = []byte("state")
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *PodContainerInfo) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *PodContainerInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtPodContainerInfobase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtPodContainerInfonosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'i':
+
+ if bytes.Equal(ffjKeyPodContainerInfoID, kn) {
+ currentKey = ffjtPodContainerInfoID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 's':
+
+ if bytes.Equal(ffjKeyPodContainerInfoState, kn) {
+ currentKey = ffjtPodContainerInfoState
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffjKeyPodContainerInfoState, kn) {
+ currentKey = ffjtPodContainerInfoState
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyPodContainerInfoID, kn) {
+ currentKey = ffjtPodContainerInfoID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtPodContainerInfonosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtPodContainerInfoID:
+ goto handle_ID
+
+ case ffjtPodContainerInfoState:
+ goto handle_State
+
+ case ffjtPodContainerInfonosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ID:
+
+ /* handler: j.ID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.ID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_State:
+
+ /* handler: j.State type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.State = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *PodInspect) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *PodInspect) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ if j.Config != nil {
+ buf.WriteString(`{"Config":`)
+
+ {
+
+ err = j.Config.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ } else {
+ buf.WriteString(`{"Config":null`)
+ }
+ if j.State != nil {
+ buf.WriteString(`,"State":`)
+
+ {
+
+ err = j.State.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ } else {
+ buf.WriteString(`,"State":null`)
+ }
+ buf.WriteString(`,"Containers":`)
+ if j.Containers != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.Containers {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+
+ {
+
+ err = v.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtPodInspectbase = iota
+ ffjtPodInspectnosuchkey
+
+ ffjtPodInspectConfig
+
+ ffjtPodInspectState
+
+ ffjtPodInspectContainers
+)
+
+var ffjKeyPodInspectConfig = []byte("Config")
+
+var ffjKeyPodInspectState = []byte("State")
+
+var ffjKeyPodInspectContainers = []byte("Containers")
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *PodInspect) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *PodInspect) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtPodInspectbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtPodInspectnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'C':
+
+ if bytes.Equal(ffjKeyPodInspectConfig, kn) {
+ currentKey = ffjtPodInspectConfig
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyPodInspectContainers, kn) {
+ currentKey = ffjtPodInspectContainers
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'S':
+
+ if bytes.Equal(ffjKeyPodInspectState, kn) {
+ currentKey = ffjtPodInspectState
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffjKeyPodInspectContainers, kn) {
+ currentKey = ffjtPodInspectContainers
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyPodInspectState, kn) {
+ currentKey = ffjtPodInspectState
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyPodInspectConfig, kn) {
+ currentKey = ffjtPodInspectConfig
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtPodInspectnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtPodInspectConfig:
+ goto handle_Config
+
+ case ffjtPodInspectState:
+ goto handle_State
+
+ case ffjtPodInspectContainers:
+ goto handle_Containers
+
+ case ffjtPodInspectnosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_Config:
+
+ /* handler: j.Config type=libpod.PodConfig kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ j.Config = nil
+
+ } else {
+
+ if j.Config == nil {
+ j.Config = new(PodConfig)
+ }
+
+ err = j.Config.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_State:
+
+ /* handler: j.State type=libpod.PodInspectState kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ j.State = nil
+
+ } else {
+
+ if j.State == nil {
+ j.State = new(PodInspectState)
+ }
+
+ err = j.State.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Containers:
+
+ /* handler: j.Containers type=[]libpod.PodContainerInfo kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Containers = nil
+ } else {
+
+ j.Containers = []PodContainerInfo{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJContainers PodContainerInfo
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJContainers type=libpod.PodContainerInfo kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ err = tmpJContainers.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ j.Containers = append(j.Containers, tmpJContainers)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *PodInspectState) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *PodInspectState) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"cgroupPath":`)
+ fflib.WriteJsonString(buf, string(j.CgroupPath))
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtPodInspectStatebase = iota
+ ffjtPodInspectStatenosuchkey
+
+ ffjtPodInspectStateCgroupPath
+)
+
+var ffjKeyPodInspectStateCgroupPath = []byte("cgroupPath")
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *PodInspectState) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *PodInspectState) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtPodInspectStatebase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtPodInspectStatenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'c':
+
+ if bytes.Equal(ffjKeyPodInspectStateCgroupPath, kn) {
+ currentKey = ffjtPodInspectStateCgroupPath
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyPodInspectStateCgroupPath, kn) {
+ currentKey = ffjtPodInspectStateCgroupPath
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtPodInspectStatenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtPodInspectStateCgroupPath:
+ goto handle_CgroupPath
+
+ case ffjtPodInspectStatenosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_CgroupPath:
+
+ /* handler: j.CgroupPath type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.CgroupPath = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
func (j *podState) MarshalJSON() ([]byte, error) {
var buf fflib.Buffer
if j == nil {
@@ -630,7 +1441,7 @@ func (j *podState) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
var obj []byte
_ = obj
_ = err
- buf.WriteString(`{"CgroupPath":`)
+ buf.WriteString(`{"cgroupPath":`)
fflib.WriteJsonString(buf, string(j.CgroupPath))
buf.WriteByte('}')
return nil
@@ -643,7 +1454,7 @@ const (
ffjtpodStateCgroupPath
)
-var ffjKeypodStateCgroupPath = []byte("CgroupPath")
+var ffjKeypodStateCgroupPath = []byte("cgroupPath")
// UnmarshalJSON umarshall json - template of ffjson
func (j *podState) UnmarshalJSON(input []byte) error {
@@ -706,7 +1517,7 @@ mainparse:
} else {
switch kn[0] {
- case 'C':
+ case 'c':
if bytes.Equal(ffjKeypodStateCgroupPath, kn) {
currentKey = ffjtpodStateCgroupPath
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
new file mode 100644
index 000000000..c8d8405bb
--- /dev/null
+++ b/libpod/pod_internal.go
@@ -0,0 +1,177 @@
+package libpod
+
+import (
+ "context"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/stringid"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// Creates a new, empty pod
+func newPod(lockDir string, runtime *Runtime) (*Pod, error) {
+ pod := new(Pod)
+ pod.config = new(PodConfig)
+ pod.config.ID = stringid.GenerateNonCryptoID()
+ pod.config.Labels = make(map[string]string)
+ pod.config.CreatedTime = time.Now()
+ pod.state = new(podState)
+ pod.runtime = runtime
+
+ // Path our lock file will reside at
+ lockPath := filepath.Join(lockDir, pod.config.ID)
+ // Grab a lockfile at the given path
+ lock, err := storage.GetLockfile(lockPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating lockfile for new pod")
+ }
+ pod.lock = lock
+
+ return pod, nil
+}
+
+// Update pod state from database
+func (p *Pod) updatePod() error {
+ if err := p.runtime.state.UpdatePod(p); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Save pod state to database
+func (p *Pod) save() error {
+ if err := p.runtime.state.SavePod(p); err != nil {
+ return errors.Wrapf(err, "error saving pod %s state", p.ID())
+ }
+
+ return nil
+}
+
+// Refresh a pod's state after restart
+func (p *Pod) refresh() error {
+ // Need to do an update from the DB to pull potentially-missing state
+ if err := p.runtime.state.UpdatePod(p); err != nil {
+ return err
+ }
+
+ if !p.valid {
+ return ErrPodRemoved
+ }
+
+ // We need to recreate the pod's cgroup
+ if p.config.UsePodCgroup {
+ switch p.runtime.config.CgroupManager {
+ case SystemdCgroupsManager:
+ // NOOP for now, until proper systemd cgroup management
+ // is implemented
+ case CgroupfsCgroupsManager:
+ p.state.CgroupPath = filepath.Join(p.config.CgroupParent, p.ID())
+
+ logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath)
+ default:
+ return errors.Wrapf(ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.CgroupManager)
+ }
+ }
+
+ // Save changes
+ return p.save()
+}
+
+// Visit a node on the container graph and start the container, or set an error if
+// a dependency failed to start. If restart is true, startNode restarts the node instead of starting it.
+func startNode(ctx context.Context, node *containerNode, setError bool, ctrErrors map[string]error, ctrsVisited map[string]bool, restart bool) {
+ // First, check if we have already visited the node
+ if ctrsVisited[node.id] {
+ return
+ }
+
+ // If setError is true, a dependency of ours failed to start
+ // Mark us as failed and recurse
+ if setError {
+ // Mark us as visited, and set an error
+ ctrsVisited[node.id] = true
+ ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id)
+
+ // Hit anyone who depends on us, and set errors on them too
+ for _, successor := range node.dependedOn {
+ startNode(ctx, successor, true, ctrErrors, ctrsVisited, restart)
+ }
+
+ return
+ }
+
+ // Have all our dependencies started?
+ // If not, don't visit the node yet
+ depsVisited := true
+ for _, dep := range node.dependsOn {
+ depsVisited = depsVisited && ctrsVisited[dep.id]
+ }
+ if !depsVisited {
+ // Don't visit us yet; not all of our dependencies are up
+ // We'll hit the dependencies eventually, and when we do it will
+ // recurse here
+ return
+ }
+
+ // Going to try to start the container, mark us as visited
+ ctrsVisited[node.id] = true
+
+ ctrErrored := false
+
+ // Check if dependencies are running
+ // Graph traversal means we should have started them
+ // But they could have died before we got here
+ // This does not require that the container be locked; we only need to
+ // lock the dependencies
+ depsStopped, err := node.container.checkDependenciesRunning()
+ if err != nil {
+ ctrErrors[node.id] = err
+ ctrErrored = true
+ } else if len(depsStopped) > 0 {
+ // Our dependencies are not running
+ depsList := strings.Join(depsStopped, ",")
+ ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
+ ctrErrored = true
+ }
+
+ // Lock before we start
+ node.container.lock.Lock()
+
+ // Sync the container to pick up current state
+ if !ctrErrored {
+ if err := node.container.syncContainer(); err != nil {
+ ctrErrored = true
+ ctrErrors[node.id] = err
+ }
+ }
+
+ // Start the container (only if it is not running)
+ if !ctrErrored {
+ if !restart && node.container.state.State != ContainerStateRunning {
+ if err := node.container.initAndStart(ctx); err != nil {
+ ctrErrored = true
+ ctrErrors[node.id] = err
+ }
+ }
+ if restart && node.container.state.State != ContainerStatePaused && node.container.state.State != ContainerStateUnknown {
+ if err := node.container.restartWithTimeout(ctx, node.container.config.StopTimeout); err != nil {
+ ctrErrored = true
+ ctrErrors[node.id] = err
+ }
+ }
+ }
+
+ node.container.lock.Unlock()
+
+ // Recurse to anyone who depends on us and start them
+ for _, successor := range node.dependedOn {
+ startNode(ctx, successor, ctrErrored, ctrErrors, ctrsVisited, restart)
+ }
+
+ return
+}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 0652b24f4..2fc10035e 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -628,16 +628,14 @@ func (r *Runtime) refresh(alivePath string) error {
for _, ctr := range ctrs {
ctr.lock.Lock()
if err := ctr.refresh(); err != nil {
- ctr.lock.Unlock()
- return err
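+ // Log and continue; a failure to refresh one container should not
+ // prevent the rest of the runtime from coming up.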
+ logrus.Errorf("Error refreshing container %s: %v", ctr.ID(), err)
}
ctr.lock.Unlock()
}
for _, pod := range pods {
pod.lock.Lock()
if err := pod.refresh(); err != nil {
- pod.lock.Unlock()
- return err
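+ // As above, log and continue rather than aborting the refresh.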
+ logrus.Errorf("Error refreshing pod %s: %v", pod.ID(), err)
}
pod.lock.Unlock()
}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index ffad9d649..051b3e85e 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -174,7 +174,7 @@ func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool)
// Locks the container, but does not lock the runtime
func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool) error {
if !c.valid {
- if ok, _ := r.HasContainer(c.ID()); !ok {
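+ // Check the runtime state directly; r.HasContainer() takes the runtime
+ // lock, which our caller already holds.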
+ if ok, _ := r.state.HasContainer(c.ID()); !ok {
// Container probably already removed
// Or was never in the runtime to begin with
return nil
diff --git a/libpod/stats.go b/libpod/stats.go
index 262c1ac00..7830919ba 100644
--- a/libpod/stats.go
+++ b/libpod/stats.go
@@ -16,11 +16,15 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
stats := new(ContainerStats)
stats.ContainerID = c.ID()
stats.Name = c.Name()
- c.lock.Lock()
- defer c.lock.Unlock()
- if err := c.syncContainer(); err != nil {
- return stats, err
+
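+ // In batched mode the caller already holds the container lock and has
+ // synced its state, so skip both here.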
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if err := c.syncContainer(); err != nil {
+ return stats, err
+ }
}
+
if c.state.State != ContainerStateRunning {
return stats, nil
}
diff --git a/libpod/util.go b/libpod/util.go
index 106dd4666..13235059f 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"path/filepath"
+ "sort"
"strconv"
"strings"
"time"
@@ -121,3 +122,26 @@ func WaitForFile(path string, timeout time.Duration) error {
return errors.Wrapf(ErrInternal, "timed out waiting for file %s", path)
}
}
+
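+// byDestination sorts mounts by the depth of their destination path, so that
+// a parent directory (e.g. /var) is always mounted before anything nested
+// beneath it and one mount cannot shadow another.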
+type byDestination []spec.Mount
+
+func (m byDestination) Len() int {
+ return len(m)
+}
+
+func (m byDestination) Less(i, j int) bool {
+ return m.parts(i) < m.parts(j)
+}
+
+func (m byDestination) Swap(i, j int) {
+ m[i], m[j] = m[j], m[i]
+}
+
+func (m byDestination) parts(i int) int {
+ return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
+}
+
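+// sortMounts sorts the given mounts in place by destination depth and
+// returns the same slice.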
+func sortMounts(m []spec.Mount) []spec.Mount {
+ sort.Sort(byDestination(m))
+ return m
+}
diff --git a/vendor/github.com/docker/docker/pkg/stringid/README.md b/vendor/github.com/docker/docker/pkg/stringid/README.md
deleted file mode 100644
index 37a5098fd..000000000
--- a/vendor/github.com/docker/docker/pkg/stringid/README.md
+++ /dev/null
@@ -1 +0,0 @@
-This package provides helper functions for dealing with string identifiers
diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go
deleted file mode 100644
index a0c7c42a0..000000000
--- a/vendor/github.com/docker/docker/pkg/stringid/stringid.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Package stringid provides helper functions for dealing with string identifiers
-package stringid
-
-import (
- cryptorand "crypto/rand"
- "encoding/hex"
- "fmt"
- "io"
- "math"
- "math/big"
- "math/rand"
- "regexp"
- "strconv"
- "strings"
- "time"
-)
-
-const shortLen = 12
-
-var (
- validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
- validHex = regexp.MustCompile(`^[a-f0-9]{64}$`)
-)
-
-// IsShortID determines if an arbitrary string *looks like* a short ID.
-func IsShortID(id string) bool {
- return validShortID.MatchString(id)
-}
-
-// TruncateID returns a shorthand version of a string identifier for convenience.
-// A collision with other shorthands is very unlikely, but possible.
-// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
-// will need to use a longer prefix, or the full-length Id.
-func TruncateID(id string) string {
- if i := strings.IndexRune(id, ':'); i >= 0 {
- id = id[i+1:]
- }
- if len(id) > shortLen {
- id = id[:shortLen]
- }
- return id
-}
-
-func generateID(r io.Reader) string {
- b := make([]byte, 32)
- for {
- if _, err := io.ReadFull(r, b); err != nil {
- panic(err) // This shouldn't happen
- }
- id := hex.EncodeToString(b)
- // if we try to parse the truncated for as an int and we don't have
- // an error then the value is all numeric and causes issues when
- // used as a hostname. ref #3869
- if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
- continue
- }
- return id
- }
-}
-
-// GenerateRandomID returns a unique id.
-func GenerateRandomID() string {
- return generateID(cryptorand.Reader)
-}
-
-// GenerateNonCryptoID generates unique id without using cryptographically
-// secure sources of random.
-// It helps you to save entropy.
-func GenerateNonCryptoID() string {
- return generateID(readerFunc(rand.Read))
-}
-
-// ValidateID checks whether an ID string is a valid image ID.
-func ValidateID(id string) error {
- if ok := validHex.MatchString(id); !ok {
- return fmt.Errorf("image ID %q is invalid", id)
- }
- return nil
-}
-
-func init() {
- // safely set the seed globally so we generate random ids. Tries to use a
- // crypto seed before falling back to time.
- var seed int64
- if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
- // This should not happen, but worst-case fallback to time-based seed.
- seed = time.Now().UnixNano()
- } else {
- seed = cryptoseed.Int64()
- }
-
- rand.Seed(seed)
-}
-
-type readerFunc func(p []byte) (int, error)
-
-func (fn readerFunc) Read(p []byte) (int, error) {
- return fn(p)
-}
diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go
deleted file mode 100644
index 74776e65e..000000000
--- a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Package truncindex provides a general 'index tree', used by Docker
-// in order to be able to reference containers by only a few unambiguous
-// characters of their id.
-package truncindex
-
-import (
- "errors"
- "fmt"
- "strings"
- "sync"
-
- "github.com/tchap/go-patricia/patricia"
-)
-
-var (
- // ErrEmptyPrefix is an error returned if the prefix was empty.
- ErrEmptyPrefix = errors.New("Prefix can't be empty")
-
- // ErrIllegalChar is returned when a space is in the ID
- ErrIllegalChar = errors.New("illegal character: ' '")
-
- // ErrNotExist is returned when ID or its prefix not found in index.
- ErrNotExist = errors.New("ID does not exist")
-)
-
-// ErrAmbiguousPrefix is returned if the prefix was ambiguous
-// (multiple ids for the prefix).
-type ErrAmbiguousPrefix struct {
- prefix string
-}
-
-func (e ErrAmbiguousPrefix) Error() string {
- return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix)
-}
-
-// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
-// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
-type TruncIndex struct {
- sync.RWMutex
- trie *patricia.Trie
- ids map[string]struct{}
-}
-
-// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs.
-func NewTruncIndex(ids []string) (idx *TruncIndex) {
- idx = &TruncIndex{
- ids: make(map[string]struct{}),
-
- // Change patricia max prefix per node length,
- // because our len(ID) always 64
- trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
- }
- for _, id := range ids {
- idx.addID(id)
- }
- return
-}
-
-func (idx *TruncIndex) addID(id string) error {
- if strings.Contains(id, " ") {
- return ErrIllegalChar
- }
- if id == "" {
- return ErrEmptyPrefix
- }
- if _, exists := idx.ids[id]; exists {
- return fmt.Errorf("id already exists: '%s'", id)
- }
- idx.ids[id] = struct{}{}
- if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
- return fmt.Errorf("failed to insert id: %s", id)
- }
- return nil
-}
-
-// Add adds a new ID to the TruncIndex.
-func (idx *TruncIndex) Add(id string) error {
- idx.Lock()
- defer idx.Unlock()
- return idx.addID(id)
-}
-
-// Delete removes an ID from the TruncIndex. If there are multiple IDs
-// with the given prefix, an error is thrown.
-func (idx *TruncIndex) Delete(id string) error {
- idx.Lock()
- defer idx.Unlock()
- if _, exists := idx.ids[id]; !exists || id == "" {
- return fmt.Errorf("no such id: '%s'", id)
- }
- delete(idx.ids, id)
- if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
- return fmt.Errorf("no such id: '%s'", id)
- }
- return nil
-}
-
-// Get retrieves an ID from the TruncIndex. If there are multiple IDs
-// with the given prefix, an error is thrown.
-func (idx *TruncIndex) Get(s string) (string, error) {
- if s == "" {
- return "", ErrEmptyPrefix
- }
- var (
- id string
- )
- subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
- if id != "" {
- // we haven't found the ID if there are two or more IDs
- id = ""
- return ErrAmbiguousPrefix{prefix: string(prefix)}
- }
- id = string(prefix)
- return nil
- }
-
- idx.RLock()
- defer idx.RUnlock()
- if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
- return "", err
- }
- if id != "" {
- return id, nil
- }
- return "", ErrNotExist
-}
-
-// Iterate iterates over all stored IDs and passes each of them to the given
-// handler. Take care that the handler method does not call any public
-// method on truncindex as the internal locking is not reentrant/recursive
-// and will result in deadlock.
-func (idx *TruncIndex) Iterate(handler func(id string)) {
- idx.Lock()
- defer idx.Unlock()
- idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
- handler(string(prefix))
- return nil
- })
-}
diff --git a/version/version.go b/version/version.go
index 37a42df46..a4228f00e 100644
--- a/version/version.go
+++ b/version/version.go
@@ -1,4 +1,4 @@
package version
// Version is the version of the build.
-const Version = "0.8.2-dev"
+const Version = "0.8.3-dev"