summaryrefslogtreecommitdiff
path: root/libpod
diff options
context:
space:
mode:
Diffstat (limited to 'libpod')
-rw-r--r--libpod/common/common.go99
-rw-r--r--libpod/common/docker_registry_options.go34
-rw-r--r--libpod/common/output_interfaces.go1
-rw-r--r--libpod/common/signing_options.go10
-rw-r--r--libpod/container.go428
-rw-r--r--libpod/diff.go53
-rw-r--r--libpod/driver/driver.go27
-rw-r--r--libpod/errors.go62
-rw-r--r--libpod/images/image_data.go203
-rw-r--r--libpod/in_memory_state.go273
-rw-r--r--libpod/layers/layer.go12
-rw-r--r--libpod/oci.go273
-rw-r--r--libpod/options.go382
-rw-r--r--libpod/pod.go137
-rw-r--r--libpod/runtime.go192
-rw-r--r--libpod/runtime_ctr.go229
-rw-r--r--libpod/runtime_img.go823
-rw-r--r--libpod/runtime_pod.go122
-rw-r--r--libpod/state.go38
-rw-r--r--libpod/storage.go261
20 files changed, 3659 insertions, 0 deletions
diff --git a/libpod/common/common.go b/libpod/common/common.go
new file mode 100644
index 000000000..775d391da
--- /dev/null
+++ b/libpod/common/common.go
@@ -0,0 +1,99 @@
+package common
+
+import (
+ "io"
+ "strings"
+ "syscall"
+
+ cp "github.com/containers/image/copy"
+ "github.com/containers/image/signature"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+var (
+ // ErrNoPassword is returned if the user did not supply a password.
+ // It wraps syscall.EINVAL so callers can still recover the underlying
+ // errno via errors.Cause.
+ ErrNoPassword = errors.Wrapf(syscall.EINVAL, "password was not supplied")
+)
+
+// GetCopyOptions constructs a new containers/image/copy.Options{} struct from the given parameters
+func GetCopyOptions(reportWriter io.Writer, signaturePolicyPath string, srcDockerRegistry, destDockerRegistry *DockerRegistryOptions, signing SigningOptions, authFile string) *cp.Options {
+ if srcDockerRegistry == nil {
+ srcDockerRegistry = &DockerRegistryOptions{}
+ }
+ if destDockerRegistry == nil {
+ destDockerRegistry = &DockerRegistryOptions{}
+ }
+ srcContext := srcDockerRegistry.GetSystemContext(signaturePolicyPath, authFile)
+ destContext := destDockerRegistry.GetSystemContext(signaturePolicyPath, authFile)
+ return &cp.Options{
+ RemoveSignatures: signing.RemoveSignatures,
+ SignBy: signing.SignBy,
+ ReportWriter: reportWriter,
+ SourceCtx: srcContext,
+ DestinationCtx: destContext,
+ }
+}
+
+// GetSystemContext Constructs a new containers/image/types.SystemContext{} struct from the given signaturePolicy path
+func GetSystemContext(signaturePolicyPath, authFilePath string) *types.SystemContext {
+ sc := &types.SystemContext{}
+ if signaturePolicyPath != "" {
+ sc.SignaturePolicyPath = signaturePolicyPath
+ }
+ sc.AuthFilePath = authFilePath
+ return sc
+}
+
// CopyStringStringMap deep copies a map[string]string and returns the result.
// The returned map is always non-nil (a nil input yields an empty map), and
// mutating it does not affect the source map. The map is pre-sized to the
// source's length to avoid rehash-and-grow during the copy.
func CopyStringStringMap(m map[string]string) map[string]string {
	n := make(map[string]string, len(m))
	for k, v := range m {
		n[k] = v
	}
	return n
}
+
// IsTrue reports whether the given string equals "true" (exact,
// case-sensitive match).
func IsTrue(str string) bool {
	return str == "true"
}

// IsFalse reports whether the given string equals "false" (exact,
// case-sensitive match).
func IsFalse(str string) bool {
	return str == "false"
}

// IsValidBool reports whether the given string is exactly "true" or "false".
func IsValidBool(str string) bool {
	switch str {
	case "true", "false":
		return true
	default:
		return false
	}
}
+
+// GetPolicyContext creates a signature policy context for the given signature policy path
+func GetPolicyContext(path string) (*signature.PolicyContext, error) {
+ policy, err := signature.DefaultPolicy(&types.SystemContext{SignaturePolicyPath: path})
+ if err != nil {
+ return nil, err
+ }
+ return signature.NewPolicyContext(policy)
+}
+
+// ParseRegistryCreds takes a credentials string in the form USERNAME:PASSWORD
+// and returns a DockerAuthConfig
+func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) {
+ if creds == "" {
+ return nil, errors.New("no credentials supplied")
+ }
+ if !strings.Contains(creds, ":") {
+ return &types.DockerAuthConfig{
+ Username: creds,
+ Password: "",
+ }, ErrNoPassword
+ }
+ v := strings.SplitN(creds, ":", 2)
+ cfg := &types.DockerAuthConfig{
+ Username: v[0],
+ Password: v[1],
+ }
+ return cfg, nil
+}
diff --git a/libpod/common/docker_registry_options.go b/libpod/common/docker_registry_options.go
new file mode 100644
index 000000000..24fa5c03e
--- /dev/null
+++ b/libpod/common/docker_registry_options.go
@@ -0,0 +1,34 @@
+package common
+
+import "github.com/containers/image/types"
+
+// DockerRegistryOptions encapsulates settings that affect how we connect or
+// authenticate to a remote registry.
+type DockerRegistryOptions struct {
+ // DockerRegistryCreds is the user name and password to supply in case
+ // we need to pull an image from a registry, and it requires us to
+ // authenticate.
+ // NOTE(review): when nil, presumably the auth file passed to
+ // GetSystemContext is the only credential source — confirm.
+ DockerRegistryCreds *types.DockerAuthConfig
+ // DockerCertPath is the location of a directory containing CA
+ // certificates which will be used to verify the registry's certificate
+ // (all files with names ending in ".crt"), and possibly client
+ // certificates and private keys (pairs of files with the same name,
+ // except for ".cert" and ".key" suffixes).
+ DockerCertPath string
+ // DockerInsecureSkipTLSVerify turns off verification of TLS
+ // certificates and allows connecting to registries without encryption.
+ // Only intended for testing against registries without valid certs.
+ DockerInsecureSkipTLSVerify bool
+}
+
+// GetSystemContext constructs a new system context from the given signaturePolicy path and the
+// values in the DockerRegistryOptions
+func (o DockerRegistryOptions) GetSystemContext(signaturePolicyPath, authFile string) *types.SystemContext {
+ sc := &types.SystemContext{
+ SignaturePolicyPath: signaturePolicyPath,
+ DockerAuthConfig: o.DockerRegistryCreds,
+ DockerCertPath: o.DockerCertPath,
+ DockerInsecureSkipTLSVerify: o.DockerInsecureSkipTLSVerify,
+ AuthFilePath: authFile,
+ }
+ return sc
+}
diff --git a/libpod/common/output_interfaces.go b/libpod/common/output_interfaces.go
new file mode 100644
index 000000000..805d0c79a
--- /dev/null
+++ b/libpod/common/output_interfaces.go
@@ -0,0 +1 @@
+package common
diff --git a/libpod/common/signing_options.go b/libpod/common/signing_options.go
new file mode 100644
index 000000000..b7e14be82
--- /dev/null
+++ b/libpod/common/signing_options.go
@@ -0,0 +1,10 @@
+package common
+
+// SigningOptions encapsulates settings that control whether or not we strip or
+// add signatures to images when writing them.
+type SigningOptions struct {
+ // RemoveSignatures directs us to remove any signatures which are already present.
+ RemoveSignatures bool
+ // SignBy is a key identifier of some kind, indicating that a signature
+ // should be generated using the specified private key and stored with
+ // the image. An empty value presumably disables signing — confirm
+ // against the copy implementation.
+ SignBy string
+}
diff --git a/libpod/container.go b/libpod/container.go
new file mode 100644
index 000000000..390b20b31
--- /dev/null
+++ b/libpod/container.go
@@ -0,0 +1,428 @@
+package libpod
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/containers/storage"
+ "github.com/docker/docker/pkg/stringid"
+ crioAnnotations "github.com/kubernetes-incubator/cri-o/pkg/annotations"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/ulule/deepcopier"
+)
+
// ContainerState represents the current state of a container.
type ContainerState int

// Idiomatic iota enumeration: only the first constant names the type and
// iota; the rest inherit both. Values are unchanged (0 through 5).
const (
	// ContainerStateUnknown indicates that the container is in an error
	// state where information about it cannot be retrieved.
	ContainerStateUnknown ContainerState = iota
	// ContainerStateConfigured indicates that the container has had its
	// storage configured but it has not been created in the OCI runtime.
	ContainerStateConfigured
	// ContainerStateCreated indicates the container has been created in
	// the OCI runtime but not started.
	ContainerStateCreated
	// ContainerStateRunning indicates the container is currently executing.
	ContainerStateRunning
	// ContainerStateStopped indicates that the container was running but
	// has exited.
	ContainerStateStopped
	// ContainerStatePaused indicates that the container has been paused.
	ContainerStatePaused
)
+
+// Container is a single OCI container
+type Container struct {
+ // config is the immutable creation-time configuration (see containerConfig)
+ config *containerConfig
+
+ // pod the container has been added to, if any
+ pod *Pod
+ // runningSpec is the spec actually handed to the OCI runtime; it is
+ // derived from config.Spec in Create() with mounts/annotations added
+ runningSpec *spec.Spec
+
+ // state is the mutable, per-boot runtime state
+ state *containerRuntimeInfo
+
+ // TODO move to storage.Locker from sync.Mutex
+ // valid is false once the container has been removed; methods check it
+ // and return ErrCtrRemoved
+ valid bool
+ // lock guards valid and state
+ lock sync.Mutex
+ runtime *Runtime
+}
+
+// containerRuntimeInfo contains the current state of the container
+// It is stored on disk in a tmpfs and recreated on reboot
+type containerRuntimeInfo struct {
+ // The current state of the running container
+ State ContainerState `json:"state"`
+ // The path to the JSON OCI runtime spec for this container
+ ConfigPath string `json:"configPath,omitempty"`
+ // RunDir is a per-boot directory for container content
+ RunDir string `json:"runDir,omitempty"`
+ // Mounted indicates whether the container's storage has been mounted
+ // for use. Excluded from JSON ("-") — presumably mount status is
+ // re-derived after reboot rather than trusted from disk; confirm.
+ Mounted bool `json:"-"`
+ // MountPoint contains the path to the container's mounted storage
+ Mountpoint string `json:"mountPoint,omitempty"`
+ // StartedTime is the time the container was started
+ StartedTime time.Time `json:"startedTime,omitempty"`
+ // FinishedTime is the time the container finished executing
+ FinishedTime time.Time `json:"finishedTime,omitempty"`
+ // ExitCode is the exit code returned when the container stopped
+ ExitCode int32 `json:"exitCode,omitempty"`
+
+ // TODO: Save information about image used in container if one is used
+}
+
+// containerConfig contains all information that was used to create the
+// container. It may not be changed once created.
+// It is stored, read-only, on disk
+type containerConfig struct {
+ Spec *spec.Spec `json:"spec"`
+ ID string `json:"id"`
+ Name string `json:"name"`
+ // RootfsFromImage indicates whether the container uses a root
+ // filesystem from an image, or from a user-provided directory
+ // NOTE(review): no json tag — serializes under the Go field name,
+ // unlike the other fields; confirm this is intentional.
+ RootfsFromImage bool
+ // Directory used as a root filesystem if not configured with an image
+ RootfsDir string `json:"rootfsDir,omitempty"`
+ // Information on the image used for the root filesystem
+ RootfsImageID string `json:"rootfsImageID,omitempty"`
+ RootfsImageName string `json:"rootfsImageName,omitempty"`
+ UseImageConfig bool `json:"useImageConfig"`
+ // Whether to keep container STDIN open
+ // NOTE(review): also missing a json tag — see RootfsFromImage.
+ Stdin bool
+ // Static directory for container content that will persist across
+ // reboot
+ StaticDir string `json:"staticDir"`
+ // Pod the container belongs to
+ Pod string `json:"pod,omitempty"`
+ // Labels is a set of key-value pairs providing additional information
+ // about a container
+ Labels map[string]string `json:"labels,omitempty"`
+ // StopSignal is the signal that will be used to stop the container
+ StopSignal uint `json:"stopSignal,omitempty"`
+ // Shared namespaces with container
+ SharedNamespaceCtr *string `json:"shareNamespacesWith,omitempty"`
+ SharedNamespaceMap map[string]string `json:"sharedNamespaces"`
+ // Time container was created
+ CreatedTime time.Time `json:"createdTime"`
+
+ // TODO save log location here and pass into OCI code
+ // TODO allow overriding of log path
+}
+
+// ID returns the container's ID.
+// The ID lives in the immutable containerConfig and never changes.
+func (c *Container) ID() string {
+ return c.config.ID
+}
+
+// Name returns the container's name.
+// Like the ID, the name is part of the immutable container config.
+func (c *Container) Name() string {
+ return c.config.Name
+}
+
+// Spec returns the container's OCI runtime spec
+// The spec returned is the one used to create the container. The running
+// spec may differ slightly as mounts are added based on the image
+func (c *Container) Spec() *spec.Spec {
+ spec := new(spec.Spec)
+ deepcopier.Copy(c.config.Spec).To(spec)
+
+ return spec
+}
+
+// Labels returns the container's labels
+func (c *Container) Labels() map[string]string {
+ labels := make(map[string]string)
+ for key, value := range c.config.Labels {
+ labels[key] = value
+ }
+
+ return labels
+}
+
+// State returns the current state of the container
+func (c *Container) State() (ContainerState, error) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // TODO uncomment when working
+ // if err := c.runtime.ociRuntime.updateContainerStatus(c); err != nil {
+ // return ContainerStateUnknown, err
+ // }
+
+ return c.state.State, nil
+}
+
+// The path to the container's root filesystem - where the OCI spec will be
+// placed, amongst other things.
+// This is the per-boot RunDir created by the storage service, so the bundle
+// does not survive a reboot.
+func (c *Container) bundlePath() string {
+ return c.state.RunDir
+}
+
+// Retrieves the path of the container's attach socket, which lives under the
+// OCI runtime's sockets directory, namespaced by container ID.
+func (c *Container) attachSocketPath() string {
+ return filepath.Join(c.runtime.ociRuntime.socketsDir, c.ID(), "attach")
+}
+
+// Make a new container
+func newContainer(rspec *spec.Spec) (*Container, error) {
+ if rspec == nil {
+ return nil, errors.Wrapf(ErrInvalidArg, "must provide a valid runtime spec to create container")
+ }
+
+ ctr := new(Container)
+ ctr.config = new(containerConfig)
+ ctr.state = new(containerRuntimeInfo)
+
+ ctr.config.ID = stringid.GenerateNonCryptoID()
+ ctr.config.Name = ctr.config.ID // TODO generate unique human-readable names
+
+ ctr.config.Spec = new(spec.Spec)
+ deepcopier.Copy(rspec).To(ctr.config.Spec)
+
+ ctr.config.CreatedTime = time.Now()
+
+ return ctr, nil
+}
+
+// Create container root filesystem for use
+func (c *Container) setupStorage() error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if !c.valid {
+ return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID())
+ }
+
+ if c.state.State != ContainerStateConfigured {
+ return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID())
+ }
+
+ // If we're configured to use a directory, perform that setup
+ if !c.config.RootfsFromImage {
+ // TODO implement directory-based root filesystems
+ return ErrNotImplemented
+ }
+
+ // Not using a directory, so call into containers/storage
+ return c.setupImageRootfs()
+}
+
+// Set up an image as root filesystem using containers/storage
+func (c *Container) setupImageRootfs() error {
+ // Need both an image ID and image name, plus a bool telling us whether to use the image configuration
+ if c.config.RootfsImageID == "" || c.config.RootfsImageName == "" {
+ return errors.Wrapf(ErrInvalidArg, "must provide image ID and image name to use an image")
+ }
+
+ // TODO SELinux mount label
+ containerInfo, err := c.runtime.storageService.CreateContainerStorage(c.runtime.imageContext, c.config.RootfsImageName, c.config.RootfsImageID, c.config.Name, c.config.ID, "")
+ if err != nil {
+ return errors.Wrapf(err, "error creating container storage")
+ }
+
+ c.config.StaticDir = containerInfo.Dir
+ c.state.RunDir = containerInfo.RunDir
+
+ return nil
+}
+
+// Tear down a container's storage prior to removal
+func (c *Container) teardownStorage() error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if !c.valid {
+ return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID())
+ }
+
+ if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
+ return errors.Wrapf(ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID())
+ }
+
+ if !c.config.RootfsFromImage {
+ // TODO implement directory-based root filesystems
+ return ErrNotImplemented
+ }
+
+ return c.teardownImageRootfs()
+}
+
+// Completely remove image-based root filesystem for a container
+func (c *Container) teardownImageRootfs() error {
+ if c.state.Mounted {
+ if err := c.runtime.storageService.StopContainer(c.ID()); err != nil {
+ return errors.Wrapf(err, "error unmounting container %s root filesystem", c.ID())
+ }
+
+ c.state.Mounted = false
+ }
+
+ if err := c.runtime.storageService.DeleteContainer(c.ID()); err != nil {
+ return errors.Wrapf(err, "error removing container %s root filesystem", c.ID())
+ }
+
+ return nil
+}
+
+// Create creates a container in the OCI runtime
+func (c *Container) Create() (err error) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if !c.valid {
+ return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID())
+ }
+
+ if c.state.State != ContainerStateConfigured {
+ return errors.Wrapf(ErrCtrExists, "container %s has already been created in runtime", c.ID())
+ }
+
+ // If using containers/storage, mount the container
+ if !c.config.RootfsFromImage {
+ // TODO implement directory-based root filesystems
+ if !c.state.Mounted {
+ return ErrNotImplemented
+ }
+ } else {
+ mountPoint, err := c.runtime.storageService.StartContainer(c.ID())
+ if err != nil {
+ return errors.Wrapf(err, "error mounting storage for container %s", c.ID())
+ }
+ c.state.Mounted = true
+ c.state.Mountpoint = mountPoint
+
+ logrus.Debugf("Created root filesystem for container %s at %s", c.ID(), c.state.Mountpoint)
+
+ defer func() {
+ if err != nil {
+ if err2 := c.runtime.storageService.StopContainer(c.ID()); err2 != nil {
+ logrus.Errorf("Error unmounting storage for container %s: %v", c.ID(), err2)
+ }
+
+ c.state.Mounted = false
+ c.state.Mountpoint = ""
+ }
+ }()
+ }
+
+ // Make the OCI runtime spec we will use
+ c.runningSpec = new(spec.Spec)
+ deepcopier.Copy(c.config.Spec).To(c.runningSpec)
+ c.runningSpec.Root.Path = c.state.Mountpoint
+ c.runningSpec.Annotations[crioAnnotations.Created] = c.config.CreatedTime.Format(time.RFC3339Nano)
+ c.runningSpec.Annotations["org.opencontainers.image.stopSignal"] = fmt.Sprintf("%d", c.config.StopSignal)
+
+ // Save the OCI spec to disk
+ jsonPath := filepath.Join(c.bundlePath(), "config.json")
+ fileJSON, err := json.Marshal(c.runningSpec)
+ if err != nil {
+ return errors.Wrapf(err, "error exporting runtime spec for container %s to JSON", c.ID())
+ }
+ if err := ioutil.WriteFile(jsonPath, fileJSON, 0644); err != nil {
+ return errors.Wrapf(err, "error writing runtime spec JSON to file for container %s", c.ID())
+ }
+ c.state.ConfigPath = jsonPath
+
+ logrus.Debugf("Created OCI spec for container %s at %s", c.ID(), jsonPath)
+
+ // With the spec complete, do an OCI create
+ // TODO set cgroup parent in a sane fashion
+ if err := c.runtime.ociRuntime.createContainer(c, "/libpod_parent"); err != nil {
+ return err
+ }
+
+ logrus.Debugf("Created container %s in runc", c.ID())
+
+ // TODO should flush this state to disk here
+ c.state.State = ContainerStateCreated
+
+ return nil
+}
+
+// Start starts a container
+func (c *Container) Start() error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if !c.valid {
+ return ErrCtrRemoved
+ }
+
+ // Container must be created or stopped to be started
+ if !(c.state.State == ContainerStateCreated || c.state.State == ContainerStateStopped) {
+ return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
+ }
+
+ if err := c.runtime.ociRuntime.startContainer(c); err != nil {
+ return err
+ }
+
+ logrus.Debugf("Started container %s", c.ID())
+
+ // TODO should flush state to disk here
+ c.state.StartedTime = time.Now()
+ c.state.State = ContainerStateRunning
+
+ return nil
+}
+
+// Stop stops a container
+// TODO: not yet implemented
+func (c *Container) Stop() error {
+ return ErrNotImplemented
+}
+
+// Kill sends a signal to a container
+// The signal parameter is currently unused; TODO: not yet implemented
+func (c *Container) Kill(signal uint) error {
+ return ErrNotImplemented
+}
+
+// Exec starts a new process inside the container
+// Returns fully qualified URL of streaming server for executed process
+// TODO: not yet implemented
+func (c *Container) Exec(cmd []string, tty bool, stdin bool) (string, error) {
+ return "", ErrNotImplemented
+}
+
+// Attach attaches to a container
+// Returns fully qualified URL of streaming server for the container
+// TODO: not yet implemented
+func (c *Container) Attach(stdin, tty bool) (string, error) {
+ return "", ErrNotImplemented
+}
+
+// Mount mounts a container's filesystem on the host
+// The path where the container has been mounted is returned
+// TODO: not yet implemented
+func (c *Container) Mount() (string, error) {
+ return "", ErrNotImplemented
+}
+
+// Pause pauses a container
+// TODO: not yet implemented
+func (c *Container) Pause() error {
+ return ErrNotImplemented
+}
+
+// Unpause unpauses a container
+// TODO: not yet implemented
+func (c *Container) Unpause() error {
+ return ErrNotImplemented
+}
+
+// Export exports a container's root filesystem as a tar archive
+// The archive will be saved as a file at the given path
+// TODO: not yet implemented
+func (c *Container) Export(path string) error {
+ return ErrNotImplemented
+}
+
+// Commit commits the changes between a container and its image, creating a new
+// image
+// If the container was not created from an image (for example,
+// WithRootFSFromPath will create a container from a directory on the system),
+// a new base image will be created from the contents of the container's
+// filesystem
+// TODO: not yet implemented
+func (c *Container) Commit() (*storage.Image, error) {
+ return nil, ErrNotImplemented
+}
diff --git a/libpod/diff.go b/libpod/diff.go
new file mode 100644
index 000000000..055bb7fe8
--- /dev/null
+++ b/libpod/diff.go
@@ -0,0 +1,53 @@
+package libpod
+
+import (
+ "github.com/containers/storage/pkg/archive"
+ "github.com/kubernetes-incubator/cri-o/libpod/layers"
+ "github.com/pkg/errors"
+)
+
+// GetDiff returns the differences between the two images, layers, or containers
+func (r *Runtime) GetDiff(from, to string) ([]archive.Change, error) {
+ toLayer, err := r.getLayerID(to)
+ if err != nil {
+ return nil, err
+ }
+ fromLayer := ""
+ if from != "" {
+ fromLayer, err = r.getLayerID(from)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return r.store.Changes(fromLayer, toLayer)
+}
+
+// GetLayerID gets a full layer id given a full or partial id
+// If the id matches a container or image, the id of the top layer is returned
+// If the id matches a layer, the top layer id is returned
+func (r *Runtime) getLayerID(id string) (string, error) {
+ var toLayer string
+ toImage, err := r.GetImage(id)
+ if err != nil {
+ toCtr, err := r.store.Container(id)
+ if err != nil {
+ toLayer, err = layers.FullID(r.store, id)
+ if err != nil {
+ return "", errors.Errorf("layer, image, or container %s does not exist", id)
+ }
+ } else {
+ toLayer = toCtr.LayerID
+ }
+ } else {
+ toLayer = toImage.TopLayer
+ }
+ return toLayer, nil
+}
+
+// getLayerParent returns the Parent field of the layer with the given ID,
+// looked up in the runtime's store.
+func (r *Runtime) getLayerParent(layerID string) (string, error) {
+ layer, err := r.store.Layer(layerID)
+ if err != nil {
+ return "", err
+ }
+ return layer.Parent, nil
+}
diff --git a/libpod/driver/driver.go b/libpod/driver/driver.go
new file mode 100644
index 000000000..4db55852c
--- /dev/null
+++ b/libpod/driver/driver.go
@@ -0,0 +1,27 @@
+package driver
+
+import cstorage "github.com/containers/storage"
+
+// Data handles the data for a storage driver
+type Data struct {
+ // Name is the graph driver's name, as reported by Store.GraphDriver()
+ Name string
+ // Data is per-layer, driver-specific metadata, as returned by the
+ // driver's Metadata call
+ Data map[string]string
+}
+
+// GetDriverName returns the name of the driver for the given store
+func GetDriverName(store cstorage.Store) (string, error) {
+ driver, err := store.GraphDriver()
+ if err != nil {
+ return "", err
+ }
+ return driver.String(), nil
+}
+
+// GetDriverMetadata returns the metadata regarding the driver for the layer in the given store
+func GetDriverMetadata(store cstorage.Store, layerID string) (map[string]string, error) {
+ driver, err := store.GraphDriver()
+ if err != nil {
+ return nil, err
+ }
+ return driver.Metadata(layerID)
+}
diff --git a/libpod/errors.go b/libpod/errors.go
new file mode 100644
index 000000000..245445005
--- /dev/null
+++ b/libpod/errors.go
@@ -0,0 +1,62 @@
+package libpod
+
+import (
+ "errors"
+)
+
+var (
+ // ErrNoSuchCtr indicates the requested container does not exist
+ ErrNoSuchCtr = errors.New("no such container")
+ // ErrNoSuchPod indicates the requested pod does not exist
+ ErrNoSuchPod = errors.New("no such pod")
+ // ErrNoSuchImage indicates the requested image does not exist
+ ErrNoSuchImage = errors.New("no such image")
+
+ // ErrCtrExists indicates a container with the same name or ID already
+ // exists
+ ErrCtrExists = errors.New("container already exists")
+ // ErrPodExists indicates a pod with the same name or ID already exists
+ ErrPodExists = errors.New("pod already exists")
+ // ErrImageExists indicates an image with the same ID already exists
+ ErrImageExists = errors.New("image already exists")
+
+ // ErrCtrStateInvalid indicates a container is in an improper state for
+ // the requested operation
+ ErrCtrStateInvalid = errors.New("container state improper")
+
+ // ErrRuntimeFinalized indicates that the runtime has already been
+ // created and cannot be modified
+ ErrRuntimeFinalized = errors.New("runtime has been finalized")
+ // ErrCtrFinalized indicates that the container has already been created
+ // and cannot be modified
+ ErrCtrFinalized = errors.New("container has been finalized")
+ // ErrPodFinalized indicates that the pod has already been created and
+ // cannot be modified
+ ErrPodFinalized = errors.New("pod has been finalized")
+
+ // ErrInvalidArg indicates that an invalid argument was passed
+ ErrInvalidArg = errors.New("invalid argument")
+ // ErrEmptyID indicates that an empty ID was passed
+ ErrEmptyID = errors.New("name or ID cannot be empty")
+
+ // ErrInternal indicates an internal library error
+ ErrInternal = errors.New("internal libpod error")
+
+ // ErrRuntimeStopped indicates that the runtime has already been shut
+ // down and no further operations can be performed on it
+ ErrRuntimeStopped = errors.New("runtime has already been stopped")
+ // ErrCtrStopped indicates that the requested container is not running
+ // and the requested operation cannot be performed until it is started
+ ErrCtrStopped = errors.New("container is stopped")
+
+ // ErrCtrRemoved indicates that the container has already been removed
+ // and no further operations can be performed on it
+ ErrCtrRemoved = errors.New("container has already been removed")
+ // ErrPodRemoved indicates that the pod has already been removed and no
+ // further operations can be performed on it
+ ErrPodRemoved = errors.New("pod has already been removed")
+
+ // ErrNotImplemented indicates that the requested functionality is not
+ // yet present
+ ErrNotImplemented = errors.New("not yet implemented")
+)
diff --git a/libpod/images/image_data.go b/libpod/images/image_data.go
new file mode 100644
index 000000000..12fb1e77a
--- /dev/null
+++ b/libpod/images/image_data.go
@@ -0,0 +1,203 @@
+package images
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/containers/image/docker/reference"
+ is "github.com/containers/image/storage"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ "github.com/kubernetes-incubator/cri-o/libpod/driver"
+ digest "github.com/opencontainers/go-digest"
+ ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// Data handles the data used when inspecting an image (the fields below are
+// all image properties gathered by GetData)
+// nolint
+type Data struct {
+ ID string
+ Tags []string
+ Digests []string
+ // Digest of the image manifest, when one is available
+ Digest digest.Digest
+ // Comment and CreatedBy come from the last entry in the image's history
+ Comment string
+ Created *time.Time
+ Container string
+ Author string
+ Config ociv1.ImageConfig
+ Architecture string
+ OS string
+ Annotations map[string]string
+ CreatedBy string
+ // Size is the diff size of the top layer; VirtualSize adds the
+ // manifest-reported image size on top of it
+ Size uint
+ VirtualSize uint
+ GraphDriver driver.Data
+ RootFS ociv1.RootFS
+}
+
+// ParseImageNames parses the names we've stored with an image into a list of
+// tagged references and a list of references which contain digests.
+// Names that fail to parse, and parsed names that yield neither a digest nor
+// a tag, are silently skipped; inner reference errors are likewise ignored.
+// NOTE(review): the named err result is never set — this function always
+// returns a nil error. Confirm the best-effort behavior is intentional.
+func ParseImageNames(names []string) (tags, digests []string, err error) {
+ for _, name := range names {
+ if named, err := reference.ParseNamed(name); err == nil {
+ if digested, ok := named.(reference.Digested); ok {
+ // Digested name: record its canonical form
+ canonical, err := reference.WithDigest(named, digested.Digest())
+ if err == nil {
+ digests = append(digests, canonical.String())
+ }
+ } else {
+ // Bare names get the default tag before being recorded
+ if reference.IsNameOnly(named) {
+ named = reference.TagNameOnly(named)
+ }
+ if tagged, ok := named.(reference.Tagged); ok {
+ namedTagged, err := reference.WithTag(named, tagged.Tag())
+ if err == nil {
+ tags = append(tags, namedTagged.String())
+ }
+ }
+ }
+ }
+ }
+ return tags, digests, nil
+}
+
+func annotations(manifest []byte, manifestType string) map[string]string {
+ annotations := make(map[string]string)
+ switch manifestType {
+ case ociv1.MediaTypeImageManifest:
+ var m ociv1.Manifest
+ if err := json.Unmarshal(manifest, &m); err == nil {
+ for k, v := range m.Annotations {
+ annotations[k] = v
+ }
+ }
+ }
+ return annotations
+}
+
+// GetData gets the inspection Data for the image with the given name in the
+// given store. It aggregates: the image record, its parsed names, graph
+// driver name and layer metadata, top-layer diff size, manifest digest and
+// annotations, and the OCI image configuration.
+func GetData(store storage.Store, name string) (*Data, error) {
+ img, err := FindImage(store, name)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading image %q", name)
+ }
+
+ // "@<ID>" resolves the exact image record we just found
+ imgRef, err := FindImageRef(store, "@"+img.ID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading image %q", img.ID)
+ }
+ defer imgRef.Close()
+
+ tags, digests, err := ParseImageNames(img.Names)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error parsing image names for %q", name)
+ }
+
+ driverName, err := driver.GetDriverName(store)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading name of storage driver")
+ }
+
+ topLayerID := img.TopLayer
+
+ driverMetadata, err := driver.GetDriverMetadata(store, topLayerID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error asking storage driver %q for metadata", driverName)
+ }
+
+ layer, err := store.Layer(topLayerID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading information about layer %q", topLayerID)
+ }
+ // Size of the top layer only (diff against its parent)
+ size, err := store.DiffSize(layer.Parent, layer.ID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error determining size of layer %q", layer.ID)
+ }
+
+ imgSize, err := imgRef.Size()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error determining size of image %q", transports.ImageName(imgRef.Reference()))
+ }
+
+ manifest, manifestType, err := imgRef.Manifest()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading manifest for image %q", img.ID)
+ }
+ // An empty manifest leaves the digest empty rather than hashing nothing
+ manifestDigest := digest.Digest("")
+ if len(manifest) > 0 {
+ manifestDigest = digest.Canonical.FromBytes(manifest)
+ }
+ annotations := annotations(manifest, manifestType)
+
+ config, err := imgRef.OCIConfig()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading image configuration for %q", img.ID)
+ }
+ // Comment/CreatedBy are taken from the most recent history entry
+ historyComment := ""
+ historyCreatedBy := ""
+ if len(config.History) > 0 {
+ historyComment = config.History[len(config.History)-1].Comment
+ historyCreatedBy = config.History[len(config.History)-1].CreatedBy
+ }
+
+ return &Data{
+ ID: img.ID,
+ Tags: tags,
+ Digests: digests,
+ Digest: manifestDigest,
+ Comment: historyComment,
+ Created: config.Created,
+ Author: config.Author,
+ Config: config.Config,
+ Architecture: config.Architecture,
+ OS: config.OS,
+ Annotations: annotations,
+ CreatedBy: historyCreatedBy,
+ Size: uint(size),
+ VirtualSize: uint(size + imgSize),
+ GraphDriver: driver.Data{
+ Name: driverName,
+ Data: driverMetadata,
+ },
+ RootFS: config.RootFS,
+ }, nil
+}
+
+// FindImage searches for a *storage.Image matching the given name or ID in
+// the given store.
+// It first resolves the name through the storage transport; if that fails it
+// falls back to a direct store lookup by ID. When both fail, the transport's
+// error is reported (as a parse error if the reference never parsed at all).
+func FindImage(store storage.Store, image string) (*storage.Image, error) {
+ var img *storage.Image
+ ref, err := is.Transport.ParseStoreReference(store, image)
+ if err == nil {
+ img, err = is.Transport.GetStoreImage(store, ref)
+ }
+ if err != nil {
+ // Fall back to a plain ID lookup in the store
+ img2, err2 := store.Image(image)
+ if err2 != nil {
+ if ref == nil {
+ return nil, errors.Wrapf(err, "error parsing reference to image %q", image)
+ }
+ return nil, errors.Wrapf(err, "unable to locate image %q", image)
+ }
+ img = img2
+ }
+ return img, nil
+}
+
+// FindImageRef searches for and returns a new types.Image matching the given
+// name or ID in the given store.
+// The caller is responsible for calling Close() on the returned image.
+// NOTE(review): NewImage is called with a nil system context, so default
+// policy/auth settings apply — confirm that is what callers expect.
+func FindImageRef(store storage.Store, image string) (types.Image, error) {
+ img, err := FindImage(store, image)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to locate image %q", image)
+ }
+ // Re-reference by exact ID so the returned image is unambiguous
+ ref, err := is.Transport.ParseStoreReference(store, "@"+img.ID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID)
+ }
+ imgRef, err := ref.NewImage(nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading image %q", img.ID)
+ }
+ return imgRef, nil
+}
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
new file mode 100644
index 000000000..b8747d4e7
--- /dev/null
+++ b/libpod/in_memory_state.go
@@ -0,0 +1,273 @@
+package libpod
+
+import (
+ "github.com/docker/docker/pkg/truncindex"
+ "github.com/kubernetes-incubator/cri-o/pkg/registrar"
+ "github.com/pkg/errors"
+)
+
+// An InMemoryState is a purely in-memory state store.
+// Nothing is persisted; all state is lost when the process exits.
+type InMemoryState struct {
+	pods         map[string]*Pod       // all pods, keyed by full pod ID
+	containers   map[string]*Container // all containers, keyed by full container ID
+	podNameIndex *registrar.Registrar  // maps pod names to full pod IDs
+	podIDIndex   *truncindex.TruncIndex // resolves partial pod IDs to full IDs
+	ctrNameIndex *registrar.Registrar  // maps container names to full container IDs
+	ctrIDIndex   *truncindex.TruncIndex // resolves partial container IDs to full IDs
+}
+
+// NewInMemoryState initializes a new, empty in-memory state
+func NewInMemoryState() (State, error) {
+	state := &InMemoryState{
+		pods:         make(map[string]*Pod),
+		containers:   make(map[string]*Container),
+		podNameIndex: registrar.NewRegistrar(),
+		ctrNameIndex: registrar.NewRegistrar(),
+		podIDIndex:   truncindex.NewTruncIndex([]string{}),
+		ctrIDIndex:   truncindex.NewTruncIndex([]string{}),
+	}
+	return state, nil
+}
+
+// Container retrieves a container from its full ID
+func (s *InMemoryState) Container(id string) (*Container, error) {
+	if id == "" {
+		return nil, ErrEmptyID
+	}
+	if ctr, ok := s.containers[id]; ok {
+		return ctr, nil
+	}
+	return nil, errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found", id)
+}
+
+// LookupContainer retrieves a container by full ID, unique partial ID, or name
+func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) {
+	if idOrName == "" {
+		return nil, ErrEmptyID
+	}
+
+	fullID, err := s.ctrNameIndex.Get(idOrName)
+	switch {
+	case err == nil:
+		// Name lookup succeeded; fullID is ready to use.
+	case err == registrar.ErrNameNotReserved:
+		// Not a known name; treat the argument as a (possibly partial) ID.
+		fullID, err = s.ctrIDIndex.Get(idOrName)
+		if err == truncindex.ErrNotExist {
+			return nil, errors.Wrapf(ErrNoSuchCtr, "no container found with name or ID %s", idOrName)
+		} else if err != nil {
+			return nil, errors.Wrapf(err, "error performing truncindex lookup for ID %s", idOrName)
+		}
+	default:
+		return nil, errors.Wrapf(err, "error performing registry lookup for ID %s", idOrName)
+	}
+
+	ctr, ok := s.containers[fullID]
+	if !ok {
+		// Index and map disagree; this indicates state corruption.
+		return nil, errors.Wrapf(ErrInternal, "mismatch in container ID registry and containers map for ID %s", fullID)
+	}
+
+	return ctr, nil
+}
+
+// HasContainer checks if a container with the given ID is present in the state
+func (s *InMemoryState) HasContainer(id string) (bool, error) {
+	if id == "" {
+		return false, ErrEmptyID
+	}
+	_, present := s.containers[id]
+	return present, nil
+}
+
+// AddContainer adds a container to the state
+// If the container belongs to a pod, the pod must already be present when the
+// container is added, and the container must be present in the pod
+func (s *InMemoryState) AddContainer(ctr *Container) error {
+	if !ctr.valid {
+		return errors.Wrapf(ErrCtrRemoved, "container with ID %s is not valid", ctr.ID())
+	}
+
+	if _, exists := s.containers[ctr.ID()]; exists {
+		return errors.Wrapf(ErrCtrExists, "container with ID %s already exists in state", ctr.ID())
+	}
+
+	if ctr.pod != nil {
+		podID := ctr.pod.ID()
+		if _, exists := s.pods[podID]; !exists {
+			return errors.Wrapf(ErrNoSuchPod, "pod %s does not exist, cannot add container %s", podID, ctr.ID())
+		}
+
+		inPod, err := ctr.pod.HasContainer(ctr.ID())
+		if err != nil {
+			return errors.Wrapf(err, "error checking if container %s is present in pod %s", ctr.ID(), podID)
+		}
+		if !inPod {
+			return errors.Wrapf(ErrNoSuchCtr, "container %s is not present in pod %s", ctr.ID(), podID)
+		}
+	}
+
+	if err := s.ctrNameIndex.Reserve(ctr.Name(), ctr.ID()); err != nil {
+		return errors.Wrapf(err, "error registering container name %s", ctr.Name())
+	}
+
+	if err := s.ctrIDIndex.Add(ctr.ID()); err != nil {
+		// Roll back the name reservation on failure
+		s.ctrNameIndex.Release(ctr.Name())
+		return errors.Wrapf(err, "error registering container ID %s", ctr.ID())
+	}
+
+	s.containers[ctr.ID()] = ctr
+
+	return nil
+}
+
+// RemoveContainer removes a container from the state
+// The container will only be removed from the state, not from the pod the container belongs to
+func (s *InMemoryState) RemoveContainer(ctr *Container) error {
+	// Almost no validity checks are performed, to ensure we can kick
+	// misbehaving containers out of the state
+	id := ctr.ID()
+
+	if _, exists := s.containers[id]; !exists {
+		return errors.Wrapf(ErrNoSuchCtr, "no container exists in state with ID %s", id)
+	}
+
+	if err := s.ctrIDIndex.Delete(id); err != nil {
+		return errors.Wrapf(err, "error removing container ID from index")
+	}
+	delete(s.containers, id)
+	s.ctrNameIndex.Release(ctr.Name())
+
+	return nil
+}
+
+// AllContainers retrieves all containers from the state
+func (s *InMemoryState) AllContainers() ([]*Container, error) {
+	all := make([]*Container, 0, len(s.containers))
+	for _, c := range s.containers {
+		all = append(all, c)
+	}
+	return all, nil
+}
+
+// Pod retrieves a pod from the state from its full ID
+func (s *InMemoryState) Pod(id string) (*Pod, error) {
+	if id == "" {
+		return nil, ErrEmptyID
+	}
+	if pod, ok := s.pods[id]; ok {
+		return pod, nil
+	}
+	return nil, errors.Wrapf(ErrNoSuchPod, "no pod with id %s found", id)
+}
+
+// LookupPod retrieves a pod from the state from a full or unique partial ID or
+// a full name
+func (s *InMemoryState) LookupPod(idOrName string) (*Pod, error) {
+	if idOrName == "" {
+		return nil, ErrEmptyID
+	}
+
+	fullID, err := s.podNameIndex.Get(idOrName)
+	switch {
+	case err == nil:
+		// Name lookup succeeded; fullID is ready to use.
+	case err == registrar.ErrNameNotReserved:
+		// Not a known name; treat the argument as a (possibly partial) ID.
+		fullID, err = s.podIDIndex.Get(idOrName)
+		if err == truncindex.ErrNotExist {
+			return nil, errors.Wrapf(ErrNoSuchPod, "no pod found with name or ID %s", idOrName)
+		} else if err != nil {
+			return nil, errors.Wrapf(err, "error performing truncindex lookup for ID %s", idOrName)
+		}
+	default:
+		return nil, errors.Wrapf(err, "error performing registry lookup for ID %s", idOrName)
+	}
+
+	pod, ok := s.pods[fullID]
+	if !ok {
+		// Index and map disagree; this indicates state corruption.
+		return nil, errors.Wrapf(ErrInternal, "mismatch in pod ID registry and pod map for ID %s", fullID)
+	}
+
+	return pod, nil
+}
+
+// HasPod checks if a pod with the given ID is present in the state
+func (s *InMemoryState) HasPod(id string) (bool, error) {
+	if id == "" {
+		return false, ErrEmptyID
+	}
+	_, present := s.pods[id]
+	return present, nil
+}
+
+// AddPod adds a given pod to the state
+// Only empty pods can be added to the state
+func (s *InMemoryState) AddPod(pod *Pod) error {
+	if !pod.valid {
+		return errors.Wrapf(ErrPodRemoved, "pod %s is not valid and cannot be added", pod.ID())
+	}
+
+	if _, exists := s.pods[pod.ID()]; exists {
+		return errors.Wrapf(ErrPodExists, "pod with ID %s already exists in state", pod.ID())
+	}
+
+	if len(pod.containers) != 0 {
+		return errors.Wrapf(ErrInternal, "only empty pods can be added to the state")
+	}
+
+	if err := s.podNameIndex.Reserve(pod.Name(), pod.ID()); err != nil {
+		return errors.Wrapf(err, "error registering pod name %s", pod.Name())
+	}
+
+	if err := s.podIDIndex.Add(pod.ID()); err != nil {
+		// Roll back the name reservation on failure
+		s.podNameIndex.Release(pod.Name())
+		return errors.Wrapf(err, "error registering pod ID %s", pod.ID())
+	}
+
+	s.pods[pod.ID()] = pod
+
+	return nil
+}
+
+// RemovePod removes a given pod from the state
+// Containers within the pod will not be removed or changed
+func (s *InMemoryState) RemovePod(pod *Pod) error {
+	// Don't make many validity checks to ensure we can kick badly formed
+	// pods out of the state
+	id := pod.ID()
+
+	if _, exists := s.pods[id]; !exists {
+		return errors.Wrapf(ErrNoSuchPod, "no pod exists in state with ID %s", id)
+	}
+
+	if err := s.podIDIndex.Delete(id); err != nil {
+		return errors.Wrapf(err, "error removing pod ID %s from index", id)
+	}
+	delete(s.pods, id)
+	s.podNameIndex.Release(pod.Name())
+
+	return nil
+}
+
+// AllPods retrieves all pods currently in the state
+func (s *InMemoryState) AllPods() ([]*Pod, error) {
+	all := make([]*Pod, 0, len(s.pods))
+	for _, p := range s.pods {
+		all = append(all, p)
+	}
+	return all, nil
+}
diff --git a/libpod/layers/layer.go b/libpod/layers/layer.go
new file mode 100644
index 000000000..865cbe700
--- /dev/null
+++ b/libpod/layers/layer.go
@@ -0,0 +1,12 @@
+package layers
+
+import cstorage "github.com/containers/storage"
+
+// FullID resolves a partial layer ID or name to the layer's full ID by
+// asking the store for the matching layer.
+func FullID(store cstorage.Store, id string) (string, error) {
+	l, err := store.Layer(id)
+	if err != nil {
+		return "", err
+	}
+	return l.ID, nil
+}
diff --git a/libpod/oci.go b/libpod/oci.go
new file mode 100644
index 000000000..0ed1c1f66
--- /dev/null
+++ b/libpod/oci.go
@@ -0,0 +1,273 @@
+package libpod
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "syscall"
+ "time"
+
+ "github.com/containerd/cgroups"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+
+ // TODO import these functions into libpod and remove the import
+ // Trying to keep libpod from depending on CRI-O code
+ "github.com/kubernetes-incubator/cri-o/utils"
+)
+
+// OCI code is undergoing heavy rewrite
+
+const (
+	// CgroupfsCgroupsManager represents cgroupfs native cgroup manager
+	CgroupfsCgroupsManager = "cgroupfs"
+	// SystemdCgroupsManager represents systemd native cgroup manager
+	SystemdCgroupsManager = "systemd"
+
+	// ContainerCreateTimeout represents the value of container creating timeout:
+	// how long createContainer waits for conmon to report the container PID
+	// before giving up (four minutes)
+	ContainerCreateTimeout = 240 * time.Second
+)
+
+// OCIRuntime represents an OCI-compatible runtime that libpod can call into
+// to perform container operations
+type OCIRuntime struct {
+	name          string   // human-readable name for this runtime
+	path          string   // path to the OCI runtime binary, passed to conmon via -r
+	conmonPath    string   // path to the conmon binary used to supervise containers
+	conmonEnv     []string // environment passed to conmon
+	cgroupManager string   // CgroupfsCgroupsManager or SystemdCgroupsManager
+	tmpDir        string   // base directory for the runtime's transient files
+	exitsDir      string   // <tmpDir>/exits, where conmon writes exit files
+	socketsDir    string   // <tmpDir>/socket, where conmon creates attach sockets
+	logSizeMax    int64    // max container log size in bytes; negative disables the limit
+	noPivot       bool     // pass --no-pivot to conmon when true
+}
+
+// syncInfo is used to return data from monitor process to daemon
+type syncInfo struct {
+	Pid     int    `json:"pid"`               // container PID; -1 signals create failure
+	Message string `json:"message,omitempty"` // error detail, set when Pid is -1
+}
+
+// Make a new OCI runtime with provided options
+func newOCIRuntime(name string, path string, conmonPath string, conmonEnv []string, cgroupManager string, tmpDir string, logSizeMax int64, noPivotRoot bool) (*OCIRuntime, error) {
+	// Reject unknown cgroup managers up front, before any setup work
+	if cgroupManager != CgroupfsCgroupsManager && cgroupManager != SystemdCgroupsManager {
+		return nil, errors.Wrapf(ErrInvalidArg, "invalid cgroup manager specified: %s", cgroupManager)
+	}
+
+	runtime := &OCIRuntime{
+		name:          name,
+		path:          path,
+		conmonPath:    conmonPath,
+		conmonEnv:     conmonEnv,
+		cgroupManager: cgroupManager,
+		tmpDir:        tmpDir,
+		logSizeMax:    logSizeMax,
+		noPivot:       noPivotRoot,
+		exitsDir:      filepath.Join(tmpDir, "exits"),
+		socketsDir:    filepath.Join(tmpDir, "socket"),
+	}
+
+	// Create the exit files directory; it is allowed to already exist
+	if err := os.MkdirAll(runtime.exitsDir, 0750); err != nil && !os.IsExist(err) {
+		return nil, errors.Wrapf(err, "error creating OCI runtime exit files directory %s",
+			runtime.exitsDir)
+	}
+	// Create the attach sockets directory; it is allowed to already exist
+	if err := os.MkdirAll(runtime.socketsDir, 0750); err != nil && !os.IsExist(err) {
+		return nil, errors.Wrapf(err, "error creating OCI runtime attach sockets directory %s",
+			runtime.socketsDir)
+	}
+
+	return runtime, nil
+}
+
+// newPipe creates a connected pair of unix sockets for parent/child IPC
+func newPipe() (parent *os.File, child *os.File, err error) {
+	fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
+	if err != nil {
+		return nil, nil, err
+	}
+	// fds[1] becomes the parent end, fds[0] the child end
+	parent = os.NewFile(uintptr(fds[1]), "parent")
+	child = os.NewFile(uintptr(fds[0]), "child")
+	return parent, child, nil
+}
+
+// createUnitName builds the systemd scope unit name used for cgroup scopes
+func createUnitName(prefix string, name string) string {
+	return prefix + "-" + name + ".scope"
+}
+
+// CreateContainer creates a container in the OCI runtime by launching conmon,
+// which invokes the runtime and reports the container PID back over a pipe.
+// Returns an error if conmon fails to start, the runtime reports failure, or
+// no PID arrives within ContainerCreateTimeout.
+// TODO terminal support for container
+// Presently just ignoring conmon opts related to it
+func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string) error {
+	var stderrBuf bytes.Buffer
+
+	// Pipe over which conmon reports the container PID back to us
+	parentPipe, childPipe, err := newPipe()
+	if err != nil {
+		return errors.Wrapf(err, "error creating socket pair")
+	}
+
+	// Pipe used to hold conmon until we have finished cgroup setup
+	childStartPipe, parentStartPipe, err := newPipe()
+	if err != nil {
+		// Bug fix: close the first socket pair so its descriptors do
+		// not leak when creating the second pair fails
+		parentPipe.Close()
+		childPipe.Close()
+		return errors.Wrapf(err, "error creating socket pair for start pipe")
+	}
+
+	defer parentPipe.Close()
+	defer parentStartPipe.Close()
+
+	args := []string{}
+	if r.cgroupManager == SystemdCgroupsManager {
+		args = append(args, "-s")
+	}
+	args = append(args, "-c", ctr.ID())
+	args = append(args, "-u", ctr.ID())
+	args = append(args, "-r", r.path)
+	args = append(args, "-b", ctr.bundlePath())
+	args = append(args, "-p", filepath.Join(ctr.state.RunDir, "pidfile"))
+	// TODO container log location should be configurable
+	// The default also likely shouldn't be this
+	args = append(args, "-l", filepath.Join(ctr.config.StaticDir, "ctr.log"))
+	args = append(args, "--exit-dir", r.exitsDir)
+	args = append(args, "--socket-dir-path", r.socketsDir)
+	if ctr.config.Spec.Process.Terminal {
+		args = append(args, "-t")
+	} else if ctr.config.Stdin {
+		args = append(args, "-i")
+	}
+	if r.logSizeMax >= 0 {
+		args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax))
+	}
+	if r.noPivot {
+		args = append(args, "--no-pivot")
+	}
+	logrus.WithFields(logrus.Fields{
+		"args": args,
+	}).Debugf("running conmon: %s", r.conmonPath)
+
+	cmd := exec.Command(r.conmonPath, args...)
+	cmd.Dir = ctr.state.RunDir
+	cmd.SysProcAttr = &syscall.SysProcAttr{
+		Setpgid: true,
+	}
+	// TODO this is probably a really bad idea for some uses
+	// Make this configurable
+	cmd.Stdin = os.Stdin
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if ctr.config.Spec.Process.Terminal {
+		cmd.Stderr = &stderrBuf
+	}
+
+	cmd.ExtraFiles = append(cmd.ExtraFiles, childPipe, childStartPipe)
+	// 0, 1 and 2 are stdin, stdout and stderr
+	cmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", 3))
+	cmd.Env = append(cmd.Env, fmt.Sprintf("_OCI_STARTPIPE=%d", 4))
+
+	err = cmd.Start()
+	if err != nil {
+		// Bug fix: close both child-side pipe ends (the original code
+		// leaked childStartPipe here)
+		childPipe.Close()
+		childStartPipe.Close()
+		return err
+	}
+
+	// We don't need the child-side pipe ends on the parent side
+	childPipe.Close()
+	childStartPipe.Close()
+
+	// Move conmon to specified cgroup
+	if r.cgroupManager == SystemdCgroupsManager {
+		logrus.Infof("Running conmon under slice %s and unitName %s", cgroupParent, createUnitName("libpod-conmon", ctr.ID()))
+		if err = utils.RunUnderSystemdScope(cmd.Process.Pid, cgroupParent, createUnitName("libpod-conmon", ctr.ID())); err != nil {
+			logrus.Warnf("Failed to add conmon to systemd sandbox cgroup: %v", err)
+		}
+	} else {
+		control, err := cgroups.New(cgroups.V1, cgroups.StaticPath(filepath.Join(cgroupParent, "/libpod-conmon-"+ctr.ID())), &spec.LinuxResources{})
+		if err != nil {
+			logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
+		} else {
+			// XXX: this defer does nothing as the cgroup can't be deleted cause
+			// it contains the conmon pid in tasks
+			// we need to remove this defer and delete the cgroup once conmon exits
+			// maybe need a conmon monitor?
+			defer control.Delete()
+			if err := control.Add(cgroups.Process{Pid: cmd.Process.Pid}); err != nil {
+				logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
+			}
+		}
+	}
+
+	/* We set the cgroup, now the child can start creating children */
+	someData := []byte{0}
+	_, err = parentStartPipe.Write(someData)
+	if err != nil {
+		return err
+	}
+
+	/* Wait for initial setup and fork, and reap child */
+	err = cmd.Wait()
+	if err != nil {
+		return err
+	}
+
+	// TODO should do a defer r.deleteContainer(ctr) here if err != nil
+	// Need deleteContainer to be working first, though...
+
+	// Wait to get container pid from conmon
+	type syncStruct struct {
+		si  *syncInfo
+		err error
+	}
+	ch := make(chan syncStruct)
+	go func() {
+		// Decode the PID report conmon writes to the sync pipe
+		var si *syncInfo
+		if err = json.NewDecoder(parentPipe).Decode(&si); err != nil {
+			ch <- syncStruct{err: err}
+			return
+		}
+		ch <- syncStruct{si: si}
+	}()
+
+	select {
+	case ss := <-ch:
+		if ss.err != nil {
+			return errors.Wrapf(ss.err, "error reading container (probably exited) json message")
+		}
+		logrus.Debugf("Received container pid: %d", ss.si.Pid)
+		// A PID of -1 is conmon's signal that creation failed
+		if ss.si.Pid == -1 {
+			if ss.si.Message != "" {
+				return errors.Wrapf(ErrInternal, "container create failed: %s", ss.si.Message)
+			}
+			return errors.Wrapf(ErrInternal, "container create failed")
+		}
+	case <-time.After(ContainerCreateTimeout):
+		return errors.Wrapf(ErrInternal, "container creation timeout")
+	}
+	return nil
+}
+
+// updateContainerStatus retrieves the current status of the container from the
+// runtime
+// TODO: not yet implemented; always returns ErrNotImplemented
+func (r *OCIRuntime) updateContainerStatus(ctr *Container) error {
+	return ErrNotImplemented
+}
+
+// startContainer starts the given container by invoking the OCI runtime's
+// "start" verb via conmon's std streams helper
+func (r *OCIRuntime) startContainer(ctr *Container) error {
+	// TODO: streams should probably *not* be our STDIN/OUT/ERR - redirect to buffers?
+	err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.path, "start", ctr.ID())
+	if err != nil {
+		return err
+	}
+
+	// TODO record start time in container struct
+	return nil
+}
diff --git a/libpod/options.go b/libpod/options.go
new file mode 100644
index 000000000..982655fc0
--- /dev/null
+++ b/libpod/options.go
@@ -0,0 +1,382 @@
+package libpod
+
+import (
+ "fmt"
+
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/pkg/errors"
+)
+
+var (
+	// ctrNotImplemented is a placeholder CtrCreateOption body for options
+	// that are not yet implemented; it always fails container creation.
+	// TODO(review): consider returning ErrNotImplemented here for
+	// consistency with the rest of the package — confirm no caller
+	// matches on the error text first.
+	ctrNotImplemented = func(c *Container) error {
+		return fmt.Errorf("NOT IMPLEMENTED")
+	}
+)
+
+// Namespace name constants, used as keys in the map accepted by
+// WithSharedNamespaces.
+const (
+	// IPCNamespace represents the IPC namespace
+	IPCNamespace = "ipc"
+	// MountNamespace represents the mount namespace
+	MountNamespace = "mount"
+	// NetNamespace represents the network namespace
+	NetNamespace = "net"
+	// PIDNamespace represents the PID namespace
+	PIDNamespace = "pid"
+	// UserNamespace represents the user namespace
+	UserNamespace = "user"
+	// UTSNamespace represents the UTS namespace
+	UTSNamespace = "uts"
+)
+
+// Runtime Creation Options
+
+// WithStorageConfig uses the given configuration to set up container storage
+// If this is not specified, the system default configuration will be used
+// instead
+func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
+	return func(r *Runtime) error {
+		if r.valid {
+			return ErrRuntimeFinalized
+		}
+
+		r.config.StorageConfig.RunRoot = config.RunRoot
+		r.config.StorageConfig.GraphRoot = config.GraphRoot
+		r.config.StorageConfig.GraphDriverName = config.GraphDriverName
+
+		// Copy all slice fields so later mutation of the caller's struct
+		// cannot affect the runtime configuration
+		r.config.StorageConfig.GraphDriverOptions = append([]string{}, config.GraphDriverOptions...)
+		r.config.StorageConfig.UIDMap = append([]idtools.IDMap{}, config.UIDMap...)
+		r.config.StorageConfig.GIDMap = append([]idtools.IDMap{}, config.GIDMap...)
+
+		return nil
+	}
+}
+
+// WithImageConfig uses the given configuration to set up image handling
+// If this is not specified, the system default configuration will be used
+// instead
+func WithImageConfig(defaultTransport string, insecureRegistries, registries []string) RuntimeOption {
+	return func(r *Runtime) error {
+		if r.valid {
+			return ErrRuntimeFinalized
+		}
+
+		r.config.ImageDefaultTransport = defaultTransport
+
+		// Copy the registry lists so the caller cannot mutate them later
+		r.config.InsecureRegistries = append([]string{}, insecureRegistries...)
+		r.config.Registries = append([]string{}, registries...)
+
+		return nil
+	}
+}
+
+// WithSignaturePolicy specifies the path of a file which decides how trust is
+// managed for images we've pulled.
+// If this is not specified, the system default configuration will be used
+// instead
+func WithSignaturePolicy(path string) RuntimeOption {
+	return func(r *Runtime) error {
+		if r.valid {
+			return ErrRuntimeFinalized
+		}
+		r.config.SignaturePolicyPath = path
+		return nil
+	}
+}
+
+// WithOCIRuntime specifies an OCI runtime to use for running containers
+func WithOCIRuntime(runtimePath string) RuntimeOption {
+	return func(r *Runtime) error {
+		if r.valid {
+			return ErrRuntimeFinalized
+		}
+		r.config.RuntimePath = runtimePath
+		return nil
+	}
+}
+
+// WithConmonPath specifies the path to the conmon binary which manages the
+// runtime
+func WithConmonPath(path string) RuntimeOption {
+	return func(r *Runtime) error {
+		if r.valid {
+			return ErrRuntimeFinalized
+		}
+		r.config.ConmonPath = path
+		return nil
+	}
+}
+
+// WithConmonEnv specifies the environment variable list for the conmon process
+func WithConmonEnv(environment []string) RuntimeOption {
+	return func(r *Runtime) error {
+		if r.valid {
+			return ErrRuntimeFinalized
+		}
+		// Copy the list so the caller cannot mutate it later
+		r.config.ConmonEnvVars = append([]string{}, environment...)
+		return nil
+	}
+}
+
+// WithCgroupManager specifies the manager implementation name which is used to
+// handle cgroups for containers
+// Current valid values are "cgroupfs" and "systemd"
+func WithCgroupManager(manager string) RuntimeOption {
+	return func(r *Runtime) error {
+		if r.valid {
+			return ErrRuntimeFinalized
+		}
+		r.config.CgroupManager = manager
+		return nil
+	}
+}
+
+// WithStaticDir sets the directory that static runtime files which persist
+// across reboots will be stored
+func WithStaticDir(dir string) RuntimeOption {
+	return func(r *Runtime) error {
+		if r.valid {
+			return ErrRuntimeFinalized
+		}
+		r.config.StaticDir = dir
+		return nil
+	}
+}
+
+// WithTmpDir sets the directory that temporary runtime files which are not
+// expected to survive across reboots will be stored
+// This should be located on a tmpfs mount (/tmp or /var/run for example)
+func WithTmpDir(dir string) RuntimeOption {
+	return func(r *Runtime) error {
+		if r.valid {
+			return ErrRuntimeFinalized
+		}
+		r.config.TmpDir = dir
+		return nil
+	}
+}
+
+// WithSELinux enables SELinux on the container server
+func WithSELinux() RuntimeOption {
+	return func(r *Runtime) error {
+		if r.valid {
+			return ErrRuntimeFinalized
+		}
+		r.config.SelinuxEnabled = true
+		return nil
+	}
+}
+
+// WithPidsLimit specifies the maximum number of processes each container is
+// restricted to
+func WithPidsLimit(limit int64) RuntimeOption {
+	return func(r *Runtime) error {
+		if r.valid {
+			return ErrRuntimeFinalized
+		}
+		r.config.PidsLimit = limit
+		return nil
+	}
+}
+
+// WithMaxLogSize sets the maximum size of container logs
+// Positive sizes are limits in bytes, -1 is unlimited
+func WithMaxLogSize(limit int64) RuntimeOption {
+	return func(r *Runtime) error {
+		if r.valid {
+			return ErrRuntimeFinalized
+		}
+		r.config.MaxLogSize = limit
+		return nil
+	}
+}
+
+// WithNoPivotRoot sets whether the runtime uses MS_MOVE instead of PIVOT_ROOT
+// when starting containers
+func WithNoPivotRoot(noPivot bool) RuntimeOption {
+	return func(rt *Runtime) error {
+		if rt.valid {
+			return ErrRuntimeFinalized
+		}
+
+		// Bug fix: honor the caller's value; previously this always
+		// assigned true, making it impossible to explicitly disable
+		rt.config.NoPivotRoot = noPivot
+
+		return nil
+	}
+}
+
+// Container Creation Options
+
+// WithRootFSFromPath uses the given path as a container's root filesystem
+// No further setup is performed on this path
+func WithRootFSFromPath(path string) CtrCreateOption {
+	return func(c *Container) error {
+		if c.valid {
+			return ErrCtrFinalized
+		}
+
+		// Refuse to overwrite an already-configured root filesystem
+		alreadySet := c.config.RootfsDir != "" || c.config.RootfsImageID != "" || c.config.RootfsImageName != ""
+		if alreadySet {
+			return errors.Wrapf(ErrInvalidArg, "container already configured with root filesystem")
+		}
+
+		c.config.RootfsDir = path
+		c.config.RootfsFromImage = false
+
+		return nil
+	}
+}
+
+// WithRootFSFromImage sets up a fresh root filesystem using the given image
+// If useImageConfig is specified, image volumes, environment variables, and
+// other configuration from the image will be added to the config
+// TODO: Replace image name and ID with a libpod.Image struct when that is finished
+func WithRootFSFromImage(imageID string, imageName string, useImageConfig bool) CtrCreateOption {
+	return func(c *Container) error {
+		if c.valid {
+			return ErrCtrFinalized
+		}
+
+		// Refuse to overwrite an already-configured root filesystem
+		alreadySet := c.config.RootfsDir != "" || c.config.RootfsImageID != "" || c.config.RootfsImageName != ""
+		if alreadySet {
+			return errors.Wrapf(ErrInvalidArg, "container already configured with root filesystem")
+		}
+
+		c.config.RootfsImageID = imageID
+		c.config.RootfsImageName = imageName
+		c.config.UseImageConfig = useImageConfig
+		c.config.RootfsFromImage = true
+
+		return nil
+	}
+}
+
+// WithStdin keeps stdin on the container open to allow interaction
+func WithStdin() CtrCreateOption {
+	return func(c *Container) error {
+		if c.valid {
+			return ErrCtrFinalized
+		}
+		c.config.Stdin = true
+		return nil
+	}
+}
+
+// WithSharedNamespaces sets a container to share namespaces with another
+// container. If the from container belongs to a pod, the new container will
+// be added to the pod.
+// By default no namespaces are shared. To share a namespace, add the Namespace
+// string constant to the map as a key
+// TODO: not yet implemented; the returned option always fails the create
+func WithSharedNamespaces(from *Container, namespaces map[string]string) CtrCreateOption {
+	return ctrNotImplemented
+}
+
+// WithPod adds the container to a pod
+func (r *Runtime) WithPod(pod *Pod) CtrCreateOption {
+	return func(c *Container) error {
+		if c.valid {
+			return ErrCtrFinalized
+		}
+		if pod == nil {
+			return ErrInvalidArg
+		}
+
+		c.config.Pod = pod.ID()
+		c.pod = pod
+
+		return nil
+	}
+}
+
+// WithLabels adds labels to the container
+func WithLabels(labels map[string]string) CtrCreateOption {
+	return func(c *Container) error {
+		if c.valid {
+			return ErrCtrFinalized
+		}
+
+		// Copy into a fresh, pre-sized map so the caller's map cannot
+		// mutate the container config later
+		c.config.Labels = make(map[string]string, len(labels))
+		for k, v := range labels {
+			c.config.Labels[k] = v
+		}
+
+		return nil
+	}
+}
+
+// WithName sets the container's name
+func WithName(name string) CtrCreateOption {
+	return func(c *Container) error {
+		if c.valid {
+			return ErrCtrFinalized
+		}
+		c.config.Name = name
+		return nil
+	}
+}
+
+// WithStopSignal sets the signal that will be sent to stop the container
+func WithStopSignal(signal uint) CtrCreateOption {
+	return func(c *Container) error {
+		if c.valid {
+			return ErrCtrFinalized
+		}
+
+		// Valid signals are 1 through 64 (SIGRTMAX)
+		switch {
+		case signal == 0:
+			return errors.Wrapf(ErrInvalidArg, "stop signal cannot be 0")
+		case signal > 64:
+			return errors.Wrapf(ErrInvalidArg, "stop signal cannot be greater than 64 (SIGRTMAX)")
+		}
+
+		c.config.StopSignal = signal
+
+		return nil
+	}
+}
+
+// Pod Creation Options
+
+// WithPodName sets the name of the pod
+func WithPodName(name string) PodCreateOption {
+	return func(p *Pod) error {
+		if p.valid {
+			return ErrPodFinalized
+		}
+		p.name = name
+		return nil
+	}
+}
diff --git a/libpod/pod.go b/libpod/pod.go
new file mode 100644
index 000000000..48a761d57
--- /dev/null
+++ b/libpod/pod.go
@@ -0,0 +1,137 @@
+package libpod
+
+import (
+ "sync"
+
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/pkg/errors"
+)
+
+// Pod represents a group of containers that may share namespaces
+type Pod struct {
+	id   string // full pod ID, generated at creation
+	name string // pod name; currently defaults to the ID (see newPod)
+
+	containers map[string]*Container // member containers, keyed by full container ID
+
+	valid bool         // false once the pod has been removed
+	lock  sync.RWMutex // protects containers and valid
+}
+
+// ID retrieves the pod's ID.
+// The ID is set once at creation and never changes.
+func (p *Pod) ID() string {
+	return p.id
+}
+
+// Name retrieves the pod's name.
+// Names default to the pod ID but may be set via WithPodName.
+func (p *Pod) Name() string {
+	return p.name
+}
+
+// Creates a new, empty pod with a generated ID
+func newPod() (*Pod, error) {
+	id := stringid.GenerateNonCryptoID()
+	pod := &Pod{
+		id:         id,
+		name:       id, // TODO generate human-readable name here
+		containers: make(map[string]*Container),
+	}
+	return pod, nil
+}
+
+// Adds a container to the pod
+// Does not check that container's pod ID is set correctly, or attempt to set
+// pod ID after adding
+func (p *Pod) addContainer(c *Container) error {
+	// Lock ordering: pod lock first, then container lock
+	p.lock.Lock()
+	defer p.lock.Unlock()
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	if !p.valid {
+		return ErrPodRemoved
+	}
+	if !c.valid {
+		return ErrCtrRemoved
+	}
+
+	if _, exists := p.containers[c.ID()]; exists {
+		return errors.Wrapf(ErrCtrExists, "container with ID %s already exists in pod %s", c.ID(), p.id)
+	}
+
+	p.containers[c.ID()] = c
+
+	return nil
+}
+
+// Removes a container from the pod
+// Does not perform any checks on the container
+func (p *Pod) removeContainer(c *Container) error {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	if !p.valid {
+		return ErrPodRemoved
+	}
+
+	if _, exists := p.containers[c.ID()]; !exists {
+		return errors.Wrapf(ErrNoSuchCtr, "no container with id %s in pod %s", c.ID(), p.id)
+	}
+
+	delete(p.containers, c.ID())
+
+	return nil
+}
+
+// Start starts all containers within a pod that are not already running
+// TODO: not yet implemented; always returns ErrNotImplemented
+func (p *Pod) Start() error {
+	return ErrNotImplemented
+}
+
+// Stop stops all containers within a pod that are not already stopped
+// TODO: not yet implemented; always returns ErrNotImplemented
+func (p *Pod) Stop() error {
+	return ErrNotImplemented
+}
+
+// Kill sends a signal to all running containers within a pod
+// TODO: not yet implemented; the signal argument is currently ignored
+func (p *Pod) Kill(signal uint) error {
+	return ErrNotImplemented
+}
+
+// HasContainer checks if a container is present in the pod
+func (p *Pod) HasContainer(id string) (bool, error) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	if !p.valid {
+		return false, ErrPodRemoved
+	}
+
+	_, present := p.containers[id]
+	return present, nil
+}
+
+// GetContainers retrieves the containers in the pod
+func (p *Pod) GetContainers() ([]*Container, error) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	if !p.valid {
+		return nil, ErrPodRemoved
+	}
+
+	out := make([]*Container, 0, len(p.containers))
+	for _, c := range p.containers {
+		out = append(out, c)
+	}
+	return out, nil
+}
+
+// Status gets the status of all containers in the pod
+// TODO This should return a summary of the states of all containers in the pod
+// TODO: not yet implemented; always returns ErrNotImplemented
+func (p *Pod) Status() error {
+	return ErrNotImplemented
+}
diff --git a/libpod/runtime.go b/libpod/runtime.go
new file mode 100644
index 000000000..80202c567
--- /dev/null
+++ b/libpod/runtime.go
@@ -0,0 +1,192 @@
+package libpod
+
+import (
+ "os"
+ "sync"
+
+ is "github.com/containers/image/storage"
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/ulule/deepcopier"
+)
+
// A RuntimeOption is a functional option which alters the Runtime created by
// NewRuntime. Options are applied in order, after the defaults are copied in.
type RuntimeOption func(*Runtime) error

// Runtime is the core libpod runtime
type Runtime struct {
	config *RuntimeConfig // active configuration (defaults overlaid with options)
	state State // container tracking backend
	store storage.Store // containers/storage store for images and layers
	storageService *storageService // creates container root filesystems from images
	imageContext *types.SystemContext // containers/image context (carries signature policy path)
	ociRuntime *OCIRuntime // performs container operations (runc)
	valid bool // set true by NewRuntime; set false by Shutdown
	lock sync.RWMutex // guards valid and runtime-wide operations
}

// RuntimeConfig contains configuration options used to set up the runtime
type RuntimeConfig struct {
	StorageConfig storage.StoreOptions // passed to storage.GetStore; zero value uses storage defaults
	ImageDefaultTransport string // transport prefix for unqualified image names — TODO confirm usage
	InsecureRegistries []string
	Registries []string
	SignaturePolicyPath string // default signature policy; per-operation CopyOptions may override
	RuntimePath string // path to the OCI runtime binary (runc)
	ConmonPath string // path to the conmon binary
	ConmonEnvVars []string // environment passed to conmon
	CgroupManager string // cgroup manager name, e.g. "cgroupfs"
	StaticDir string // persistent state directory, created by NewRuntime
	TmpDir string // per-boot state directory, created by NewRuntime
	SelinuxEnabled bool
	PidsLimit int64
	MaxLogSize int64 // maximum container log size; -1 means unlimited — TODO confirm
	NoPivotRoot bool
}
+
var (
	// defaultRuntimeConfig is deep-copied into every new Runtime by
	// NewRuntime before user-supplied options are applied.
	defaultRuntimeConfig = RuntimeConfig{
		// Leave this empty so containers/storage will use its defaults
		StorageConfig: storage.StoreOptions{},
		ImageDefaultTransport: "docker://",
		RuntimePath: "/usr/bin/runc",
		ConmonPath: "/usr/local/libexec/crio/conmon",
		ConmonEnvVars: []string{
			"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
		},
		CgroupManager: "cgroupfs",
		StaticDir: "/var/lib/libpod",
		TmpDir: "/var/run/libpod",
		SelinuxEnabled: false,
		PidsLimit: 1024,
		MaxLogSize: -1,
		NoPivotRoot: false,
	}
)
+
// NewRuntime creates a new container runtime
// Options can be passed to override the default configuration for the runtime
//
// On any error after the store has been opened, the deferred cleanup shuts
// the store down again (non-forcibly, since it may be shared with another
// libpod process). The named return err is what the deferred func inspects.
func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
	runtime = new(Runtime)
	runtime.config = new(RuntimeConfig)

	// Copy the default configuration
	deepcopier.Copy(defaultRuntimeConfig).To(runtime.config)

	// Overwrite it with user-given configuration options
	for _, opt := range options {
		if err := opt(runtime); err != nil {
			return nil, errors.Wrapf(err, "error configuring runtime")
		}
	}

	// Set up containers/storage
	store, err := storage.GetStore(runtime.config.StorageConfig)
	if err != nil {
		return nil, err
	}
	runtime.store = store
	is.Transport.SetStore(store)
	defer func() {
		if err != nil {
			// Don't forcibly shut down
			// We could be opening a store in use by another libpod
			_, err2 := runtime.store.Shutdown(false)
			if err2 != nil {
				logrus.Errorf("Error removing store for partially-created runtime: %s", err2)
			}
		}
	}()

	// Set up a storage service for creating container root filesystems from
	// images
	storageService, err := getStorageService(runtime.store)
	if err != nil {
		return nil, err
	}
	runtime.storageService = storageService

	// Set up containers/image
	runtime.imageContext = &types.SystemContext{
		SignaturePolicyPath: runtime.config.SignaturePolicyPath,
	}

	// Set up the state
	state, err := NewInMemoryState()
	if err != nil {
		return nil, err
	}
	runtime.state = state

	// Make an OCI runtime to perform container operations
	ociRuntime, err := newOCIRuntime("runc", runtime.config.RuntimePath,
		runtime.config.ConmonPath, runtime.config.ConmonEnvVars,
		runtime.config.CgroupManager, runtime.config.TmpDir,
		runtime.config.MaxLogSize, runtime.config.NoPivotRoot)
	if err != nil {
		return nil, err
	}
	runtime.ociRuntime = ociRuntime

	// Make the static files directory if it does not exist
	if err := os.MkdirAll(runtime.config.StaticDir, 0755); err != nil {
		// The directory is allowed to exist
		if !os.IsExist(err) {
			return nil, errors.Wrapf(err, "error creating runtime static files directory %s",
				runtime.config.StaticDir)
		}
	}

	// Make the per-boot files directory if it does not exist
	if err := os.MkdirAll(runtime.config.TmpDir, 0755); err != nil {
		// The directory is allowed to exist
		if !os.IsExist(err) {
			return nil, errors.Wrapf(err, "error creating runtime temporary files directory %s",
				runtime.config.TmpDir)
		}
	}

	// Mark the runtime as valid - ready to be used, cannot be modified
	// further
	runtime.valid = true

	return runtime, nil
}
+
+// GetConfig returns a copy of the configuration used by the runtime
+func (r *Runtime) GetConfig() *RuntimeConfig {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil
+ }
+
+ config := new(RuntimeConfig)
+
+ // Copy so the caller won't be able to modify the actual config
+ deepcopier.Copy(r.config).To(config)
+
+ return config
+}
+
+// Shutdown shuts down the runtime and associated containers and storage
+// If force is true, containers and mounted storage will be shut down before
+// cleaning up; if force is false, an error will be returned if there are
+// still containers running or mounted
+func (r *Runtime) Shutdown(force bool) error {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if !r.valid {
+ return ErrRuntimeStopped
+ }
+
+ r.valid = false
+
+ _, err := r.store.Shutdown(force)
+ return err
+}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
new file mode 100644
index 000000000..45990d2db
--- /dev/null
+++ b/libpod/runtime_ctr.go
@@ -0,0 +1,229 @@
+package libpod
+
+import (
+ "github.com/containers/storage"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
// Contains the public Runtime API for containers

// A CtrCreateOption is a functional option which alters the Container created
// by NewContainer. Options are applied in order, before the container is
// finalized and added to the state.
type CtrCreateOption func(*Container) error

// ContainerFilter is a function to determine whether a container is included
// in command output. Containers to be outputted are tested using the function.
// A true return will include the container, a false return will exclude it.
type ContainerFilter func(*Container) bool
+
+// NewContainer creates a new container from a given OCI config
+func (r *Runtime) NewContainer(spec *spec.Spec, options ...CtrCreateOption) (ctr *Container, err error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ ctr, err = newContainer(spec)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, option := range options {
+ if err := option(ctr); err != nil {
+ return nil, errors.Wrapf(err, "error running container create option")
+ }
+ }
+
+ ctr.valid = true
+ ctr.state.State = ContainerStateConfigured
+ ctr.runtime = r
+
+ // Set up storage for the container
+ if err := ctr.setupStorage(); err != nil {
+ return nil, errors.Wrapf(err, "error configuring storage for container")
+ }
+ defer func() {
+ if err != nil {
+ if err2 := ctr.teardownStorage(); err2 != nil {
+ logrus.Errorf("Error removing partially-created container root filesystem: %s", err2)
+ }
+ }
+ }()
+
+ // If the container is in a pod, add it to the pod
+ if ctr.pod != nil {
+ if err := ctr.pod.addContainer(ctr); err != nil {
+ return nil, errors.Wrapf(err, "error adding new container to pod %s", ctr.pod.ID())
+ }
+ }
+ defer func() {
+ if err != nil {
+ if err2 := ctr.pod.removeContainer(ctr); err2 != nil {
+ logrus.Errorf("Error removing partially-created container from pod %s: %s", ctr.pod.ID(), err2)
+ }
+ }
+ }()
+
+ if err := r.state.AddContainer(ctr); err != nil {
+ // TODO: Might be worth making an effort to detect duplicate IDs
+ // We can recover from that by generating a new ID for the
+ // container
+ return nil, errors.Wrapf(err, "error adding new container to state")
+ }
+
+ return ctr, nil
+}
+
// RemoveContainer removes the given container
// If force is specified, the container will be stopped first
// Otherwise, RemoveContainer will return an error if the container is running
//
// NOTE(review): force is not yet consulted in this body - running containers
// are always rejected below; stop-before-remove is still TODO.
func (r *Runtime) RemoveContainer(c *Container, force bool) error {
	// Lock ordering: runtime lock first, then container lock.
	r.lock.Lock()
	defer r.lock.Unlock()

	c.lock.Lock()
	defer c.lock.Unlock()

	if !r.valid {
		return ErrRuntimeStopped
	}

	if !c.valid {
		return ErrCtrRemoved
	}

	// TODO check container status and unmount storage
	// TODO check that no other containers depend on this container's
	// namespaces
	status, err := c.State()
	if err != nil {
		return err
	}

	// A container cannot be removed if it is running
	if status == ContainerStateRunning {
		return errors.Wrapf(ErrCtrStateInvalid, "cannot remove container %s as it is running", c.ID())
	}

	if err := r.state.RemoveContainer(c); err != nil {
		return errors.Wrapf(err, "error removing container from state")
	}

	// Set container as invalid so it can no longer be used
	c.valid = false

	// Remove container from pod, if it joined one
	if c.pod != nil {
		if err := c.pod.removeContainer(c); err != nil {
			return errors.Wrapf(err, "error removing container from pod %s", c.pod.ID())
		}
	}

	return nil
}
+
+// GetContainer retrieves a container by its ID
+func (r *Runtime) GetContainer(id string) (*Container, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ return r.state.Container(id)
+}
+
+// HasContainer checks if a container with the given ID is present
+func (r *Runtime) HasContainer(id string) (bool, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return false, ErrRuntimeStopped
+ }
+
+ return r.state.HasContainer(id)
+}
+
+// LookupContainer looks up a container by its name or a partial ID
+// If a partial ID is not unique, an error will be returned
+func (r *Runtime) LookupContainer(idOrName string) (*Container, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ return r.state.LookupContainer(idOrName)
+}
+
+// GetContainers retrieves all containers from the state
+// Filters can be provided which will determine what containers are included in
+// the output. Multiple filters are handled by ANDing their output, so only
+// containers matching all filters are returned
+func (r *Runtime) GetContainers(filters ...ContainerFilter) ([]*Container, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ ctrs, err := r.state.AllContainers()
+ if err != nil {
+ return nil, err
+ }
+
+ ctrsFiltered := make([]*Container, 0, len(ctrs))
+
+ for _, ctr := range ctrs {
+ include := true
+ for _, filter := range filters {
+ include = include && filter(ctr)
+ }
+
+ if include {
+ ctrsFiltered = append(ctrsFiltered, ctr)
+ }
+ }
+
+ return ctrsFiltered, nil
+}
+
+// getContainersWithImage returns a list of containers referencing imageID
+func (r *Runtime) getContainersWithImage(imageID string) ([]storage.Container, error) {
+ var matchingContainers []storage.Container
+ containers, err := r.store.Containers()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, ctr := range containers {
+ if ctr.ImageID == imageID {
+ matchingContainers = append(matchingContainers, ctr)
+ }
+ }
+ return matchingContainers, nil
+}
+
+// removeMultipleContainers deletes a list of containers from the store
+// TODO refactor this to remove libpod Containers
+func (r *Runtime) removeMultipleContainers(containers []storage.Container) error {
+ for _, ctr := range containers {
+ if err := r.store.DeleteContainer(ctr.ID); err != nil {
+ return errors.Wrapf(err, "could not remove container %q", ctr)
+ }
+ }
+ return nil
+}
+
// containerConfigToDisk saves a container's nonvolatile configuration to disk
// TODO: not yet implemented; always returns ErrNotImplemented.
func (r *Runtime) containerConfigToDisk(ctr *Container) error {
	return ErrNotImplemented
}
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
new file mode 100644
index 000000000..9e7ad3106
--- /dev/null
+++ b/libpod/runtime_img.go
@@ -0,0 +1,823 @@
+package libpod
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "syscall"
+ "time"
+
+ cp "github.com/containers/image/copy"
+ dockerarchive "github.com/containers/image/docker/archive"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/docker/tarfile"
+ ociarchive "github.com/containers/image/oci/archive"
+ "github.com/containers/image/pkg/sysregistries"
+ "github.com/containers/image/signature"
+ is "github.com/containers/image/storage"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/transports/alltransports"
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/kubernetes-incubator/cri-o/libpod/common"
+ digest "github.com/opencontainers/go-digest"
+ ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
// Runtime API

const (
	// DefaultRegistry is a prefix that we apply to an image name
	// to check docker hub first for the image
	DefaultRegistry = "docker://"
)

var (
	// Transport names below are compared against srcRef.Transport().Name()
	// when deciding how to pull an image (see getPullListFromRef).

	// DockerArchive is the transport we prepend to an image name
	// when saving to docker-archive
	DockerArchive = dockerarchive.Transport.Name()
	// OCIArchive is the transport we prepend to an image name
	// when saving to oci-archive
	OCIArchive = ociarchive.Transport.Name()
	// DirTransport is the transport for pushing and pulling
	// images to and from a directory
	DirTransport = "dir"
)
+
// CopyOptions contains the options given when pushing or pulling images
type CopyOptions struct {
	// Compression specifies the type of compression which is applied to
	// layer blobs. The default is to not use compression, but
	// archive.Gzip is recommended.
	Compression archive.Compression
	// DockerRegistryOptions encapsulates settings that affect how we
	// connect or authenticate to a remote registry to which we want to
	// push the image.
	common.DockerRegistryOptions
	// SigningOptions encapsulates settings that control whether or not we
	// strip or add signatures to the image when pushing (uploading) the
	// image to a registry.
	common.SigningOptions

	// SignaturePolicyPath points to an alternative signature policy file,
	// used mainly for testing; overrides the runtime default when non-empty
	SignaturePolicyPath string
	// AuthFile is the path of the cached credentials file defined by the user
	AuthFile string
	// Writer is the reportWriter for the output
	Writer io.Writer
}
+
// Image API

// ImageFilterParams contains the filter options that may be given when outputting images
type ImageFilterParams struct {
	// Dangling is a boolean-like string ("true"/"false"); validated by
	// ParseImageFilter with common.IsValidBool
	Dangling string
	// Label is a label filter value
	Label string
	// BeforeImage holds the creation time of the "before=" reference image
	BeforeImage time.Time
	// SinceImage holds the creation time of the "since=" reference image
	SinceImage time.Time
	// ReferencePattern is the "reference=" filter value
	ReferencePattern string
	// ImageName is updated per-name by GetImages while filtering
	ImageName string
	// ImageInput is the raw image argument given by the user
	ImageInput string
}

// struct for when a user passes a short or incomplete
// image name
type imageDecomposeStruct struct {
	imageName string
	tag string
	registry string
	hasRegistry bool
	transport string
}

// ImageFilter is a function to determine whether an image is included in
// command output. Images to be outputted are tested using the function. A true
// return will include the image, a false return will exclude it.
type ImageFilter func(*storage.Image, *types.ImageInspectInfo) bool
+
+func (ips imageDecomposeStruct) returnFQName() string {
+ return fmt.Sprintf("%s%s/%s:%s", ips.transport, ips.registry, ips.imageName, ips.tag)
+}
+
+func getRegistriesToTry(image string, store storage.Store) ([]*pullStruct, error) {
+ var pStructs []*pullStruct
+ var imageError = fmt.Sprintf("unable to parse '%s'\n", image)
+ imgRef, err := reference.Parse(image)
+ if err != nil {
+ return nil, errors.Wrapf(err, imageError)
+ }
+ tagged, isTagged := imgRef.(reference.NamedTagged)
+ tag := "latest"
+ if isTagged {
+ tag = tagged.Tag()
+ }
+ hasDomain := true
+ registry := reference.Domain(imgRef.(reference.Named))
+ if registry == "" {
+ hasDomain = false
+ }
+ imageName := reference.Path(imgRef.(reference.Named))
+ pImage := imageDecomposeStruct{
+ imageName,
+ tag,
+ registry,
+ hasDomain,
+ "docker://",
+ }
+ if pImage.hasRegistry {
+ // If input has a registry, we have to assume they included an image
+ // name but maybe not a tag
+ srcRef, err := alltransports.ParseImageName(pImage.returnFQName())
+ if err != nil {
+ return nil, errors.Errorf(imageError)
+ }
+ pStruct := &pullStruct{
+ image: srcRef.DockerReference().String(),
+ srcRef: srcRef,
+ }
+ pStructs = append(pStructs, pStruct)
+ } else {
+ // No registry means we check the globals registries configuration file
+ // and assemble a list of candidate sources to try
+ registryConfigPath := ""
+ envOverride := os.Getenv("REGISTRIES_CONFIG_PATH")
+ if len(envOverride) > 0 {
+ registryConfigPath = envOverride
+ }
+ searchRegistries, err := sysregistries.GetRegistries(&types.SystemContext{SystemRegistriesConfPath: registryConfigPath})
+ if err != nil {
+ fmt.Println(err)
+ return nil, errors.Errorf("unable to parse the registries.conf file and"+
+ " the image name '%s' is incomplete.", imageName)
+ }
+ for _, searchRegistry := range searchRegistries {
+ pImage.registry = searchRegistry
+ srcRef, err := alltransports.ParseImageName(pImage.returnFQName())
+ if err != nil {
+ return nil, errors.Errorf("unable to parse '%s'", pImage.returnFQName())
+ }
+ pStruct := &pullStruct{
+ image: srcRef.DockerReference().String(),
+ srcRef: srcRef,
+ }
+ pStructs = append(pStructs, pStruct)
+ }
+ }
+
+ for _, pStruct := range pStructs {
+ destRef, err := is.Transport.ParseStoreReference(store, pStruct.image)
+ if err != nil {
+ return nil, errors.Errorf("error parsing dest reference name: %v", err)
+ }
+ pStruct.dstRef = destRef
+ }
+ return pStructs, nil
+}
+
// pullStruct pairs a source image reference with the local-storage
// destination reference it will be copied to, plus the human-readable
// name used for progress reporting.
type pullStruct struct {
	image string
	srcRef types.ImageReference
	dstRef types.ImageReference
}
+
+func (r *Runtime) getPullStruct(srcRef types.ImageReference, destName string) (*pullStruct, error) {
+ reference := destName
+ if srcRef.DockerReference() != nil {
+ reference = srcRef.DockerReference().String()
+ }
+ destRef, err := is.Transport.ParseStoreReference(r.store, reference)
+ if err != nil {
+ return nil, errors.Errorf("error parsing dest reference name: %v", err)
+ }
+ return &pullStruct{
+ image: destName,
+ srcRef: srcRef,
+ dstRef: destRef,
+ }, nil
+}
+
// returns a list of pullStruct with the srcRef and DstRef based on the transport being used
func (r *Runtime) getPullListFromRef(srcRef types.ImageReference, imgName string, sc *types.SystemContext) ([]*pullStruct, error) {
	var pullStructs []*pullStruct
	// The text after the last ':' is treated as the archive file path in
	// the docker-archive branch below.
	// NOTE(review): a path that itself contains ':' would be truncated -
	// confirm with callers.
	splitArr := strings.Split(imgName, ":")
	archFile := splitArr[len(splitArr)-1]

	// supports pulling from docker-archive, oci, and registries
	if srcRef.Transport().Name() == DockerArchive {
		tarSource := tarfile.NewSource(archFile)
		manifest, err := tarSource.LoadTarManifest()
		if err != nil {
			return nil, errors.Errorf("error retrieving manifest.json: %v", err)
		}
		// to pull the first image stored in the tar file
		if len(manifest) == 0 {
			// create an image object and use the hex value of the digest as the image ID
			// for parsing the store reference
			newImg, err := srcRef.NewImage(sc)
			if err != nil {
				return nil, err
			}
			defer newImg.Close()
			digest := newImg.ConfigInfo().Digest
			if err := digest.Validate(); err == nil {
				pullInfo, err := r.getPullStruct(srcRef, "@"+digest.Hex())
				if err != nil {
					return nil, err
				}
				pullStructs = append(pullStructs, pullInfo)
			} else {
				return nil, errors.Wrapf(err, "error getting config info")
			}
		} else {
			// Use the first repo tag recorded in the tar manifest.
			pullInfo, err := r.getPullStruct(srcRef, manifest[0].RepoTags[0])
			if err != nil {
				return nil, err
			}
			pullStructs = append(pullStructs, pullInfo)
		}

	} else if srcRef.Transport().Name() == OCIArchive {
		// retrieve the manifest from index.json to access the image name
		manifest, err := ociarchive.LoadManifestDescriptor(srcRef)
		if err != nil {
			return nil, errors.Wrapf(err, "error loading manifest for %q", srcRef)
		}

		// The archive must carry a name annotation for us to name the
		// stored image.
		if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" {
			return nil, errors.Errorf("error, archive doesn't have a name annotation. Cannot store image with no name")
		}
		pullInfo, err := r.getPullStruct(srcRef, manifest.Annotations["org.opencontainers.image.ref.name"])
		if err != nil {
			return nil, err
		}
		pullStructs = append(pullStructs, pullInfo)
	} else if srcRef.Transport().Name() == DirTransport {
		// supports pull from a directory
		// NOTE(review): assumes the directory path is splitArr[1], so a
		// dir path containing ':' is mis-parsed here.
		image := splitArr[1]
		// remove leading "/"
		if image[:1] == "/" {
			image = image[1:]
		}
		pullInfo, err := r.getPullStruct(srcRef, image)
		if err != nil {
			return nil, err
		}
		pullStructs = append(pullStructs, pullInfo)
	} else {
		// Registry (or any other) transport: store under the given name.
		pullInfo, err := r.getPullStruct(srcRef, imgName)
		if err != nil {
			return nil, err
		}
		pullStructs = append(pullStructs, pullInfo)
	}
	return pullStructs, nil
}
+
// PullImage pulls an image from configured registries
// By default, only the latest tag (or a specific tag if requested) will be
// pulled. If allTags is true, all tags for the requested image will be pulled.
// Signature validation will be performed if the Runtime has been appropriately
// configured
//
// NOTE(review): there is no allTags parameter on this signature yet; only the
// resolved candidate references are tried, first success wins.
func (r *Runtime) PullImage(imgName string, options CopyOptions) error {
	r.lock.Lock()
	defer r.lock.Unlock()

	if !r.valid {
		return ErrRuntimeStopped
	}

	// PullImage copies the image from the source to the destination
	var pullStructs []*pullStruct

	// A per-pull signature policy path overrides the runtime default.
	signaturePolicyPath := r.config.SignaturePolicyPath
	if options.SignaturePolicyPath != "" {
		signaturePolicyPath = options.SignaturePolicyPath
	}

	sc := common.GetSystemContext(signaturePolicyPath, options.AuthFile)

	srcRef, err := alltransports.ParseImageName(imgName)
	if err != nil {
		// could be trying to pull from registry with short name
		pullStructs, err = getRegistriesToTry(imgName, r.store)
		if err != nil {
			return errors.Wrap(err, "error getting default registries to try")
		}
	} else {
		pullStructs, err = r.getPullListFromRef(srcRef, imgName, sc)
		if err != nil {
			return errors.Wrapf(err, "error getting pullStruct info to pull image %q", imgName)
		}
	}

	policy, err := signature.DefaultPolicy(sc)
	if err != nil {
		return err
	}

	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		return err
	}
	defer policyContext.Destroy()

	copyOptions := common.GetCopyOptions(options.Writer, signaturePolicyPath, &options.DockerRegistryOptions, nil, options.SigningOptions, options.AuthFile)

	// Try each candidate source in order; return on the first success.
	// NOTE(review): progress is printed to stdout via fmt rather than
	// options.Writer - confirm whether that is intentional.
	for _, imageInfo := range pullStructs {
		fmt.Printf("Trying to pull %s...\n", imageInfo.image)
		if err = cp.Image(policyContext, imageInfo.dstRef, imageInfo.srcRef, copyOptions); err != nil {
			fmt.Println("Failed")
		} else {
			return nil
		}
	}
	// err still holds the failure from the last candidate tried.
	return errors.Wrapf(err, "error pulling image from %q", imgName)
}
+
+// PushImage pushes the given image to a location described by the given path
+func (r *Runtime) PushImage(source string, destination string, options CopyOptions) error {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if !r.valid {
+ return ErrRuntimeStopped
+ }
+
+ // PushImage pushes the src image to the destination
+ //func PushImage(source, destination string, options CopyOptions) error {
+ if source == "" || destination == "" {
+ return errors.Wrapf(syscall.EINVAL, "source and destination image names must be specified")
+ }
+
+ // Get the destination Image Reference
+ dest, err := alltransports.ParseImageName(destination)
+ if err != nil {
+ return errors.Wrapf(err, "error getting destination imageReference for %q", destination)
+ }
+
+ signaturePolicyPath := r.config.SignaturePolicyPath
+ if options.SignaturePolicyPath != "" {
+ signaturePolicyPath = options.SignaturePolicyPath
+ }
+
+ sc := common.GetSystemContext(signaturePolicyPath, options.AuthFile)
+
+ policy, err := signature.DefaultPolicy(sc)
+ if err != nil {
+ return err
+ }
+
+ policyContext, err := signature.NewPolicyContext(policy)
+ if err != nil {
+ return err
+ }
+ defer policyContext.Destroy()
+
+ // Look up the source image, expecting it to be in local storage
+ src, err := is.Transport.ParseStoreReference(r.store, source)
+ if err != nil {
+ return errors.Wrapf(err, "error getting source imageReference for %q", source)
+ }
+
+ copyOptions := common.GetCopyOptions(options.Writer, signaturePolicyPath, nil, &options.DockerRegistryOptions, options.SigningOptions, options.AuthFile)
+
+ // Copy the image to the remote destination
+ err = cp.Image(policyContext, dest, src, copyOptions)
+ if err != nil {
+ return errors.Wrapf(err, "Error copying image to the remote destination")
+ }
+ return nil
+}
+
+// TagImage adds a tag to the given image
+func (r *Runtime) TagImage(image *storage.Image, tag string) error {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if !r.valid {
+ return ErrRuntimeStopped
+ }
+
+ tags, err := r.store.Names(image.ID)
+ if err != nil {
+ return err
+ }
+ for _, key := range tags {
+ if key == tag {
+ return nil
+ }
+ }
+ tags = append(tags, tag)
+ return r.store.SetNames(image.ID, tags)
+}
+
+// UntagImage removes a tag from the given image
+func (r *Runtime) UntagImage(image *storage.Image, tag string) (string, error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if !r.valid {
+ return "", ErrRuntimeStopped
+ }
+
+ tags, err := r.store.Names(image.ID)
+ if err != nil {
+ return "", err
+ }
+ for i, key := range tags {
+ if key == tag {
+ tags[i] = tags[len(tags)-1]
+ tags = tags[:len(tags)-1]
+ break
+ }
+ }
+ if err = r.store.SetNames(image.ID, tags); err != nil {
+ return "", err
+ }
+ return tag, nil
+}
+
+// RemoveImage deletes an image from local storage
+// Images being used by running containers can only be removed if force=true
+func (r *Runtime) RemoveImage(image *storage.Image, force bool) (string, error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if !r.valid {
+ return "", ErrRuntimeStopped
+ }
+
+ containersWithImage, err := r.getContainersWithImage(image.ID)
+ if err != nil {
+ return "", errors.Wrapf(err, "error getting containers for image %q", image.ID)
+ }
+ if len(containersWithImage) > 0 && len(image.Names) <= 1 {
+ if force {
+ if err := r.removeMultipleContainers(containersWithImage); err != nil {
+ return "", err
+ }
+ } else {
+ for _, ctr := range containersWithImage {
+ return "", fmt.Errorf("Could not remove image %q (must force) - container %q is using its reference image", image.ID, ctr.ImageID)
+ }
+ }
+ }
+
+ if len(image.Names) > 1 && !force {
+ return "", fmt.Errorf("unable to delete %s (must force) - image is referred to in multiple tags", image.ID)
+ }
+ // If it is forced, we have to untag the image so that it can be deleted
+ image.Names = image.Names[:0]
+
+ _, err = r.store.DeleteImage(image.ID, true)
+ if err != nil {
+ return "", err
+ }
+ return image.ID, nil
+}
+
+// GetImage retrieves an image matching the given name or hash from system
+// storage
+// If no matching image can be found, an error is returned
+func (r *Runtime) GetImage(image string) (*storage.Image, error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+ return r.getImage(image)
+}
+
// getImage looks up an image by name or ID. Caller must hold r.lock.
// It first tries resolving through the storage transport; if that fails it
// falls back to a direct store lookup.
func (r *Runtime) getImage(image string) (*storage.Image, error) {
	var img *storage.Image
	ref, err := is.Transport.ParseStoreReference(r.store, image)
	if err == nil {
		img, err = is.Transport.GetStoreImage(r.store, ref)
	}
	if err != nil {
		// Fallback: direct lookup in the store.
		img2, err2 := r.store.Image(image)
		if err2 != nil {
			// ref == nil means even parsing the reference failed;
			// report the parse error rather than a lookup failure.
			if ref == nil {
				return nil, errors.Wrapf(err, "error parsing reference to image %q", image)
			}
			return nil, errors.Wrapf(err, "unable to locate image %q", image)
		}
		img = img2
	}
	return img, nil
}
+
+// GetImageRef searches for and returns a new types.Image matching the given name or ID in the given store.
+func (r *Runtime) GetImageRef(image string) (types.Image, error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+ return r.getImageRef(image)
+
+}
+
+func (r *Runtime) getImageRef(image string) (types.Image, error) {
+ img, err := r.getImage(image)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to locate image %q", image)
+ }
+ ref, err := is.Transport.ParseStoreReference(r.store, "@"+img.ID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID)
+ }
+ imgRef, err := ref.NewImage(nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading image %q", img.ID)
+ }
+ return imgRef, nil
+}
+
// GetImages retrieves all images present in storage
// Filters can be provided which will determine which images are included in the
// output. Multiple filters are handled by ANDing their output, so only images
// matching all filters are included
func (r *Runtime) GetImages(params *ImageFilterParams, filters ...ImageFilter) ([]*storage.Image, error) {
	r.lock.RLock()
	defer r.lock.RUnlock()

	if !r.valid {
		return nil, ErrRuntimeStopped
	}

	images, err := r.store.Images()
	if err != nil {
		return nil, err
	}

	var imagesFiltered []*storage.Image

	for _, img := range images {
		info, err := r.getImageInspectInfo(img)
		if err != nil {
			return nil, err
		}
		// Images with no names at all are reported once as "<none>".
		var names []string
		if len(img.Names) > 0 {
			names = img.Names
		} else {
			names = append(names, "<none>")
		}
		// Emit one result entry per name so each tag is filtered and
		// listed independently.
		for _, name := range names {
			include := true
			if params != nil {
				// Expose the name currently being tested so
				// filters can read it via params.
				params.ImageName = name
			}
			for _, filter := range filters {
				include = include && filter(&img, info)
			}

			if include {
				// Copy the loop variable: each result must
				// carry exactly one name, and &img would alias
				// across iterations.
				newImage := img
				newImage.Names = []string{name}
				imagesFiltered = append(imagesFiltered, &newImage)
			}
		}
	}

	return imagesFiltered, nil
}
+
+// GetHistory gets the history of an image and information about its layers
+func (r *Runtime) GetHistory(image string) ([]ociv1.History, []types.BlobInfo, string, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, nil, "", ErrRuntimeStopped
+ }
+
+ img, err := r.getImage(image)
+ if err != nil {
+ return nil, nil, "", errors.Wrapf(err, "no such image %q", image)
+ }
+
+ src, err := r.getImageRef(image)
+ if err != nil {
+ return nil, nil, "", errors.Wrapf(err, "error instantiating image %q", image)
+ }
+
+ oci, err := src.OCIConfig()
+ if err != nil {
+ return nil, nil, "", err
+ }
+
+ return oci.History, src.LayerInfos(), img.ID, nil
+}
+
// ImportImage imports an OCI format image archive into storage as an image
// TODO: not yet implemented; always returns ErrNotImplemented.
func (r *Runtime) ImportImage(path string) (*storage.Image, error) {
	return nil, ErrNotImplemented
}
+
+// GetImageInspectInfo returns the inspect information of an image
+func (r *Runtime) GetImageInspectInfo(image storage.Image) (*types.ImageInspectInfo, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+ return r.getImageInspectInfo(image)
+}
+
+func (r *Runtime) getImageInspectInfo(image storage.Image) (*types.ImageInspectInfo, error) {
+ img, err := r.getImageRef(image.ID)
+ if err != nil {
+ return nil, err
+ }
+ return img.Inspect()
+}
+
+// ParseImageFilter takes a set of images and a filter string as input, and returns the libpod.ImageFilterParams struct
+func (r *Runtime) ParseImageFilter(imageInput, filter string) (*ImageFilterParams, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ if filter == "" && imageInput == "" {
+ return nil, nil
+ }
+
+ var params ImageFilterParams
+ params.ImageInput = imageInput
+
+ if filter == "" && imageInput != "" {
+ return &params, nil
+ }
+
+ images, err := r.store.Images()
+ if err != nil {
+ return nil, err
+ }
+
+ filterStrings := strings.Split(filter, ",")
+ for _, param := range filterStrings {
+ pair := strings.SplitN(param, "=", 2)
+ switch strings.TrimSpace(pair[0]) {
+ case "dangling":
+ if common.IsValidBool(pair[1]) {
+ params.Dangling = pair[1]
+ } else {
+ return nil, fmt.Errorf("invalid filter: '%s=[%s]'", pair[0], pair[1])
+ }
+ case "label":
+ params.Label = pair[1]
+ case "before":
+ if img, err := findImageInSlice(images, pair[1]); err == nil {
+ info, err := r.GetImageInspectInfo(img)
+ if err != nil {
+ return nil, err
+ }
+ params.BeforeImage = info.Created
+ } else {
+ return nil, fmt.Errorf("no such id: %s", pair[0])
+ }
+ case "since":
+ if img, err := findImageInSlice(images, pair[1]); err == nil {
+ info, err := r.GetImageInspectInfo(img)
+ if err != nil {
+ return nil, err
+ }
+ params.SinceImage = info.Created
+ } else {
+ return nil, fmt.Errorf("no such id: %s``", pair[0])
+ }
+ case "reference":
+ params.ReferencePattern = pair[1]
+ default:
+ return nil, fmt.Errorf("invalid filter: '%s'", pair[0])
+ }
+ }
+ return &params, nil
+}
+
+// InfoAndDigestAndSize returns the inspection info and size of the image in the given
+// store and the digest of its manifest, if it has one, or "" if it doesn't.
+func (r *Runtime) InfoAndDigestAndSize(img storage.Image) (*types.ImageInspectInfo, digest.Digest, int64, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, "", -1, ErrRuntimeStopped
+ }
+
+ imgRef, err := r.getImageRef("@" + img.ID)
+ if err != nil {
+ return nil, "", -1, errors.Wrapf(err, "error reading image %q", img.ID)
+ }
+ defer imgRef.Close()
+ return infoAndDigestAndSize(imgRef)
+}
+
+func infoAndDigestAndSize(imgRef types.Image) (*types.ImageInspectInfo, digest.Digest, int64, error) {
+ imgSize, err := imgRef.Size()
+ if err != nil {
+ return nil, "", -1, errors.Wrapf(err, "error reading size of image %q", transports.ImageName(imgRef.Reference()))
+ }
+ manifest, _, err := imgRef.Manifest()
+ if err != nil {
+ return nil, "", -1, errors.Wrapf(err, "error reading manifest for image %q", transports.ImageName(imgRef.Reference()))
+ }
+ manifestDigest := digest.Digest("")
+ if len(manifest) > 0 {
+ manifestDigest = digest.Canonical.FromBytes(manifest)
+ }
+ info, err := imgRef.Inspect()
+ if err != nil {
+ return nil, "", -1, errors.Wrapf(err, "error inspecting image %q", transports.ImageName(imgRef.Reference()))
+ }
+ return info, manifestDigest, imgSize, nil
+}
+
// MatchesID returns true if argID is a full or partial match for id
//
// Fix: the arguments to HasPrefix were reversed. A user-supplied partial ID
// (argID) must be a prefix of the full stored ID (id) — the old form only
// matched when the argument was at least as long as the full ID, defeating
// partial-ID lookup (see the findImageInSlice caller).
func MatchesID(id, argID string) bool {
	return strings.HasPrefix(id, argID)
}
+
// MatchesReference returns true if argName is a full or partial match for name
// Partial matches will register only if they match the most specific part of the name available
// For example, take the image docker.io/library/redis:latest
// redis, library/redis, docker.io/library/redis, redis:latest, etc. will match
// But redis:alpine, ry/redis, library, and io/library/redis will not
//
// Fixes relative to the original:
//   - an untagged stored name combined with a tagged argName indexed
//     splitName[1] out of range and panicked; it now returns false.
//   - a bare HasSuffix allowed "ry/redis" to match "library/redis",
//     contradicting the contract above; matching now requires a "/"
//     component boundary (or an exact repository match).
func MatchesReference(name, argName string) bool {
	if argName == "" {
		return false
	}
	splitName := strings.Split(name, ":")
	// If the arg contains a tag, we handle it differently than if it does not
	if strings.Contains(argName, ":") {
		splitArg := strings.Split(argName, ":")
		if len(splitName) < 2 {
			// Stored name has no tag component to compare against.
			return false
		}
		return matchesRepoSuffix(splitName[0], splitArg[0]) && splitName[1] == splitArg[1]
	}
	return matchesRepoSuffix(splitName[0], argName)
}

// matchesRepoSuffix reports whether arg matches whole trailing path
// components of repo (exact match, or a suffix preceded by "/").
func matchesRepoSuffix(repo, arg string) bool {
	if repo == arg {
		return true
	}
	return strings.HasSuffix(repo, "/"+arg)
}
+
// ParseImageNames parses the names we've stored with an image into a list of
// tagged references and a list of references which contain digests.
//
// Names that fail to parse as named references are skipped silently, as are
// entries whose canonical/tagged form cannot be rebuilt; the returned error
// is therefore currently always nil (the inner err values shadow the named
// return and never escape).
func ParseImageNames(names []string) (tags, digests []string, err error) {
	for _, name := range names {
		if named, err := reference.ParseNamed(name); err == nil {
			if digested, ok := named.(reference.Digested); ok {
				// The name pins a digest; normalize it to its
				// canonical name@digest string form.
				canonical, err := reference.WithDigest(named, digested.Digest())
				if err == nil {
					digests = append(digests, canonical.String())
				}
			} else {
				if reference.IsNameOnly(named) {
					// No tag present; apply the default tag
					// (normally "latest").
					named = reference.TagNameOnly(named)
				}
				if tagged, ok := named.(reference.Tagged); ok {
					namedTagged, err := reference.WithTag(named, tagged.Tag())
					if err == nil {
						tags = append(tags, namedTagged.String())
					}
				}
			}
		}
	}
	return tags, digests, nil
}
+
+func annotations(manifest []byte, manifestType string) map[string]string {
+ annotations := make(map[string]string)
+ switch manifestType {
+ case ociv1.MediaTypeImageManifest:
+ var m ociv1.Manifest
+ if err := json.Unmarshal(manifest, &m); err == nil {
+ for k, v := range m.Annotations {
+ annotations[k] = v
+ }
+ }
+ }
+ return annotations
+}
+
+func findImageInSlice(images []storage.Image, ref string) (storage.Image, error) {
+ for _, image := range images {
+ if MatchesID(image.ID, ref) {
+ return image, nil
+ }
+ for _, name := range image.Names {
+ if MatchesReference(name, ref) {
+ return image, nil
+ }
+ }
+ }
+ return storage.Image{}, errors.New("could not find image")
+}
diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go
new file mode 100644
index 000000000..162b353e6
--- /dev/null
+++ b/libpod/runtime_pod.go
@@ -0,0 +1,122 @@
+package libpod
+
+import (
+ "github.com/pkg/errors"
+)
+
// Contains the public Runtime API for pods

// A PodCreateOption is a functional option which alters the Pod created by
// NewPod. Options are applied in order before the pod is committed to the
// runtime state.
type PodCreateOption func(*Pod) error

// PodFilter is a function to determine whether a pod is included in command
// output. Pods to be outputted are tested using the function. A true return
// will include the pod, a false return will exclude it.
type PodFilter func(*Pod) bool
+
+// NewPod makes a new, empty pod
+func (r *Runtime) NewPod(options ...PodCreateOption) (*Pod, error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ pod, err := newPod()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating pod")
+ }
+
+ for _, option := range options {
+ if err := option(pod); err != nil {
+ return nil, errors.Wrapf(err, "error running pod create option")
+ }
+ }
+
+ pod.valid = true
+
+ if err := r.state.AddPod(pod); err != nil {
+ return nil, errors.Wrapf(err, "error adding pod to state")
+ }
+
+ return nil, ErrNotImplemented
+}
+
// RemovePod removes a pod and all containers in it
// If force is specified, all containers in the pod will be stopped first
// Otherwise, RemovePod will return an error if any container in the pod is running
// Remove acts atomically, removing all containers or no containers
//
// NOTE: not yet implemented; this always returns ErrNotImplemented and
// leaves the pod and its containers untouched.
func (r *Runtime) RemovePod(p *Pod, force bool) error {
	return ErrNotImplemented
}
+
+// GetPod retrieves a pod by its ID
+func (r *Runtime) GetPod(id string) (*Pod, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ return r.state.Pod(id)
+}
+
+// HasPod checks to see if a pod with the given ID exists
+func (r *Runtime) HasPod(id string) (bool, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return false, ErrRuntimeStopped
+ }
+
+ return r.state.HasPod(id)
+}
+
+// LookupPod retrieves a pod by its name or a partial ID
+// If a partial ID is not unique, an error will be returned
+func (r *Runtime) LookupPod(idOrName string) (*Pod, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ return r.state.LookupPod(idOrName)
+}
+
+// Pods retrieves all pods
+// Filters can be provided which will determine which pods are included in the
+// output. Multiple filters are handled by ANDing their output, so only pods
+// matching all filters are returned
+func (r *Runtime) Pods(filters ...PodFilter) ([]*Pod, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ pods, err := r.state.AllPods()
+ if err != nil {
+ return nil, err
+ }
+
+ podsFiltered := make([]*Pod, 0, len(pods))
+ for _, pod := range pods {
+ include := true
+ for _, filter := range filters {
+ include = include && filter(pod)
+ }
+
+ if include {
+ podsFiltered = append(podsFiltered, pod)
+ }
+ }
+
+ return podsFiltered, nil
+}
diff --git a/libpod/state.go b/libpod/state.go
new file mode 100644
index 000000000..1c21911bb
--- /dev/null
+++ b/libpod/state.go
@@ -0,0 +1,38 @@
+package libpod
+
// State is a storage backend for libpod's current state
type State interface {
	// Container retrieves a container from the state. Accepts the full
	// ID of the container.
	Container(id string) (*Container, error)
	// LookupContainer retrieves a container by full or partial ID (as
	// long as it is unique) or by name.
	LookupContainer(idOrName string) (*Container, error)
	// HasContainer checks if a container with the given full ID is
	// present in the state.
	HasContainer(id string) (bool, error)
	// AddContainer adds a container to the state.
	// If the container belongs to a pod, that pod must already be present
	// in the state when the container is added, and the container must be
	// present in the pod.
	AddContainer(ctr *Container) error
	// RemoveContainer removes a container from the state.
	// The container will only be removed from the state, not from the pod
	// which the container belongs to.
	RemoveContainer(ctr *Container) error
	// AllContainers retrieves all containers presently in the state.
	AllContainers() ([]*Container, error)

	// Pod retrieves a pod from the state. Accepts the full ID of the pod.
	Pod(id string) (*Pod, error)
	// LookupPod retrieves a pod by full or partial ID (as long as it is
	// unique) or by name.
	LookupPod(idOrName string) (*Pod, error)
	// HasPod checks if a pod with the given full ID is present in the
	// state.
	HasPod(id string) (bool, error)
	// AddPod adds a pod to the state.
	// Only empty pods can be added to the state.
	AddPod(pod *Pod) error
	// RemovePod removes a pod from the state.
	// Containers within a pod will not be removed from the state, and will
	// not be changed to remove them from the now-removed pod.
	RemovePod(pod *Pod) error
	// AllPods retrieves all pods presently in the state.
	AllPods() ([]*Pod, error)
}
diff --git a/libpod/storage.go b/libpod/storage.go
new file mode 100644
index 000000000..f0bf9e9cd
--- /dev/null
+++ b/libpod/storage.go
@@ -0,0 +1,261 @@
+package libpod
+
+import (
+ "encoding/json"
+ "time"
+
+ istorage "github.com/containers/image/storage"
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
// storageService wraps a containers/storage Store and provides the
// container-storage operations (create, delete, mount/unmount, metadata
// access) used by the runtime.
type storageService struct {
	store storage.Store
}
+
// getStorageService returns a storageService which can create container root
// filesystems from images stored in the given store. The error return is
// currently always nil, kept for interface stability.
func getStorageService(store storage.Store) (*storageService, error) {
	return &storageService{store: store}, nil
}
+
// ContainerInfo wraps a subset of information about a container: the locations
// of its nonvolatile and volatile per-container directories, along with a copy
// of the configuration blob from the image that was used to create the
// container, if the image had a configuration.
type ContainerInfo struct {
	Dir    string // nonvolatile per-container directory
	RunDir string // volatile (runtime) per-container directory
	Config *v1.Image // image's OCI configuration; may be nil if the image had none
}
+
// RuntimeContainerMetadata is the structure that we encode as JSON and store
// in the metadata field of storage.Container objects. It is used for
// specifying attributes of containers when they are being created, and allows a
// container's MountLabel, and possibly other values, to be modified in one
// read/write cycle via calls to storageService.GetContainerMetadata,
// RuntimeContainerMetadata.SetMountLabel, and
// storageService.SetContainerMetadata.
type RuntimeContainerMetadata struct {
	// The provided name and the ID of the image that was used to
	// instantiate the container.
	ImageName string `json:"image-name"` // Applicable to both PodSandboxes and Containers
	ImageID string `json:"image-id"` // Applicable to both PodSandboxes and Containers
	// The container's name, which for an infrastructure container is usually PodName + "-infra".
	ContainerName string `json:"name"` // Applicable to both PodSandboxes and Containers, mandatory
	CreatedAt int64 `json:"created-at"` // Creation time as Unix seconds; applicable to both PodSandboxes and Containers
	MountLabel string `json:"mountlabel,omitempty"` // SELinux label used when mounting; applicable to both PodSandboxes and Containers
}
+
// SetMountLabel updates the mount label held by a RuntimeContainerMetadata
// object. The change is only in memory; persist it with
// storageService.SetContainerMetadata.
func (metadata *RuntimeContainerMetadata) SetMountLabel(mountLabel string) {
	metadata.MountLabel = mountLabel
}
+
// CreateContainerStorage creates the storage end of things. We already have the container spec created
// TO-DO We should be passing in a KpodImage object in the future.
//
// The image is resolved via imageName; on success the returned ContainerInfo
// carries the container's work directories and a copy of the image's OCI
// configuration. If creation fails partway through, the partially-created
// container is deleted before returning.
func (r *storageService) CreateContainerStorage(systemContext *types.SystemContext, imageName, imageID, containerName, containerID, mountLabel string) (ContainerInfo, error) {
	var ref types.ImageReference
	// An image (name or ID) and a container name are both mandatory.
	if imageName == "" && imageID == "" {
		return ContainerInfo{}, ErrEmptyID
	}
	if containerName == "" {
		return ContainerInfo{}, ErrEmptyID
	}
	// Check if we have the specified image.
	// NOTE(review): only imageName is used for this lookup — an empty name
	// with a non-empty imageID will fail to parse here; confirm intended.
	ref, err := istorage.Transport.ParseStoreReference(r.store, imageName)
	if err != nil {
		return ContainerInfo{}, err
	}
	img, err := istorage.Transport.GetStoreImage(r.store, ref)
	if err != nil {
		return ContainerInfo{}, err
	}
	// Pull out a copy of the image's configuration.
	image, err := ref.NewImage(systemContext)
	if err != nil {
		return ContainerInfo{}, err
	}
	defer image.Close()

	imageConfig, err := image.OCIConfig()
	if err != nil {
		return ContainerInfo{}, err
	}

	// Update the image name and ID.
	// The imageID parameter is overwritten with the resolved image's ID so
	// the stored metadata always refers to the actual image used.
	if imageName == "" && len(img.Names) > 0 {
		imageName = img.Names[0]
	}
	imageID = img.ID

	// Build metadata to store with the container.
	metadata := RuntimeContainerMetadata{
		ImageName: imageName,
		ImageID: imageID,
		ContainerName: containerName,
		CreatedAt: time.Now().Unix(),
		MountLabel: mountLabel,
	}
	mdata, err := json.Marshal(&metadata)
	if err != nil {
		return ContainerInfo{}, err
	}

	// Build the container.
	names := []string{containerName}

	container, err := r.store.CreateContainer(containerID, names, img.ID, "", string(mdata), nil)
	if err != nil {
		logrus.Debugf("failed to create container %s(%s): %v", metadata.ContainerName, containerID, err)

		return ContainerInfo{}, err
	}
	logrus.Debugf("created container %q", container.ID)

	// If anything fails after this point, we need to delete the incomplete
	// container before returning.
	// NOTE: this cleanup keys off the function-scoped err variable, so
	// every later failure path must assign to err (not shadow it) for the
	// deletion to fire — the assignments below all do.
	defer func() {
		if err != nil {
			if err2 := r.store.DeleteContainer(container.ID); err2 != nil {
				logrus.Infof("%v deleting partially-created container %q", err2, container.ID)

				return
			}
			logrus.Infof("deleted partially-created container %q", container.ID)
		}
	}()

	// Add a name to the container's layer so that it's easier to follow
	// what's going on if we're just looking at the storage-eye view of things.
	layerName := metadata.ContainerName + "-layer"
	names, err = r.store.Names(container.LayerID)
	if err != nil {
		return ContainerInfo{}, err
	}
	names = append(names, layerName)
	err = r.store.SetNames(container.LayerID, names)
	if err != nil {
		return ContainerInfo{}, err
	}

	// Find out where the container work directories are, so that we can return them.
	containerDir, err := r.store.ContainerDirectory(container.ID)
	if err != nil {
		return ContainerInfo{}, err
	}
	logrus.Debugf("container %q has work directory %q", container.ID, containerDir)

	containerRunDir, err := r.store.ContainerRunDirectory(container.ID)
	if err != nil {
		return ContainerInfo{}, err
	}
	logrus.Debugf("container %q has run directory %q", container.ID, containerRunDir)

	return ContainerInfo{
		Dir: containerDir,
		RunDir: containerRunDir,
		Config: imageConfig,
	}, nil
}
+
+func (r *storageService) DeleteContainer(idOrName string) error {
+ if idOrName == "" {
+ return ErrEmptyID
+ }
+ container, err := r.store.Container(idOrName)
+ if err != nil {
+ return err
+ }
+ err = r.store.DeleteContainer(container.ID)
+ if err != nil {
+ logrus.Debugf("failed to delete container %q: %v", container.ID, err)
+ return err
+ }
+ return nil
+}
+
+func (r *storageService) SetContainerMetadata(idOrName string, metadata RuntimeContainerMetadata) error {
+ mdata, err := json.Marshal(&metadata)
+ if err != nil {
+ logrus.Debugf("failed to encode metadata for %q: %v", idOrName, err)
+ return err
+ }
+ return r.store.SetMetadata(idOrName, string(mdata))
+}
+
+func (r *storageService) GetContainerMetadata(idOrName string) (RuntimeContainerMetadata, error) {
+ metadata := RuntimeContainerMetadata{}
+ mdata, err := r.store.Metadata(idOrName)
+ if err != nil {
+ return metadata, err
+ }
+ if err = json.Unmarshal([]byte(mdata), &metadata); err != nil {
+ return metadata, err
+ }
+ return metadata, nil
+}
+
+func (r *storageService) StartContainer(idOrName string) (string, error) {
+ container, err := r.store.Container(idOrName)
+ if err != nil {
+ if errors.Cause(err) == storage.ErrContainerUnknown {
+ return "", ErrNoSuchCtr
+ }
+ return "", err
+ }
+ metadata := RuntimeContainerMetadata{}
+ if err = json.Unmarshal([]byte(container.Metadata), &metadata); err != nil {
+ return "", err
+ }
+ mountPoint, err := r.store.Mount(container.ID, metadata.MountLabel)
+ if err != nil {
+ logrus.Debugf("failed to mount container %q: %v", container.ID, err)
+ return "", err
+ }
+ logrus.Debugf("mounted container %q at %q", container.ID, mountPoint)
+ return mountPoint, nil
+}
+
+func (r *storageService) StopContainer(idOrName string) error {
+ if idOrName == "" {
+ return ErrEmptyID
+ }
+ container, err := r.store.Container(idOrName)
+ if err != nil {
+ return err
+ }
+ err = r.store.Unmount(container.ID)
+ if err != nil {
+ logrus.Debugf("failed to unmount container %q: %v", container.ID, err)
+ return err
+ }
+ logrus.Debugf("unmounted container %q", container.ID)
+ return nil
+}
+
+func (r *storageService) GetWorkDir(id string) (string, error) {
+ container, err := r.store.Container(id)
+ if err != nil {
+ if errors.Cause(err) == storage.ErrContainerUnknown {
+ return "", ErrNoSuchCtr
+ }
+ return "", err
+ }
+ return r.store.ContainerDirectory(container.ID)
+}
+
+func (r *storageService) GetRunDir(id string) (string, error) {
+ container, err := r.store.Container(id)
+ if err != nil {
+ if errors.Cause(err) == storage.ErrContainerUnknown {
+ return "", ErrNoSuchCtr
+ }
+ return "", err
+ }
+ return r.store.ContainerRunDirectory(container.ID)
+}