Diffstat (limited to 'libpod')
 libpod/container.go                |   4
 libpod/container_api.go            |  50
 libpod/container_inspect.go        | 303
 libpod/container_internal.go       |  43
 libpod/container_internal_linux.go | 137
 libpod/driver/driver.go            |  11
 libpod/errors.go                   |   3
 libpod/healthcheck.go              |  33
 libpod/image/image.go              |   2
 libpod/networking_linux.go         |   3
 libpod/networking_unsupported.go   |   6
 libpod/oci.go                      |  12
 libpod/oci_linux.go                |  20
 libpod/options.go                  |   2
 libpod/pod.go                      |   6
 libpod/runtime.go                  |  39
 libpod/runtime_cstorage.go (new)   | 118
 libpod/runtime_ctr.go              |  88
 libpod/volume.go                   |   1
 19 files changed, 744 insertions(+), 137 deletions(-)
diff --git a/libpod/container.go b/libpod/container.go
index c8ab42fc3..68c4cd6b0 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -135,7 +135,6 @@ const (
// assume that their callers handled this requirement. Generally speaking, if a
// function takes the container lock and accesses any part of state, it should
// syncContainer() immediately after locking.
-// ffjson: skip
type Container struct {
config *ContainerConfig
@@ -161,7 +160,6 @@ type Container struct {
// ContainerState contains the current state of the container
// It is stored on disk in a tmpfs and recreated on reboot
-// easyjson:json
type ContainerState struct {
// The current state of the running container
State ContainerStatus `json:"state"`
@@ -222,7 +220,6 @@ type ContainerState struct {
}
// ExecSession contains information on an active exec session
-// easyjson:json
type ExecSession struct {
ID string `json:"id"`
Command []string `json:"command"`
@@ -232,7 +229,6 @@ type ExecSession struct {
// ContainerConfig contains all information that was used to create the
// container. It may not be changed once created.
// It is stored, read-only, on disk
-// easyjson:json
type ContainerConfig struct {
Spec *spec.Spec `json:"spec"`
ID string `json:"id"`
diff --git a/libpod/container_api.go b/libpod/container_api.go
index eff5bfe5f..0e877d04e 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -10,9 +10,7 @@ import (
"sync"
"time"
- "github.com/containers/libpod/libpod/driver"
"github.com/containers/libpod/libpod/events"
- "github.com/containers/libpod/pkg/inspect"
"github.com/containers/libpod/pkg/lookup"
"github.com/containers/storage/pkg/stringid"
"github.com/docker/docker/oci/caps"
@@ -182,7 +180,7 @@ func (c *Container) StopWithTimeout(timeout uint) error {
if c.state.State == ContainerStateConfigured ||
c.state.State == ContainerStateUnknown ||
c.state.State == ContainerStatePaused {
- return errors.Wrapf(ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s in state %s", c.ID(), c.state.State.String())
+ return errors.Wrapf(ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s is in state %s", c.ID(), c.state.State.String())
}
if c.state.State == ContainerStateStopped ||
@@ -205,7 +203,7 @@ func (c *Container) Kill(signal uint) error {
}
if c.state.State != ContainerStateRunning {
- return errors.Wrapf(ErrCtrStateInvalid, "can only kill running containers")
+ return errors.Wrapf(ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
}
defer c.newContainerEvent(events.Kill)
@@ -243,7 +241,7 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir
// TODO can probably relax this once we track exec sessions
if conState != ContainerStateRunning {
- return errors.Errorf("cannot exec into container that is not running")
+ return errors.Wrapf(ErrCtrStateInvalid, "cannot exec into container that is not running")
}
if privileged || c.config.Privileged {
capList = caps.GetAllCapabilities()
@@ -535,32 +533,6 @@ func (c *Container) RemoveArtifact(name string) error {
return os.Remove(c.getArtifactPath(name))
}
-// Inspect a container for low-level information
-func (c *Container) Inspect(size bool) (*inspect.ContainerInspectData, error) {
- if !c.batched {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- if err := c.syncContainer(); err != nil {
- return nil, err
- }
- }
-
- storeCtr, err := c.runtime.store.Container(c.ID())
- if err != nil {
- return nil, errors.Wrapf(err, "error getting container from store %q", c.ID())
- }
- layer, err := c.runtime.store.Layer(storeCtr.LayerID)
- if err != nil {
- return nil, errors.Wrapf(err, "error reading information about layer %q", storeCtr.LayerID)
- }
- driverData, err := driver.GetDriverData(c.runtime.store, layer.ID)
- if err != nil {
- return nil, errors.Wrapf(err, "error getting graph driver info %q", c.ID())
- }
- return c.getContainerInspectData(size, driverData)
-}
-
// Wait blocks until the container exits and returns its exit code.
func (c *Container) Wait() (int32, error) {
return c.WaitWithInterval(DefaultWaitInterval)
@@ -815,11 +787,27 @@ type ContainerCheckpointOptions struct {
// TCPEstablished tells the API to checkpoint a container
// even if it contains established TCP connections
TCPEstablished bool
+ // Export tells the API to write the checkpoint image to
+ // the filename set in TargetFile
+ // Import tells the API to read the checkpoint image from
+ // the filename set in TargetFile
+ TargetFile string
+ // Name tells the API that during restore from an exported
+ // checkpoint archive a new name should be used for the
+ // restored container
+ Name string
}
// Checkpoint checkpoints a container
func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
logrus.Debugf("Trying to checkpoint container %s", c.ID())
+
+ if options.TargetFile != "" {
+ if err := c.prepareCheckpointExport(); err != nil {
+ return err
+ }
+ }
+
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
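
The new TargetFile field turns Checkpoint into an export operation: prepareCheckpointExport() first writes the container config and OCI spec into the bundle, and exportCheckpoint() (in container_internal_linux.go, further down) tars the bundle into the given file. A minimal caller sketch, assuming ctr is a *libpod.Container obtained from a Runtime and ctx is a context.Context; the path is illustrative:

    opts := libpod.ContainerCheckpointOptions{
        Keep:           true,  // keep dump.log, stats-dump, config.dump and spec.dump in the bundle
        KeepRunning:    false, // container is stopped once the checkpoint completes
        TCPEstablished: false,
        TargetFile:     "/tmp/ctr-checkpoint.tar.gz", // triggers prepareCheckpointExport + exportCheckpoint
    }
    if err := ctr.Checkpoint(ctx, opts); err != nil {
        return err
    }
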
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index a7369bfdd..0a62ceb7c 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -1,15 +1,157 @@
package libpod
import (
- "strings"
+ "time"
- "github.com/containers/libpod/pkg/inspect"
+ "github.com/containers/libpod/libpod/driver"
"github.com/cri-o/ocicni/pkg/ocicni"
- specs "github.com/opencontainers/runtime-spec/specs-go"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
-func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data) (*inspect.ContainerInspectData, error) {
+// InspectContainerData provides a detailed record of a container's configuration
+// and state as viewed by Libpod.
+// Large portions of this structure are defined such that the output is
+// compatible with `docker inspect` JSON, but additional fields have been added
+// as required to share information not in the original output.
+type InspectContainerData struct {
+ ID string `json:"Id"`
+ Created time.Time `json:"Created"`
+ Path string `json:"Path"`
+ Args []string `json:"Args"`
+ State *InspectContainerState `json:"State"`
+ ImageID string `json:"Image"`
+ ImageName string `json:"ImageName"`
+ Rootfs string `json:"Rootfs"`
+ ResolvConfPath string `json:"ResolvConfPath"`
+ HostnamePath string `json:"HostnamePath"`
+ HostsPath string `json:"HostsPath"`
+ StaticDir string `json:"StaticDir"`
+ OCIConfigPath string `json:"OCIConfigPath,omitempty"`
+ LogPath string `json:"LogPath"`
+ ConmonPidFile string `json:"ConmonPidFile"`
+ Name string `json:"Name"`
+ RestartCount int32 `json:"RestartCount"`
+ Driver string `json:"Driver"`
+ MountLabel string `json:"MountLabel"`
+ ProcessLabel string `json:"ProcessLabel"`
+ AppArmorProfile string `json:"AppArmorProfile"`
+ EffectiveCaps []string `json:"EffectiveCaps"`
+ BoundingCaps []string `json:"BoundingCaps"`
+ ExecIDs []string `json:"ExecIDs"`
+ GraphDriver *driver.Data `json:"GraphDriver"`
+ SizeRw int64 `json:"SizeRw,omitempty"`
+ SizeRootFs int64 `json:"SizeRootFs,omitempty"`
+ Mounts []*InspectMount `json:"Mounts"`
+ Dependencies []string `json:"Dependencies"`
+ NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"` //TODO
+ ExitCommand []string `json:"ExitCommand"`
+ Namespace string `json:"Namespace"`
+ IsInfra bool `json:"IsInfra"`
+}
+
+// InspectMount provides a record of a single mount in a container. It contains
+// fields for both named and normal volumes. Only user-specified volumes will be
+// included, and tmpfs volumes are not included even if the user specified them.
+type InspectMount struct {
+ // Whether the mount is a volume or bind mount. Allowed values are
+ // "volume" and "bind".
+ Type string `json:"Type"`
+ // The name of the volume. Empty for bind mounts.
+ Name string `json:"Name,omitempty"`
+ // The source directory for the volume.
+ Src string `json:"Source"`
+ // The destination directory for the volume. Specified as a path within
+ // the container, as it would be passed into the OCI runtime.
+ Dst string `json:"Destination"`
+ // The driver used for the named volume. Empty for bind mounts.
+ Driver string `json:"Driver"`
+ // Contains SELinux :z/:Z mount options. Unclear what, if anything, else
+ // goes in here.
+ Mode string `json:"Mode"`
+ // All remaining mount options. Additional data, not present in the
+ // original output.
+ Options []string `json:"Options"`
+ // Whether the volume is read-write
+ RW bool `json:"RW"`
+ // Mount propagation for the mount. Can be empty if not specified, but
+ // is always printed - no omitempty.
+ Propagation string `json:"Propagation"`
+}
+
+// InspectContainerState provides a detailed record of a container's current
+// state. It is returned as part of InspectContainerData.
+// As with InspectContainerData, many portions of this struct are matched to
+// Docker, but here we see more fields that are unused (nonsensical in the
+// context of Libpod).
+type InspectContainerState struct {
+ OciVersion string `json:"OciVersion"`
+ Status string `json:"Status"`
+ Running bool `json:"Running"`
+ Paused bool `json:"Paused"`
+ Restarting bool `json:"Restarting"` // TODO
+ OOMKilled bool `json:"OOMKilled"`
+ Dead bool `json:"Dead"`
+ Pid int `json:"Pid"`
+ ExitCode int32 `json:"ExitCode"`
+ Error string `json:"Error"` // TODO
+ StartedAt time.Time `json:"StartedAt"`
+ FinishedAt time.Time `json:"FinishedAt"`
+ Healthcheck HealthCheckResults `json:"Healthcheck,omitempty"`
+}
+
+// InspectNetworkSettings holds information about the network settings of the
+// container.
+// Many fields are maintained only for compatibility with `docker inspect` and
+// are unused within Libpod.
+type InspectNetworkSettings struct {
+ Bridge string `json:"Bridge"`
+ SandboxID string `json:"SandboxID"`
+ HairpinMode bool `json:"HairpinMode"`
+ LinkLocalIPv6Address string `json:"LinkLocalIPv6Address"`
+ LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen"`
+ Ports []ocicni.PortMapping `json:"Ports"`
+ SandboxKey string `json:"SandboxKey"`
+ SecondaryIPAddresses []string `json:"SecondaryIPAddresses"`
+ SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses"`
+ EndpointID string `json:"EndpointID"`
+ Gateway string `json:"Gateway"`
+ GlobalIPv6Address string `json:"GlobalIPv6Address"`
+ GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen"`
+ IPAddress string `json:"IPAddress"`
+ IPPrefixLen int `json:"IPPrefixLen"`
+ IPv6Gateway string `json:"IPv6Gateway"`
+ MacAddress string `json:"MacAddress"`
+}
+
+// Inspect a container for low-level information
+func (c *Container) Inspect(size bool) (*InspectContainerData, error) {
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return nil, err
+ }
+ }
+
+ storeCtr, err := c.runtime.store.Container(c.ID())
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting container from store %q", c.ID())
+ }
+ layer, err := c.runtime.store.Layer(storeCtr.LayerID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading information about layer %q", storeCtr.LayerID)
+ }
+ driverData, err := driver.GetDriverData(c.runtime.store, layer.ID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting graph driver info %q", c.ID())
+ }
+ return c.getContainerInspectData(size, driverData)
+}
+
+func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) (*InspectContainerData, error) {
config := c.config
runtimeInfo := c.state
spec, err := c.specFromState()
@@ -35,42 +177,32 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
execIDs = append(execIDs, id)
}
- if c.state.BindMounts == nil {
- c.state.BindMounts = make(map[string]string)
- }
-
resolvPath := ""
- if getPath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
- resolvPath = getPath
- }
-
hostsPath := ""
- if getPath, ok := c.state.BindMounts["/etc/hosts"]; ok {
- hostsPath = getPath
- }
-
hostnamePath := ""
- if getPath, ok := c.state.BindMounts["/etc/hostname"]; ok {
- hostnamePath = getPath
- }
-
- var mounts []specs.Mount
- for i, mnt := range spec.Mounts {
- mounts = append(mounts, mnt)
- // We only want to show the name of the named volume in the inspect
- // output, so split the path and get the name out of it.
- if strings.Contains(mnt.Source, c.runtime.config.VolumePath) {
- split := strings.Split(mnt.Source[len(c.runtime.config.VolumePath)+1:], "/")
- mounts[i].Source = split[0]
+ if c.state.BindMounts != nil {
+ if getPath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
+ resolvPath = getPath
+ }
+ if getPath, ok := c.state.BindMounts["/etc/hosts"]; ok {
+ hostsPath = getPath
+ }
+ if getPath, ok := c.state.BindMounts["/etc/hostname"]; ok {
+ hostnamePath = getPath
}
}
- data := &inspect.ContainerInspectData{
+ mounts, err := c.getInspectMounts()
+ if err != nil {
+ return nil, err
+ }
+
+ data := &InspectContainerData{
ID: config.ID,
Created: config.CreatedTime,
Path: path,
Args: args,
- State: &inspect.ContainerInspectState{
+ State: &InspectContainerState{
OciVersion: spec.Version,
Status: runtimeInfo.State.String(),
Running: runtimeInfo.State == ContainerStateRunning,
@@ -106,7 +238,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
GraphDriver: driverData,
Mounts: mounts,
Dependencies: c.Dependencies(),
- NetworkSettings: &inspect.NetworkSettings{
+ NetworkSettings: &InspectNetworkSettings{
Bridge: "", // TODO
SandboxID: "", // TODO - is this even relevant?
HairpinMode: false, // TODO
@@ -129,8 +261,12 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
IsInfra: c.IsInfra(),
}
+ if c.state.ConfigPath != "" {
+ data.OCIConfigPath = c.state.ConfigPath
+ }
+
if c.config.HealthCheckConfig != nil {
- // This container has a healthcheck defined in it; we need to add its state
+ // This container has a healthcheck defined in it; we need to add its state
healthCheckState, err := c.GetHealthCheckLog()
if err != nil {
// An error here is not considered fatal; no health state will be displayed
@@ -162,3 +298,106 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
}
return data, nil
}
+
+// Get inspect-formatted mounts list.
+// Only includes user-specified mounts. Only includes bind mounts and named
+// volumes, not tmpfs volumes.
+func (c *Container) getInspectMounts() ([]*InspectMount, error) {
+ inspectMounts := []*InspectMount{}
+
+ // No mounts, return early
+ if len(c.config.UserVolumes) == 0 {
+ return inspectMounts, nil
+ }
+
+ // We need to parse all named volumes and mounts into maps, so we don't
+ // end up with repeated lookups for each user volume.
+ // Map destination to struct, as destination is what is stored in
+ // UserVolumes.
+ namedVolumes := make(map[string]*ContainerNamedVolume)
+ mounts := make(map[string]spec.Mount)
+ for _, namedVol := range c.config.NamedVolumes {
+ namedVolumes[namedVol.Dest] = namedVol
+ }
+ for _, mount := range c.config.Spec.Mounts {
+ mounts[mount.Destination] = mount
+ }
+
+ for _, vol := range c.config.UserVolumes {
+ // We need to look up the volumes.
+ // First: is it a named volume?
+ if volume, ok := namedVolumes[vol]; ok {
+ mountStruct := new(InspectMount)
+ mountStruct.Type = "volume"
+ mountStruct.Dst = volume.Dest
+ mountStruct.Name = volume.Name
+
+ // For src and driver, we need to look up the named
+ // volume.
+ volFromDB, err := c.runtime.state.Volume(volume.Name)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID())
+ }
+ mountStruct.Driver = volFromDB.Driver()
+ mountStruct.Src = volFromDB.MountPoint()
+
+ parseMountOptionsForInspect(volume.Options, mountStruct)
+
+ inspectMounts = append(inspectMounts, mountStruct)
+ } else if mount, ok := mounts[vol]; ok {
+ // It's a mount.
+ // Is it a tmpfs? If so, discard.
+ if mount.Type == "tmpfs" {
+ continue
+ }
+
+ mountStruct := new(InspectMount)
+ mountStruct.Type = "bind"
+ mountStruct.Src = mount.Source
+ mountStruct.Dst = mount.Destination
+
+ parseMountOptionsForInspect(mount.Options, mountStruct)
+
+ inspectMounts = append(inspectMounts, mountStruct)
+ } else {
+ // We couldn't find a mount. Log a warning.
+ logrus.Warnf("Could not find mount at destination %q when building inspect output for container %s", vol, c.ID())
+ }
+ }
+
+ return inspectMounts, nil
+}
+
+// Parse mount options so we can populate them in the mount structure.
+// The mount passed in will be modified.
+func parseMountOptionsForInspect(options []string, mount *InspectMount) {
+ isRW := true
+ mountProp := ""
+ zZ := ""
+ otherOpts := []string{}
+
+ // Some of these may be overwritten if the user passes us garbage opts
+ // (for example, [ro,rw])
+ // We catch these on the Podman side, so not a problem there, but other
+ // users of libpod who do not properly validate mount options may see
+ // this.
+ // Not really worth dealing with on our end - garbage in, garbage out.
+ for _, opt := range options {
+ switch opt {
+ case "ro":
+ isRW = false
+ case "rw":
+ // Do nothing, silently discard
+ case "shared", "slave", "private", "rshared", "rslave", "rprivate":
+ mountProp = opt
+ case "z", "Z":
+ zZ = opt
+ default:
+ otherOpts = append(otherOpts, opt)
+ }
+ }
+
+ mount.RW = isRW
+ mount.Propagation = mountProp
+ mount.Mode = zZ
+ mount.Options = otherOpts
+}
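
With the inspect types defined in libpod itself, callers get a docker-compatible document straight from the container. A minimal consumer sketch, assuming ctr is a *libpod.Container and that encoding/json and fmt are imported; passing true asks for the size fields (SizeRw/SizeRootFs) to be filled in:

    data, err := ctr.Inspect(true)
    if err != nil {
        return err
    }
    out, err := json.MarshalIndent(data, "", "    ")
    if err != nil {
        return err
    }
    fmt.Println(string(out))
    // Only user-specified bind mounts and named volumes appear in Mounts;
    // tmpfs mounts are filtered out by getInspectMounts().
    for _, m := range data.Mounts {
        fmt.Printf("%s %s -> %s (rw=%t)\n", m.Type, m.Src, m.Dst, m.RW)
    }
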
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 5f8dd1c72..9245a8840 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -21,6 +21,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/mount"
spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
@@ -359,7 +360,8 @@ func (c *Container) setupStorage(ctx context.Context) error {
}
return false
}
- defOptions, err := storage.GetDefaultMountOptions()
+
+ defOptions, err := storage.GetMountOptions(c.runtime.store.GraphDriverName(), c.runtime.store.GraphOptions())
if err != nil {
return errors.Wrapf(err, "error getting default mount options")
}
@@ -1345,7 +1347,7 @@ func (c *Container) appendStringToRundir(destFile, output string) (string, error
return filepath.Join(c.state.RunDir, destFile), nil
}
-// Save OCI spec to disk, replacing any existing specs for the container
+// saveSpec saves the OCI spec to disk, replacing any existing specs for the container
func (c *Container) saveSpec(spec *spec.Spec) error {
// If the OCI spec already exists, we need to replace it
// Cannot guarantee some things, e.g. network namespaces, have the same
@@ -1501,3 +1503,40 @@ func (c *Container) checkReadyForRemoval() error {
return nil
}
+
+// writeJSONFile marshals and writes the given data to a JSON file
+// in the bundle path
+func (c *Container) writeJSONFile(v interface{}, file string) (err error) {
+ fileJSON, err := json.MarshalIndent(v, "", " ")
+ if err != nil {
+ return errors.Wrapf(err, "error writing JSON to %s for container %s", file, c.ID())
+ }
+ file = filepath.Join(c.bundlePath(), file)
+ if err := ioutil.WriteFile(file, fileJSON, 0644); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// prepareCheckpointExport writes the config and spec to
+// JSON files for later export
+func (c *Container) prepareCheckpointExport() (err error) {
+ // save live config
+ if err := c.writeJSONFile(c.Config(), "config.dump"); err != nil {
+ return err
+ }
+
+ // save spec
+ jsonPath := filepath.Join(c.bundlePath(), "config.json")
+ g, err := generate.NewFromFile(jsonPath)
+ if err != nil {
+ logrus.Debugf("generating spec for container %q failed with %v", c.ID(), err)
+ return err
+ }
+ if err := c.writeJSONFile(g.Spec(), "spec.dump"); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index f25f76092..55cc5089b 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -5,6 +5,7 @@ package libpod
import (
"context"
"fmt"
+ "io"
"io/ioutil"
"net"
"os"
@@ -25,6 +26,7 @@ import (
"github.com/containers/libpod/pkg/lookup"
"github.com/containers/libpod/pkg/resolvconf"
"github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/storage/pkg/archive"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -422,7 +424,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// It also expects to be able to write to /sys/fs/cgroup/systemd and /var/log/journal
func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) error {
options := []string{"rw", "rprivate", "noexec", "nosuid", "nodev"}
- for _, dest := range []string{"/run"} {
+ for _, dest := range []string{"/run", "/run/lock"} {
if MountExists(mounts, dest) {
continue
}
@@ -496,6 +498,45 @@ func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr
return nil
}
+func (c *Container) exportCheckpoint(dest string) (err error) {
+ if (len(c.config.NamedVolumes) > 0) || (len(c.Dependencies()) > 0) {
+ return errors.Errorf("Cannot export checkpoints of containers with named volumes or dependencies")
+ }
+ logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), dest)
+ input, err := archive.TarWithOptions(c.bundlePath(), &archive.TarOptions{
+ Compression: archive.Gzip,
+ IncludeSourceDir: true,
+ IncludeFiles: []string{
+ "checkpoint",
+ "artifacts",
+ "ctr.log",
+ "config.dump",
+ "spec.dump",
+ "network.status"},
+ })
+
+ if err != nil {
+ return errors.Wrapf(err, "error reading checkpoint directory %q", c.ID())
+ }
+
+ outFile, err := os.Create(dest)
+ if err != nil {
+ return errors.Wrapf(err, "error creating checkpoint export file %q", dest)
+ }
+ defer outFile.Close()
+
+ if err := os.Chmod(dest, 0600); err != nil {
+ return errors.Wrapf(err, "cannot chmod %q", dest)
+ }
+
+ _, err = io.Copy(outFile, input)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
func (c *Container) checkpointRestoreSupported() (err error) {
if !criu.CheckForCriu() {
return errors.Errorf("Checkpoint/Restore requires at least CRIU %d", criu.MinCriuVersion)
@@ -549,6 +590,12 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
return err
}
+ if options.TargetFile != "" {
+ if err = c.exportCheckpoint(options.TargetFile); err != nil {
+ return err
+ }
+ }
+
logrus.Debugf("Checkpointed container %s", c.ID())
if !options.KeepRunning {
@@ -561,15 +608,50 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
if !options.Keep {
- // Remove log file
- os.Remove(filepath.Join(c.bundlePath(), "dump.log"))
- // Remove statistic file
- os.Remove(filepath.Join(c.bundlePath(), "stats-dump"))
+ cleanup := []string{
+ "dump.log",
+ "stats-dump",
+ "config.dump",
+ "spec.dump",
+ }
+ for _, delete := range cleanup {
+ file := filepath.Join(c.bundlePath(), delete)
+ os.Remove(file)
+ }
}
return c.save()
}
+func (c *Container) importCheckpoint(input string) (err error) {
+ archiveFile, err := os.Open(input)
+ if err != nil {
+ return errors.Wrapf(err, "Failed to open checkpoint archive %s for import", input)
+ }
+
+ defer archiveFile.Close()
+ options := &archive.TarOptions{
+ ExcludePatterns: []string{
+ // config.dump and spec.dump are only required
+ // for container creation
+ "config.dump",
+ "spec.dump",
+ },
+ }
+ err = archive.Untar(archiveFile, c.bundlePath(), options)
+ if err != nil {
+ return errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input)
+ }
+
+ // Make sure the newly created config.json exists on disk
+ g := generate.NewFromSpec(c.config.Spec)
+ if err = c.saveSpec(g.Spec()); err != nil {
+ return errors.Wrap(err, "Saving imported container specification for restore failed")
+ }
+
+ return nil
+}
+
func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) (err error) {
if err := c.checkpointRestoreSupported(); err != nil {
@@ -580,6 +662,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return errors.Wrapf(ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
}
+ if options.TargetFile != "" {
+ if err = c.importCheckpoint(options.TargetFile); err != nil {
+ return err
+ }
+ }
+
// Let's try to stat() CRIU's inventory file. If it does not exist, it makes
// no sense to try a restore. This is a minimal check if a checkpoint exist.
if _, err := os.Stat(filepath.Join(c.CheckpointPath(), "inventory.img")); os.IsNotExist(err) {
@@ -593,7 +681,13 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Read network configuration from checkpoint
// Currently only one interface with one IP is supported.
networkStatusFile, err := os.Open(filepath.Join(c.bundlePath(), "network.status"))
- if err == nil {
+ // If the restored container should get a new name, the IP address of
+ // the container will not be restored. This assumes that if a new name is
+ // specified, the container is restored multiple times.
+ // TODO: This implicit restoring with or without IP depending on an
+ // unrelated restore parameter (--name) does not seem like the
+ // best solution.
+ if err == nil && options.Name == "" {
// The file with the network.status does exist. Let's restore the
// container with the same IP address as during checkpointing.
defer networkStatusFile.Close()
@@ -637,23 +731,44 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return err
}
+ // Restoring from an import means that we are doing migration
+ if options.TargetFile != "" {
+ g.SetRootPath(c.state.Mountpoint)
+ }
+
// We want to have the same network namespace as before.
if c.config.CreateNetNS {
g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path())
}
- // Save the OCI spec to disk
- if err := c.saveSpec(g.Spec()); err != nil {
+ if err := c.makeBindMounts(); err != nil {
return err
}
- if err := c.makeBindMounts(); err != nil {
- return err
+ if options.TargetFile != "" {
+ for dstPath, srcPath := range c.state.BindMounts {
+ newMount := spec.Mount{
+ Type: "bind",
+ Source: srcPath,
+ Destination: dstPath,
+ Options: []string{"bind", "private"},
+ }
+ if c.IsReadOnly() && dstPath != "/dev/shm" {
+ newMount.Options = append(newMount.Options, "ro", "nosuid", "noexec", "nodev")
+ }
+ if !MountExists(g.Mounts(), dstPath) {
+ g.AddMount(newMount)
+ }
+ }
}
// Cleanup for a working restore.
c.removeConmonFiles()
+ // Save the OCI spec to disk
+ if err := c.saveSpec(g.Spec()); err != nil {
+ return err
+ }
if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, &options); err != nil {
return err
}
@@ -885,7 +1000,7 @@ func (c *Container) generateResolvConf() (string, error) {
nameservers := resolvconf.GetNameservers(resolv.Content)
// slirp4netns has a built in DNS server.
if c.config.NetMode.IsSlirp4netns() {
- nameservers = append(nameservers, "10.0.2.3")
+ nameservers = append([]string{"10.0.2.3"}, nameservers...)
}
if len(c.config.DNSServer) > 0 {
// We store DNS servers as net.IP, so need to convert to string
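
Taken together, an exported checkpoint is a gzip-compressed tar of the bundle directory. Per the IncludeFiles list in exportCheckpoint() above, it is expected to contain roughly:

    checkpoint/      CRIU image files written during the checkpoint
    artifacts/       additional checkpoint artifacts
    ctr.log          the container log file, if present in the bundle
    config.dump      container configuration, written by prepareCheckpointExport()
    spec.dump        OCI runtime spec, written by prepareCheckpointExport()
    network.status   CNI result used to restore the container's IP address

On import, config.dump and spec.dump are excluded from unpacking because they are consumed earlier, when the container object is re-created from the archive (see RestoreContainer in runtime_ctr.go below).
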
diff --git a/libpod/driver/driver.go b/libpod/driver/driver.go
index 717ac2a4d..f9442fa21 100644
--- a/libpod/driver/driver.go
+++ b/libpod/driver/driver.go
@@ -1,10 +1,15 @@
package driver
import (
- "github.com/containers/libpod/pkg/inspect"
cstorage "github.com/containers/storage"
)
+// Data handles the data for a storage driver
+type Data struct {
+ Name string `json:"Name"`
+ Data map[string]string `json:"Data"`
+}
+
// GetDriverName returns the name of the driver for the given store
func GetDriverName(store cstorage.Store) (string, error) {
driver, err := store.GraphDriver()
@@ -24,7 +29,7 @@ func GetDriverMetadata(store cstorage.Store, layerID string) (map[string]string,
}
// GetDriverData returns the Data struct with information of the driver used by the store
-func GetDriverData(store cstorage.Store, layerID string) (*inspect.Data, error) {
+func GetDriverData(store cstorage.Store, layerID string) (*Data, error) {
name, err := GetDriverName(store)
if err != nil {
return nil, err
@@ -33,7 +38,7 @@ func GetDriverData(store cstorage.Store, layerID string) (*inspect.Data, error)
if err != nil {
return nil, err
}
- return &inspect.Data{
+ return &Data{
Name: name,
Data: metaData,
}, nil
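
driver.Data replaces the old pkg/inspect.Data and is what Inspect reports under GraphDriver. A small caller sketch; store is a containers/storage Store, layerID an existing layer ID, and the overlay key names mentioned in the comment are illustrative:

    data, err := driver.GetDriverData(store, layerID)
    if err != nil {
        return err
    }
    fmt.Println(data.Name) // e.g. "overlay"
    // data.Data holds driver-specific metadata, e.g. lower/upper/work dirs for overlay.
    for k, v := range data.Data {
        fmt.Printf("%s=%s\n", k, v)
    }
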
diff --git a/libpod/errors.go b/libpod/errors.go
index dd82d0796..cca0935ec 100644
--- a/libpod/errors.go
+++ b/libpod/errors.go
@@ -96,4 +96,7 @@ var (
// ErrOSNotSupported indicates the function is not available on the particular
// OS.
ErrOSNotSupported = errors.New("No support for this OS yet")
+
+ // ErrOCIRuntime indicates an error from the OCI runtime
+ ErrOCIRuntime = errors.New("OCI runtime error")
)
diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go
index 5c48cc8ee..3e36a2c95 100644
--- a/libpod/healthcheck.go
+++ b/libpod/healthcheck.go
@@ -9,7 +9,6 @@ import (
"strings"
"time"
- "github.com/containers/libpod/pkg/inspect"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -53,6 +52,28 @@ const (
HealthCheckStarting string = "starting"
)
+// HealthCheckResults describes the results/logs from a healthcheck
+type HealthCheckResults struct {
+ // Status healthy or unhealthy
+ Status string `json:"Status"`
+ // FailingStreak is the number of consecutive failed healthchecks
+ FailingStreak int `json:"FailingStreak"`
+ // Log describes healthcheck attempts and results
+ Log []HealthCheckLog `json:"Log"`
+}
+
+// HealthCheckLog describes the results of a single healthcheck
+type HealthCheckLog struct {
+ // Start time as string
+ Start string `json:"Start"`
+ // End time as a string
+ End string `json:"End"`
+ // Exitcode is 0 or 1
+ ExitCode int `json:"ExitCode"`
+ // Output is the stdout/stderr from the healthcheck command
+ Output string `json:"Output"`
+}
+
// hcWriteCloser allows us to use bufio as a WriteCloser
type hcWriteCloser struct {
*bufio.Writer
@@ -157,8 +178,8 @@ func checkHealthCheckCanBeRun(c *Container) (HealthCheckStatus, error) {
return HealthCheckDefined, nil
}
-func newHealthCheckLog(start, end time.Time, exitCode int, log string) inspect.HealthCheckLog {
- return inspect.HealthCheckLog{
+func newHealthCheckLog(start, end time.Time, exitCode int, log string) HealthCheckLog {
+ return HealthCheckLog{
Start: start.Format(time.RFC3339Nano),
End: end.Format(time.RFC3339Nano),
ExitCode: exitCode,
@@ -182,7 +203,7 @@ func (c *Container) updateHealthStatus(status string) error {
}
// UpdateHealthCheckLog parses the health check results and writes the log
-func (c *Container) updateHealthCheckLog(hcl inspect.HealthCheckLog, inStartPeriod bool) error {
+func (c *Container) updateHealthCheckLog(hcl HealthCheckLog, inStartPeriod bool) error {
healthCheck, err := c.GetHealthCheckLog()
if err != nil {
return err
@@ -223,8 +244,8 @@ func (c *Container) healthCheckLogPath() string {
// GetHealthCheckLog returns HealthCheck results by reading the container's
// health check log file. If the health check log file does not exist, then
// an empty healthcheck struct is returned
-func (c *Container) GetHealthCheckLog() (inspect.HealthCheckResults, error) {
- var healthCheck inspect.HealthCheckResults
+func (c *Container) GetHealthCheckLog() (HealthCheckResults, error) {
+ var healthCheck HealthCheckResults
if _, err := os.Stat(c.healthCheckLogPath()); os.IsNotExist(err) {
return healthCheck, nil
}
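
Since HealthCheckResults is now embedded in InspectContainerState (the Healthcheck field above), the health log shows up directly in inspect output. An illustrative shape implied by the JSON tags; the values are made up:

    "Healthcheck": {
        "Status": "healthy",
        "FailingStreak": 0,
        "Log": [
            {"Start": "2019-03-20T14:00:00.000000000Z", "End": "2019-03-20T14:00:00.500000000Z", "ExitCode": 0, "Output": ""}
        ]
    }
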
diff --git a/libpod/image/image.go b/libpod/image/image.go
index b965a4640..89a68a1bd 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -659,7 +659,7 @@ func (i *Image) Size(ctx context.Context) (*uint64, error) {
}
// DriverData gets the driver data from the store on a layer
-func (i *Image) DriverData() (*inspect.Data, error) {
+func (i *Image) DriverData() (*driver.Data, error) {
topLayer, err := i.Layer()
if err != nil {
return nil, err
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index b8a916de3..ed9ad5f0d 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -17,7 +17,6 @@ import (
cnitypes "github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/libpod/pkg/firewall"
- "github.com/containers/libpod/pkg/inspect"
"github.com/containers/libpod/pkg/netns"
"github.com/containers/libpod/pkg/rootless"
"github.com/cri-o/ocicni/pkg/ocicni"
@@ -470,7 +469,7 @@ func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) {
return netStats, err
}
-func (c *Container) getContainerNetworkInfo(data *inspect.ContainerInspectData) *inspect.ContainerInspectData {
+func (c *Container) getContainerNetworkInfo(data *InspectContainerData) *InspectContainerData {
if c.state.NetNS != nil && len(c.state.NetworkStatus) > 0 {
// Report network settings from the first pod network
result := c.state.NetworkStatus[0]
diff --git a/libpod/networking_unsupported.go b/libpod/networking_unsupported.go
index 3a8ac4455..1e46ca40b 100644
--- a/libpod/networking_unsupported.go
+++ b/libpod/networking_unsupported.go
@@ -2,10 +2,6 @@
package libpod
-import (
- "github.com/containers/libpod/pkg/inspect"
-)
-
func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
return ErrNotImplemented
}
@@ -22,6 +18,6 @@ func (r *Runtime) createNetNS(ctr *Container) (err error) {
return ErrNotImplemented
}
-func (c *Container) getContainerNetworkInfo(data *inspect.ContainerInspectData) *inspect.ContainerInspectData {
+func (c *Container) getContainerNetworkInfo(data *InspectContainerData) *InspectContainerData {
return nil
}
diff --git a/libpod/oci.go b/libpod/oci.go
index 7138108c5..dcb72fc1b 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -58,6 +58,7 @@ type OCIRuntime struct {
logSizeMax int64
noPivot bool
reservePorts bool
+ supportsJSON bool
}
// syncInfo is used to return data from monitor process to daemon
@@ -66,8 +67,16 @@ type syncInfo struct {
Message string `json:"message,omitempty"`
}
+// ociError is used to parse the OCI runtime JSON log. It is not part of the
+// OCI runtime specification; it follows what runc does
+type ociError struct {
+ Level string `json:"level,omitempty"`
+ Time string `json:"time,omitempty"`
+ Msg string `json:"msg,omitempty"`
+}
+
// Make a new OCI runtime with provided options
-func newOCIRuntime(oruntime OCIRuntimePath, conmonPath string, conmonEnv []string, cgroupManager string, tmpDir string, logSizeMax int64, noPivotRoot bool, reservePorts bool) (*OCIRuntime, error) {
+func newOCIRuntime(oruntime OCIRuntimePath, conmonPath string, conmonEnv []string, cgroupManager string, tmpDir string, logSizeMax int64, noPivotRoot bool, reservePorts bool, supportsJSON bool) (*OCIRuntime, error) {
runtime := new(OCIRuntime)
runtime.name = oruntime.Name
runtime.path = oruntime.Paths[0]
@@ -78,6 +87,7 @@ func newOCIRuntime(oruntime OCIRuntimePath, conmonPath string, conmonEnv []strin
runtime.logSizeMax = logSizeMax
runtime.noPivot = noPivotRoot
runtime.reservePorts = reservePorts
+ runtime.supportsJSON = supportsJSON
runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits")
runtime.socketsDir = filepath.Join(runtime.tmpDir, "socket")
diff --git a/libpod/oci_linux.go b/libpod/oci_linux.go
index 7c1c18052..9bbefdb06 100644
--- a/libpod/oci_linux.go
+++ b/libpod/oci_linux.go
@@ -6,6 +6,7 @@ import (
"bufio"
"bytes"
"fmt"
+ "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -208,6 +209,9 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
defer parentPipe.Close()
defer parentStartPipe.Close()
+ ociLog := filepath.Join(ctr.state.RunDir, "oci-log")
+ logLevel := logrus.GetLevel()
+
args := []string{}
if r.cgroupManager == SystemdCgroupsManager {
args = append(args, "-s")
@@ -219,6 +223,9 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
args = append(args, "-b", ctr.bundlePath())
args = append(args, "-p", filepath.Join(ctr.state.RunDir, "pidfile"))
args = append(args, "--exit-dir", r.exitsDir)
+ if logLevel != logrus.DebugLevel && r.supportsJSON {
+ args = append(args, "--runtime-arg", "--log-format=json", "--runtime-arg", "--log", fmt.Sprintf("--runtime-arg=%s", ociLog))
+ }
if ctr.config.ConmonPidFile != "" {
args = append(args, "--conmon-pidfile", ctr.config.ConmonPidFile)
}
@@ -248,7 +255,6 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
args = append(args, "--no-pivot")
}
- logLevel := logrus.GetLevel()
args = append(args, "--log-level", logLevel.String())
if logLevel == logrus.DebugLevel {
@@ -417,8 +423,18 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
}
logrus.Debugf("Received container pid: %d", ss.si.Pid)
if ss.si.Pid == -1 {
+ if r.supportsJSON {
+ data, err := ioutil.ReadFile(ociLog)
+ if err == nil {
+ var ociErr ociError
+ if err := json.Unmarshal(data, &ociErr); err == nil {
+ return errors.Wrapf(ErrOCIRuntime, "%s", strings.Trim(ociErr.Msg, "\n"))
+ }
+ }
+ }
+ // If we failed to parse the JSON errors, then print the output as it is
if ss.si.Message != "" {
- return errors.Wrapf(ErrInternal, "container create failed: %s", ss.si.Message)
+ return errors.Wrapf(ErrOCIRuntime, "%s", ss.si.Message)
}
return errors.Wrapf(ErrInternal, "container create failed")
}
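
The net effect: when the configured runtime is known to support JSON logging (see runtime_supports_json in runtime.go below) and libpod is not at debug level, conmon is asked via --runtime-arg to pass --log-format=json and --log <rundir>/oci-log to the OCI runtime. If creation then fails, libpod reads that file and wraps the runtime's own message with ErrOCIRuntime instead of returning a generic "container create failed". An illustrative runc-style log entry (the message is made up):

    {"level":"error","msg":"exec: \"foo\": executable file not found in $PATH","time":"2019-03-20T14:00:00Z"}

which pkg/errors renders as: exec: "foo": executable file not found in $PATH: OCI runtime error.
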
diff --git a/libpod/options.go b/libpod/options.go
index 20aa51981..cdac09654 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1127,6 +1127,8 @@ func WithGroups(groups []string) CtrCreateOption {
// These are not added to the container's spec, but will instead be used during
// commit to populate the volumes of the new image, and to trigger some OCI
// hooks that are only added if volume mounts are present.
+// Furthermore, they are used in the output of inspect, to filter volumes -
+// only volumes included in this list will be included in the output.
// Unless explicitly set, committed images will have no volumes.
// The given volumes slice must not be nil.
func WithUserVolumes(volumes []string) CtrCreateOption {
diff --git a/libpod/pod.go b/libpod/pod.go
index 4ce697402..c319c449f 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -18,7 +18,6 @@ import (
// assume their callers handled this requirement. Generally speaking, if a
// function takes the pod lock and accesses any part of state, it should
// updatePod() immediately after locking.
-// ffjson: skip
// Pod represents a group of containers that may share namespaces
type Pod struct {
config *PodConfig
@@ -30,7 +29,6 @@ type Pod struct {
}
// PodConfig represents a pod's static configuration
-// easyjson:json
type PodConfig struct {
ID string `json:"id"`
Name string `json:"name"`
@@ -66,7 +64,6 @@ type PodConfig struct {
}
// podState represents a pod's state
-// easyjson:json
type podState struct {
// CgroupPath is the path to the pod's CGroup
CgroupPath string `json:"cgroupPath"`
@@ -77,7 +74,6 @@ type podState struct {
// PodInspect represents the data we want to display for
// podman pod inspect
-// easyjson:json
type PodInspect struct {
Config *PodConfig
State *PodInspectState
@@ -85,14 +81,12 @@ type PodInspect struct {
}
// PodInspectState contains inspect data on the pod's state
-// easyjson:json
type PodInspectState struct {
CgroupPath string `json:"cgroupPath"`
InfraContainerID string `json:"infraContainerID"`
}
// PodContainerInfo keeps information on a container in a pod
-// easyjson:json
type PodContainerInfo struct {
ID string `json:"id"`
State string `json:"state"`
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 1f8dd98b4..2c50fce85 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -42,11 +42,20 @@ const (
SQLiteStateStore RuntimeStateStore = iota
// BoltDBStateStore is a state backed by a BoltDB database
BoltDBStateStore RuntimeStateStore = iota
+)
+
+var (
+ // installPrefix is the prefix where podman will be installed.
+ // It can be overridden at build time.
+ installPrefix = "/usr/local"
+ // etcDir is the sysconfdir where podman should look for system config files.
+ // It can be overridden at build time.
+ etcDir = "/etc"
// SeccompDefaultPath defines the default seccomp path
- SeccompDefaultPath = "/usr/share/containers/seccomp.json"
+ SeccompDefaultPath = installPrefix + "/share/containers/seccomp.json"
// SeccompOverridePath if this exists it overrides the default seccomp path
- SeccompOverridePath = "/etc/crio/seccomp.json"
+ SeccompOverridePath = etcDir + "/crio/seccomp.json"
// ConfigPath is the path to the libpod configuration file
// This file is loaded to replace the builtin default config before
@@ -54,11 +63,11 @@ const (
// If it is not present, the builtin default config is used instead
// This path can be overridden when the runtime is created by using
// NewRuntimeFromConfig() instead of NewRuntime()
- ConfigPath = "/usr/share/containers/libpod.conf"
+ ConfigPath = installPrefix + "/share/containers/libpod.conf"
// OverrideConfigPath is the path to an override for the default libpod
// configuration file. If OverrideConfigPath exists, it will be used in
// place of the configuration file pointed to by ConfigPath.
- OverrideConfigPath = "/etc/containers/libpod.conf"
+ OverrideConfigPath = etcDir + "/containers/libpod.conf"
// DefaultInfraImage to use for infra container
DefaultInfraImage = "k8s.gcr.io/pause:3.1"
@@ -151,6 +160,8 @@ type RuntimeConfig struct {
OCIRuntime string `toml:"runtime"`
// OCIRuntimes are the set of configured OCI runtimes (default is runc)
OCIRuntimes map[string][]string `toml:"runtimes"`
+ // RuntimeSupportsJSON is the list of OCI runtimes that support --log-format=json
+ RuntimeSupportsJSON []string `toml:"runtime_supports_json"`
// RuntimePath is the path to OCI runtime binary for launching
// containers.
// The first path pointing to a valid file will be used
@@ -250,6 +261,7 @@ type runtimeConfiguredFrom struct {
volPathSet bool
conmonPath bool
conmonEnvVars bool
+ initPath bool
ociRuntimes bool
runtimePath bool
cniPluginDir bool
@@ -297,7 +309,7 @@ func defaultRuntimeConfig() (RuntimeConfig, error) {
TmpDir: "",
MaxLogSize: -1,
NoPivotRoot: false,
- CNIConfigDir: "/etc/cni/net.d/",
+ CNIConfigDir: etcDir + "/cni/net.d/",
CNIPluginDir: []string{"/usr/libexec/cni", "/usr/lib/cni", "/usr/local/lib/cni", "/opt/cni/bin"},
InfraCommand: DefaultInfraCommand,
InfraImage: DefaultInfraImage,
@@ -475,6 +487,9 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
if tmpConfig.ConmonEnvVars != nil {
runtime.configuredFrom.conmonEnvVars = true
}
+ if tmpConfig.InitPath != "" {
+ runtime.configuredFrom.initPath = true
+ }
if tmpConfig.OCIRuntimes != nil {
runtime.configuredFrom.ociRuntimes = true
}
@@ -512,6 +527,9 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
if !runtime.configuredFrom.conmonEnvVars {
runtime.config.ConmonEnvVars = tmpConfig.ConmonEnvVars
}
+ if !runtime.configuredFrom.initPath {
+ runtime.config.InitPath = tmpConfig.InitPath
+ }
if !runtime.configuredFrom.ociRuntimes {
runtime.config.OCIRuntimes = tmpConfig.OCIRuntimes
}
@@ -823,12 +841,21 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
}
}
+ supportsJSON := false
+ for _, r := range runtime.config.RuntimeSupportsJSON {
+ if r == runtime.config.OCIRuntime {
+ supportsJSON = true
+ break
+ }
+ }
+
// Make an OCI runtime to perform container operations
ociRuntime, err := newOCIRuntime(runtime.ociRuntimePath,
runtime.conmonPath, runtime.config.ConmonEnvVars,
runtime.config.CgroupManager, runtime.config.TmpDir,
runtime.config.MaxLogSize, runtime.config.NoPivotRoot,
- runtime.config.EnablePortReservation)
+ runtime.config.EnablePortReservation,
+ supportsJSON)
if err != nil {
return err
}
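
The supportsJSON flag above is driven by the new runtime_supports_json option (the toml tag on RuntimeSupportsJSON). A hedged libpod.conf excerpt; the key name comes from the tag in this diff, the value is illustrative:

    runtime_supports_json = ["runc"]
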
diff --git a/libpod/runtime_cstorage.go b/libpod/runtime_cstorage.go
new file mode 100644
index 000000000..569f63322
--- /dev/null
+++ b/libpod/runtime_cstorage.go
@@ -0,0 +1,118 @@
+package libpod
+
+import (
+ "github.com/containers/storage"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// StorageContainer represents a container visible to c/storage; it may or
+// may not also be present in libpod's database (see PresentInLibpod).
+type StorageContainer struct {
+ ID string
+ Names []string
+ PresentInLibpod bool
+}
+
+// ListStorageContainers lists all containers visible to c/storage.
+func (r *Runtime) ListStorageContainers() ([]*StorageContainer, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ finalCtrs := []*StorageContainer{}
+
+ ctrs, err := r.store.Containers()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, ctr := range ctrs {
+ storageCtr := new(StorageContainer)
+ storageCtr.ID = ctr.ID
+ storageCtr.Names = ctr.Names
+
+ // Look up if container is in state
+ hasCtr, err := r.state.HasContainer(ctr.ID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error looking up container %s in state", ctr.ID)
+ }
+
+ storageCtr.PresentInLibpod = hasCtr
+
+ finalCtrs = append(finalCtrs, storageCtr)
+ }
+
+ return finalCtrs, nil
+}
+
+// RemoveStorageContainer removes a container from c/storage.
+// The container WILL NOT be removed if it exists in libpod.
+// Accepts ID or full name of container.
+// If force is set, the container will be unmounted first to ensure removal.
+func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ targetID, err := r.store.Lookup(idOrName)
+ if err != nil {
+ if err == storage.ErrLayerUnknown {
+ return errors.Wrapf(ErrNoSuchCtr, "no container with ID or name %q found", idOrName)
+ }
+ return errors.Wrapf(err, "error looking up container %q", idOrName)
+ }
+
+ // Lookup returns an ID but it's not guaranteed to be a container ID.
+ // So we can still error here.
+ ctr, err := r.store.Container(targetID)
+ if err != nil {
+ if err == storage.ErrContainerUnknown {
+ return errors.Wrapf(ErrNoSuchCtr, "%q does not refer to a container", idOrName)
+ }
+ return errors.Wrapf(err, "error retrieving container %q", idOrName)
+ }
+
+ // Error out if the container exists in libpod
+ exists, err := r.state.HasContainer(ctr.ID)
+ if err != nil {
+ return err
+ }
+ if exists {
+ return errors.Wrapf(ErrCtrExists, "refusing to remove %q as it exists in libpod as container %s", idOrName, ctr.ID)
+ }
+
+ if !force {
+ timesMounted, err := r.store.Mounted(ctr.ID)
+ if err != nil {
+ if err == storage.ErrContainerUnknown {
+ // Container was removed from under us.
+ // It's gone, so don't bother erroring.
+ logrus.Warnf("Storage for container %s already removed", ctr.ID)
+ return nil
+ }
+ return errors.Wrapf(err, "error looking up container %q mounts", idOrName)
+ }
+ if timesMounted > 0 {
+ return errors.Wrapf(ErrCtrStateInvalid, "container %q is mounted and cannot be removed without using force", idOrName)
+ }
+ } else {
+ if _, err := r.store.Unmount(ctr.ID, true); err != nil {
+ if err == storage.ErrContainerUnknown {
+ // Container again gone, no error
+ logrus.Warnf("Storage for container %s already removed", ctr.ID)
+ return nil
+ }
+ return errors.Wrapf(err, "error unmounting container %q", idOrName)
+ }
+ }
+
+ if err := r.store.DeleteContainer(ctr.ID); err != nil {
+ if err == storage.ErrContainerUnknown {
+ // Container again gone, no error
+ logrus.Warnf("Storage for container %s already removed", ctr.ID)
+ return nil
+ }
+ return errors.Wrapf(err, "error removing storage for container %q", idOrName)
+ }
+
+ return nil
+}
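
These helpers back cleanup of containers that exist in containers/storage but not in libpod's database (for example, containers created by Buildah or CRI-O, or left behind by an interrupted create). A small caller sketch; r is a *libpod.Runtime and the decision to force removal is illustrative:

    storageCtrs, err := r.ListStorageContainers()
    if err != nil {
        return err
    }
    for _, sc := range storageCtrs {
        if sc.PresentInLibpod {
            // Managed by libpod; RemoveStorageContainer would refuse with ErrCtrExists.
            continue
        }
        if err := r.RemoveStorageContainer(sc.ID, true); err != nil {
            logrus.Errorf("Failed to remove storage container %s: %v", sc.ID, err)
        }
    }
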
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 0c8d3edab..0871b83a7 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -9,11 +9,10 @@ import (
"time"
"github.com/containers/libpod/libpod/events"
- "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/rootless"
- "github.com/containers/storage"
"github.com/containers/storage/pkg/stringid"
spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -34,7 +33,7 @@ type CtrCreateOption func(*Container) error
// A true return will include the container, a false return will exclude it.
type ContainerFilter func(*Container) bool
-// NewContainer creates a new container from a given OCI config
+// NewContainer creates a new container from a given OCI config.
func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (c *Container, err error) {
r.lock.Lock()
defer r.lock.Unlock()
@@ -44,20 +43,46 @@ func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, options ..
return r.newContainer(ctx, rSpec, options...)
}
-func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (c *Container, err error) {
- span, _ := opentracing.StartSpanFromContext(ctx, "newContainer")
- span.SetTag("type", "runtime")
- defer span.Finish()
+// RestoreContainer re-creates a container from an imported checkpoint
+func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config *ContainerConfig) (c *Container, err error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ if !r.valid {
+ return nil, ErrRuntimeStopped
+ }
+
+ ctr, err := r.initContainerVariables(rSpec, config)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error initializing container variables")
+ }
+ return r.setupContainer(ctx, ctr, true)
+}
+func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (c *Container, err error) {
if rSpec == nil {
return nil, errors.Wrapf(ErrInvalidArg, "must provide a valid runtime spec to create container")
}
-
ctr := new(Container)
ctr.config = new(ContainerConfig)
ctr.state = new(ContainerState)
- ctr.config.ID = stringid.GenerateNonCryptoID()
+ if config == nil {
+ ctr.config.ID = stringid.GenerateNonCryptoID()
+ ctr.config.ShmSize = DefaultShmSize
+ } else {
+ // This is a restore from an imported checkpoint
+ if err := JSONDeepCopy(config, ctr.config); err != nil {
+ return nil, errors.Wrapf(err, "error copying container config for restore")
+ }
+ // If the ID is empty, a new name for the restored container was requested
+ if ctr.config.ID == "" {
+ ctr.config.ID = stringid.GenerateNonCryptoID()
+ // Fixup ExitCommand with new ID
+ ctr.config.ExitCommand[len(ctr.config.ExitCommand)-1] = ctr.config.ID
+ }
+ // Reset the log path to point to the default
+ ctr.config.LogPath = ""
+ }
ctr.config.Spec = new(spec.Spec)
if err := JSONDeepCopy(rSpec, ctr.config.Spec); err != nil {
@@ -65,8 +90,6 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
ctr.config.CreatedTime = time.Now()
- ctr.config.ShmSize = DefaultShmSize
-
ctr.state.BindMounts = make(map[string]string)
ctr.config.StopTimeout = CtrRemoveTimeout
@@ -80,12 +103,29 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
ctr.runtime = r
+
+ return ctr, nil
+}
+
+func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (c *Container, err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "newContainer")
+ span.SetTag("type", "runtime")
+ defer span.Finish()
+
+ ctr, err := r.initContainerVariables(rSpec, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error initializing container variables")
+ }
+
for _, option := range options {
if err := option(ctr); err != nil {
return nil, errors.Wrapf(err, "error running container create option")
}
}
+ return r.setupContainer(ctx, ctr, false)
+}
+func (r *Runtime) setupContainer(ctx context.Context, ctr *Container, restore bool) (c *Container, err error) {
// Allocate a lock for the container
lock, err := r.lockManager.AllocateLock()
if err != nil {
@@ -154,6 +194,19 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
return nil, errors.Wrapf(ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager)
}
+ if restore {
+ // Remove information about bind mount
+ // for new container from imported checkpoint
+ g := generate.Generator{Config: ctr.config.Spec}
+ g.RemoveMount("/dev/shm")
+ ctr.config.ShmDir = ""
+ g.RemoveMount("/etc/resolv.conf")
+ g.RemoveMount("/etc/hostname")
+ g.RemoveMount("/etc/hosts")
+ g.RemoveMount("/run/.containerenv")
+ g.RemoveMount("/run/secrets")
+ }
+
// Set up storage for the container
if err := ctr.setupStorage(ctx); err != nil {
return nil, err
@@ -559,16 +612,3 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
}
return ctrs[lastCreatedIndex], nil
}
-
-// RemoveContainersFromStorage attempt to remove containers from storage that do not exist in libpod database
-func (r *Runtime) RemoveContainersFromStorage(ctrs []string) {
- for _, i := range ctrs {
- // if the container does not exist in database, attempt to remove it from storage
- if _, err := r.LookupContainer(i); err != nil && errors.Cause(err) == image.ErrNoSuchCtr {
- r.storageService.UnmountContainerImage(i, true)
- if err := r.storageService.DeleteContainer(i); err != nil && errors.Cause(err) != storage.ErrContainerUnknown {
- logrus.Errorf("Failed to remove container %q from storage: %s", i, err)
- }
- }
- }
-}
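
RestoreContainer is the creation half of checkpoint import: the caller (Podman's CLI layer, outside this diff) is expected to extract config.dump and spec.dump from the archive, rebuild the ContainerConfig and OCI spec from them, and only then drive the actual restore. A hedged sketch; readConfigFromArchive and readSpecFromArchive are hypothetical helpers, and clearing the ID is how a caller requests a fresh ID (per the comment in initContainerVariables):

    // Hypothetical helpers that unpack config.dump / spec.dump from the archive.
    dumpConfig, err := readConfigFromArchive(archivePath) // *libpod.ContainerConfig
    if err != nil {
        return err
    }
    dumpSpec, err := readSpecFromArchive(archivePath) // *spec.Spec
    if err != nil {
        return err
    }
    if newName != "" {
        dumpConfig.ID = ""        // empty ID: initContainerVariables generates a new one
        dumpConfig.Name = newName // illustrative; naming is handled by the caller
    }
    ctr, err := runtime.RestoreContainer(ctx, dumpSpec, dumpConfig)
    if err != nil {
        return err
    }
    // Container.Restore (an existing API, not part of this diff) then unpacks
    // the rest of the archive via importCheckpoint and restores the process.
    err = ctr.Restore(ctx, libpod.ContainerCheckpointOptions{
        TargetFile: archivePath,
        Name:       newName,
    })
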
diff --git a/libpod/volume.go b/libpod/volume.go
index 0b37d44ef..9ed2ff087 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -10,7 +10,6 @@ type Volume struct {
}
// VolumeConfig holds the volume's config information
-//easyjson:json
type VolumeConfig struct {
// Name of the volume
Name string `json:"name"`