Diffstat (limited to 'libpod')
-rw-r--r--  libpod/adapter/client.go | 47
-rw-r--r--  libpod/adapter/containers_remote.go | 50
-rw-r--r--  libpod/adapter/images_remote.go | 24
-rw-r--r--  libpod/adapter/info_remote.go | 56
-rw-r--r--  libpod/adapter/runtime.go | 157
-rw-r--r--  libpod/adapter/runtime_remote.go | 434
-rw-r--r--  libpod/boltdb_state.go | 92
-rw-r--r--  libpod/boltdb_state_internal.go | 7
-rw-r--r--  libpod/container.go | 15
-rw-r--r--  libpod/container_api.go | 176
-rw-r--r--  libpod/container_attach_linux.go | 4
-rw-r--r--  libpod/container_commit.go | 2
-rw-r--r--  libpod/container_graph.go | 97
-rw-r--r--  libpod/container_internal.go | 337
-rw-r--r--  libpod/container_internal_linux.go | 69
-rw-r--r--  libpod/errors.go | 16
-rw-r--r--  libpod/image/image.go | 76
-rw-r--r--  libpod/image/pull.go | 13
-rw-r--r--  libpod/image/search.go | 277
-rw-r--r--  libpod/image/utils.go | 43
-rw-r--r--  libpod/in_memory_state.go | 64
-rw-r--r--  libpod/kube.go | 16
-rw-r--r--  libpod/lock/in_memory_locks.go | 11
-rw-r--r--  libpod/lock/lock.go | 14
-rw-r--r--  libpod/lock/shm/shm_lock.c | 36
-rw-r--r--  libpod/lock/shm/shm_lock.go | 16
-rw-r--r--  libpod/lock/shm/shm_lock.h | 1
-rw-r--r--  libpod/lock/shm/shm_lock_test.go | 28
-rw-r--r--  libpod/lock/shm_lock_manager_linux.go | 7
-rw-r--r--  libpod/lock/shm_lock_manager_unsupported.go | 5
-rw-r--r--  libpod/oci.go | 1
-rw-r--r--  libpod/options.go | 24
-rw-r--r--  libpod/pod_internal.go | 96
-rw-r--r--  libpod/runtime.go | 79
-rw-r--r--  libpod/runtime_ctr.go | 53
-rw-r--r--  libpod/runtime_img.go | 46
-rw-r--r--  libpod/runtime_pod_linux.go | 9
-rw-r--r--  libpod/runtime_renumber.go | 57
-rw-r--r--  libpod/runtime_volume.go | 80
-rw-r--r--  libpod/runtime_volume_linux.go | 31
-rw-r--r--  libpod/state.go | 24
-rw-r--r--  libpod/state_test.go | 72
-rw-r--r--  libpod/storage.go | 5
-rw-r--r--  libpod/volume.go | 5
-rw-r--r--  libpod/volume_internal.go | 7
45 files changed, 1543 insertions, 1236 deletions
diff --git a/libpod/adapter/client.go b/libpod/adapter/client.go
deleted file mode 100644
index 6512a5952..000000000
--- a/libpod/adapter/client.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// +build remoteclient
-
-package adapter
-
-import (
- "os"
-
- "github.com/sirupsen/logrus"
- "github.com/varlink/go/varlink"
-)
-
-// DefaultAddress is the default address of the varlink socket
-const DefaultAddress = "unix:/run/podman/io.podman"
-
-// Connect provides a varlink connection
-func (r RemoteRuntime) Connect() (*varlink.Connection, error) {
- var err error
- var connection *varlink.Connection
- if bridge := os.Getenv("PODMAN_VARLINK_BRIDGE"); bridge != "" {
- logrus.Infof("Connecting with varlink bridge")
- logrus.Debugf("%s", bridge)
- connection, err = varlink.NewBridge(bridge)
- } else {
- address := os.Getenv("PODMAN_VARLINK_ADDRESS")
- if address == "" {
- address = DefaultAddress
- }
- logrus.Infof("Connecting with varlink address")
- logrus.Debugf("%s", address)
- connection, err = varlink.NewConnection(address)
- }
- if err != nil {
- return nil, err
- }
- return connection, nil
-}
-
-// RefreshConnection is used to replace the current r.Conn after things like
-// using an upgraded varlink connection
-func (r RemoteRuntime) RefreshConnection() error {
- newConn, err := r.Connect()
- if err != nil {
- return err
- }
- r.Conn = newConn
- return nil
-}
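
A minimal usage sketch of the removed client API (assumes the old libpod/adapter import path, the remoteclient build tag, and a listening varlink socket):

package main

import (
	"os"

	"github.com/containers/libpod/libpod/adapter"
	"github.com/sirupsen/logrus"
)

func main() {
	// PODMAN_VARLINK_BRIDGE wins if set; otherwise PODMAN_VARLINK_ADDRESS
	// (falling back to adapter.DefaultAddress) selects the socket to dial.
	os.Setenv("PODMAN_VARLINK_ADDRESS", adapter.DefaultAddress)

	rt := adapter.RemoteRuntime{}
	conn, err := rt.Connect()
	if err != nil {
		logrus.Fatal(err)
	}
	defer conn.Close()

	rt.Conn = conn // later remote calls expect the connection stored on the runtime
}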
diff --git a/libpod/adapter/containers_remote.go b/libpod/adapter/containers_remote.go
deleted file mode 100644
index 9623304e5..000000000
--- a/libpod/adapter/containers_remote.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// +build remoteclient
-
-package adapter
-
-import (
- "encoding/json"
-
- iopodman "github.com/containers/libpod/cmd/podman/varlink"
- "github.com/containers/libpod/libpod"
- "github.com/containers/libpod/pkg/inspect"
-)
-
-// Inspect returns an inspect struct from varlink
-func (c *Container) Inspect(size bool) (*inspect.ContainerInspectData, error) {
- reply, err := iopodman.ContainerInspectData().Call(c.Runtime.Conn, c.ID())
- if err != nil {
- return nil, err
- }
- data := inspect.ContainerInspectData{}
- if err := json.Unmarshal([]byte(reply), &data); err != nil {
- return nil, err
- }
- return &data, err
-}
-
-// ID returns the ID of the container
-func (c *Container) ID() string {
- return c.config.ID
-}
-
-// GetArtifact returns a container's artifacts
-func (c *Container) GetArtifact(name string) ([]byte, error) {
- var data []byte
- reply, err := iopodman.ContainerArtifacts().Call(c.Runtime.Conn, c.ID(), name)
- if err != nil {
- return nil, err
- }
- if err := json.Unmarshal([]byte(reply), &data); err != nil {
- return nil, err
- }
- return data, err
-}
-
-// Config returns a container's Config ... same as ctr.Config()
-func (c *Container) Config() *libpod.ContainerConfig {
- if c.config != nil {
- return c.config
- }
- return c.Runtime.Config(c.ID())
-}
diff --git a/libpod/adapter/images_remote.go b/libpod/adapter/images_remote.go
deleted file mode 100644
index e7b38dccc..000000000
--- a/libpod/adapter/images_remote.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build remoteclient
-
-package adapter
-
-import (
- "context"
- "encoding/json"
-
- iopodman "github.com/containers/libpod/cmd/podman/varlink"
- "github.com/containers/libpod/pkg/inspect"
-)
-
-// Inspect returns an ImageData struct over a varlink connection
-func (i *ContainerImage) Inspect(ctx context.Context) (*inspect.ImageData, error) {
- reply, err := iopodman.InspectImage().Call(i.Runtime.Conn, i.ID())
- if err != nil {
- return nil, err
- }
- data := inspect.ImageData{}
- if err := json.Unmarshal([]byte(reply), &data); err != nil {
- return nil, err
- }
- return &data, nil
-}
diff --git a/libpod/adapter/info_remote.go b/libpod/adapter/info_remote.go
deleted file mode 100644
index 3b691ed17..000000000
--- a/libpod/adapter/info_remote.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// +build remoteclient
-
-package adapter
-
-import (
- "encoding/json"
-
- "github.com/containers/libpod/cmd/podman/varlink"
- "github.com/containers/libpod/libpod"
-)
-
-// Info returns information for the host system and its components
-func (r RemoteRuntime) Info() ([]libpod.InfoData, error) {
- // TODO the varlink implementation for info should be updated to match the output for regular info
- var (
- reply []libpod.InfoData
- hostInfo map[string]interface{}
- store map[string]interface{}
- )
-
- registries := make(map[string]interface{})
- insecureRegistries := make(map[string]interface{})
- conn, err := r.Connect()
- if err != nil {
- return nil, err
- }
- defer conn.Close()
- info, err := iopodman.GetInfo().Call(conn)
- if err != nil {
- return nil, err
- }
-
- // info.host -> map[string]interface{}
- h, err := json.Marshal(info.Host)
- if err != nil {
- return nil, err
- }
- json.Unmarshal(h, &hostInfo)
-
- // info.store -> map[string]interface{}
- s, err := json.Marshal(info.Store)
- if err != nil {
- return nil, err
- }
- json.Unmarshal(s, &store)
-
- registries["registries"] = info.Registries
- insecureRegistries["registries"] = info.Insecure_registries
-
- // Add everything to the reply
- reply = append(reply, libpod.InfoData{Type: "host", Data: hostInfo})
- reply = append(reply, libpod.InfoData{Type: "registries", Data: registries})
- reply = append(reply, libpod.InfoData{Type: "insecure registries", Data: insecureRegistries})
- reply = append(reply, libpod.InfoData{Type: "store", Data: store})
- return reply, nil
-}
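
The Info implementation above relies on a JSON round-trip to turn typed varlink structs into generic maps (note that it drops the Unmarshal errors). A self-contained sketch of that conversion trick, using an invented stand-in struct:

package main

import (
	"encoding/json"
	"fmt"
)

// hostInfo is a hypothetical stand-in for the typed struct varlink returns.
type hostInfo struct {
	Os   string `json:"os"`
	Arch string `json:"arch"`
}

func main() {
	b, err := json.Marshal(hostInfo{Os: "linux", Arch: "amd64"})
	if err != nil {
		panic(err)
	}
	var m map[string]interface{}
	if err := json.Unmarshal(b, &m); err != nil { // unlike the code above, check the error
		panic(err)
	}
	fmt.Println(m["os"], m["arch"]) // linux amd64
}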
diff --git a/libpod/adapter/runtime.go b/libpod/adapter/runtime.go
deleted file mode 100644
index 2c408dd2f..000000000
--- a/libpod/adapter/runtime.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// +build !remoteclient
-
-package adapter
-
-import (
- "context"
- "github.com/pkg/errors"
- "io"
- "io/ioutil"
- "os"
- "strconv"
-
- "github.com/containers/image/types"
- "github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/libpod"
- "github.com/containers/libpod/libpod/image"
- "github.com/containers/libpod/pkg/rootless"
- "github.com/urfave/cli"
-)
-
-// LocalRuntime describes a typical libpod runtime
-type LocalRuntime struct {
- *libpod.Runtime
- Remote bool
-}
-
-// ContainerImage ...
-type ContainerImage struct {
- *image.Image
-}
-
-// Container ...
-type Container struct {
- *libpod.Container
-}
-
-// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
-func GetRuntime(c *cli.Context) (*LocalRuntime, error) {
- runtime, err := libpodruntime.GetRuntime(c)
- if err != nil {
- return nil, err
- }
- return &LocalRuntime{
- Runtime: runtime,
- }, nil
-}
-
-// GetImages returns a slice of images as ContainerImages
-func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) {
- var containerImages []*ContainerImage
- images, err := r.Runtime.ImageRuntime().GetImages()
- if err != nil {
- return nil, err
- }
- for _, i := range images {
- containerImages = append(containerImages, &ContainerImage{i})
- }
- return containerImages, nil
-
-}
-
-// NewImageFromLocal returns a ContainerImage representation of an image from local storage
-func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) {
- img, err := r.Runtime.ImageRuntime().NewFromLocal(name)
- if err != nil {
- return nil, err
- }
- return &ContainerImage{img}, nil
-}
-
-// LoadFromArchiveReference calls into local storage to load an image from an archive
-func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) {
- var containerImages []*ContainerImage
- imgs, err := r.Runtime.ImageRuntime().LoadFromArchiveReference(ctx, srcRef, signaturePolicyPath, writer)
- if err != nil {
- return nil, err
- }
- for _, i := range imgs {
- ci := ContainerImage{i}
- containerImages = append(containerImages, &ci)
- }
- return containerImages, nil
-}
-
-// New calls into local storage to look for an image in local storage or to pull it
-func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, forcePull bool, label *string) (*ContainerImage, error) {
- img, err := r.Runtime.ImageRuntime().New(ctx, name, signaturePolicyPath, authfile, writer, dockeroptions, signingoptions, forcePull, label)
- if err != nil {
- return nil, err
- }
- return &ContainerImage{img}, nil
-}
-
-// RemoveImage calls into local storage and removes an image
-func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (string, error) {
- return r.Runtime.RemoveImage(ctx, img.Image, force)
-}
-
-// LookupContainer ...
-func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
- ctr, err := r.Runtime.LookupContainer(idOrName)
- if err != nil {
- return nil, err
- }
- return &Container{ctr}, nil
-}
-
-// PruneImages is a wrapper for PruneImages in the image pkg
-func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
- return r.ImageRuntime().PruneImages(all)
-}
-
-// Export is a wrapper to container export to a tarfile
-func (r *LocalRuntime) Export(name string, path string) error {
- ctr, err := r.Runtime.LookupContainer(name)
- if err != nil {
- return errors.Wrapf(err, "error looking up container %q", name)
- }
- if os.Geteuid() != 0 {
- state, err := ctr.State()
- if err != nil {
- return errors.Wrapf(err, "cannot read container state %q", ctr.ID())
- }
- if state == libpod.ContainerStateRunning || state == libpod.ContainerStatePaused {
- data, err := ioutil.ReadFile(ctr.Config().ConmonPidFile)
- if err != nil {
- return errors.Wrapf(err, "cannot read conmon PID file %q", ctr.Config().ConmonPidFile)
- }
- conmonPid, err := strconv.Atoi(string(data))
- if err != nil {
- return errors.Wrapf(err, "cannot parse PID %q", data)
- }
- became, ret, err := rootless.JoinDirectUserAndMountNS(uint(conmonPid))
- if err != nil {
- return err
- }
- if became {
- os.Exit(ret)
- }
- } else {
- became, ret, err := rootless.BecomeRootInUserNS()
- if err != nil {
- return err
- }
- if became {
- os.Exit(ret)
- }
- }
- }
-
- return ctr.Export(path)
-}
-
-// Import is a wrapper to import a container image
-func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) {
- return r.Runtime.Import(ctx, source, reference, changes, history, quiet)
-}
diff --git a/libpod/adapter/runtime_remote.go b/libpod/adapter/runtime_remote.go
deleted file mode 100644
index c73e98c95..000000000
--- a/libpod/adapter/runtime_remote.go
+++ /dev/null
@@ -1,434 +0,0 @@
-// +build remoteclient
-
-package adapter
-
-import (
- "bufio"
- "context"
- "encoding/json"
- "fmt"
- "github.com/pkg/errors"
- "io"
- "os"
- "strings"
- "time"
-
- "github.com/containers/image/types"
- "github.com/containers/libpod/cmd/podman/varlink"
- "github.com/containers/libpod/libpod"
- "github.com/containers/libpod/libpod/image"
- "github.com/opencontainers/go-digest"
- "github.com/sirupsen/logrus"
- "github.com/urfave/cli"
- "github.com/varlink/go/varlink"
-)
-
-// RemoteImageRuntime is a wrapper for the image runtime
-type RemoteImageRuntime struct{}
-
-// RemoteRuntime describes a wrapper runtime struct
-type RemoteRuntime struct {
- Conn *varlink.Connection
- Remote bool
-}
-
-// LocalRuntime describes a typical libpod runtime
-type LocalRuntime struct {
- *RemoteRuntime
-}
-
-// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
-func GetRuntime(c *cli.Context) (*LocalRuntime, error) {
- runtime := RemoteRuntime{}
- conn, err := runtime.Connect()
- if err != nil {
- return nil, err
- }
- rr := RemoteRuntime{
- Conn: conn,
- Remote: true,
- }
- foo := LocalRuntime{
- &rr,
- }
- return &foo, nil
-}
-
-// Shutdown is a bogus wrapper for compat with the libpod runtime
-func (r RemoteRuntime) Shutdown(force bool) error {
- return nil
-}
-
-// ContainerImage
-type ContainerImage struct {
- remoteImage
-}
-
-type remoteImage struct {
- ID string
- Labels map[string]string
- RepoTags []string
- RepoDigests []string
- Parent string
- Size int64
- Created time.Time
- InputName string
- Names []string
- Digest digest.Digest
- isParent bool
- Runtime *LocalRuntime
-}
-
-// Container ...
-type Container struct {
- remoteContainer
-}
-
-// remoteContainer ....
-type remoteContainer struct {
- Runtime *LocalRuntime
- config *libpod.ContainerConfig
- state *libpod.ContainerState
-}
-
-// GetImages returns a slice of ContainerImages over a varlink connection
-func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) {
- var newImages []*ContainerImage
- images, err := iopodman.ListImages().Call(r.Conn)
- if err != nil {
- return nil, err
- }
- for _, i := range images {
- name := i.Id
- if len(i.RepoTags) > 1 {
- name = i.RepoTags[0]
- }
- newImage, err := imageInListToContainerImage(i, name, r)
- if err != nil {
- return nil, err
- }
- newImages = append(newImages, newImage)
- }
- return newImages, nil
-}
-
-func imageInListToContainerImage(i iopodman.ImageInList, name string, runtime *LocalRuntime) (*ContainerImage, error) {
- created, err := splitStringDate(i.Created)
- if err != nil {
- return nil, err
- }
- ri := remoteImage{
- InputName: name,
- ID: i.Id,
- Labels: i.Labels,
- RepoTags: i.RepoTags,
- RepoDigests: i.RepoTags,
- Parent: i.ParentId,
- Size: i.Size,
- Created: created,
- Names: i.RepoTags,
- isParent: i.IsParent,
- Runtime: runtime,
- }
- return &ContainerImage{ri}, nil
-}
-
-// NewImageFromLocal returns a ContainerImage representation of an image over varlink
-func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) {
- img, err := iopodman.GetImage().Call(r.Conn, name)
- if err != nil {
- return nil, err
- }
- return imageInListToContainerImage(img, name, r)
-
-}
-
-// LoadFromArchiveReference creates an image from a local archive
-func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) {
- // TODO We need to find a way to leak certDir, creds, and the tlsverify into this function; normally these would
- // come from CLI options, but we don't want those in here either.
- imageID, err := iopodman.PullImage().Call(r.Conn, srcRef.DockerReference().String(), "", "", signaturePolicyPath, true)
- if err != nil {
- return nil, err
- }
- newImage, err := r.NewImageFromLocal(imageID)
- if err != nil {
- return nil, err
- }
- return []*ContainerImage{newImage}, nil
-}
-
-// New calls into local storage to look for an image in local storage or to pull it
-func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, forcePull bool, label *string) (*ContainerImage, error) {
- if label != nil {
- return nil, errors.New("the remote client function does not support checking a remote image for a label")
- }
- // TODO Creds needs to be figured out here too, like above
- tlsBool := dockeroptions.DockerInsecureSkipTLSVerify
- // Remember SkipTlsVerify is the opposite of tlsverify
- // If tlsBool is true or undefined, we do not skip
- SkipTlsVerify := false
- if tlsBool == types.OptionalBoolFalse {
- SkipTlsVerify = true
- }
- imageID, err := iopodman.PullImage().Call(r.Conn, name, dockeroptions.DockerCertPath, "", signaturePolicyPath, SkipTlsVerify)
- if err != nil {
- return nil, err
- }
- newImage, err := r.NewImageFromLocal(imageID)
- if err != nil {
- return nil, err
- }
- return newImage, nil
-}
-
-func splitStringDate(d string) (time.Time, error) {
- fields := strings.Fields(d)
- t := fmt.Sprintf("%sT%sZ", fields[0], fields[1])
- return time.ParseInLocation(time.RFC3339Nano, t, time.UTC)
-}
-
-// IsParent goes through the layers in the store and checks if i.TopLayer is
-// the parent of any other layer in store. Double check that image with that
-// layer exists as well.
-func (ci *ContainerImage) IsParent() (bool, error) {
- return ci.remoteImage.isParent, nil
-}
-
-// ID returns the image ID as a string
-func (ci *ContainerImage) ID() string {
- return ci.remoteImage.ID
-}
-
-// Names returns a string array of names associated with the image
-func (ci *ContainerImage) Names() []string {
- return ci.remoteImage.Names
-}
-
-// Created returns the time the image was created
-func (ci *ContainerImage) Created() time.Time {
- return ci.remoteImage.Created
-}
-
-// Size returns the size of the image
-func (ci *ContainerImage) Size(ctx context.Context) (*uint64, error) {
- usize := uint64(ci.remoteImage.Size)
- return &usize, nil
-}
-
-// Digest returns the image's digest
-func (ci *ContainerImage) Digest() digest.Digest {
- return ci.remoteImage.Digest
-}
-
-// Labels returns a map of the image's labels
-func (ci *ContainerImage) Labels(ctx context.Context) (map[string]string, error) {
- return ci.remoteImage.Labels, nil
-}
-
-// Dangling returns a bool if the image is "dangling"
-func (ci *ContainerImage) Dangling() bool {
- return len(ci.Names()) == 0
-}
-
-// TagImage ...
-func (ci *ContainerImage) TagImage(tag string) error {
- _, err := iopodman.TagImage().Call(ci.Runtime.Conn, ci.ID(), tag)
- return err
-}
-
-// RemoveImage calls varlink to remove an image
-func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (string, error) {
- return iopodman.RemoveImage().Call(r.Conn, img.InputName, force)
-}
-
-// History returns the history of an image and its layers
-func (ci *ContainerImage) History(ctx context.Context) ([]*image.History, error) {
- var imageHistories []*image.History
-
- reply, err := iopodman.HistoryImage().Call(ci.Runtime.Conn, ci.InputName)
- if err != nil {
- return nil, err
- }
- for _, h := range reply {
- created, err := splitStringDate(h.Created)
- if err != nil {
- return nil, err
- }
- ih := image.History{
- ID: h.Id,
- Created: &created,
- CreatedBy: h.CreatedBy,
- Size: h.Size,
- Comment: h.Comment,
- }
- imageHistories = append(imageHistories, &ih)
- }
- return imageHistories, nil
-}
-
-// LookupContainer gets basic information about a container over a varlink
-// connection and then translates it to a *Container
-func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
- state, err := r.ContainerState(idOrName)
- if err != nil {
- return nil, err
- }
- config := r.Config(idOrName)
- if err != nil {
- return nil, err
- }
-
- rc := remoteContainer{
- r,
- config,
- state,
- }
-
- c := Container{
- rc,
- }
- return &c, nil
-}
-
-func (r *LocalRuntime) GetLatestContainer() (*Container, error) {
- return nil, libpod.ErrNotImplemented
-}
-
-// ContainerState returns the "state" of the container.
-func (r *LocalRuntime) ContainerState(name string) (*libpod.ContainerState, error) { //no-lint
- reply, err := iopodman.ContainerStateData().Call(r.Conn, name)
- if err != nil {
- return nil, err
- }
- data := libpod.ContainerState{}
- if err := json.Unmarshal([]byte(reply), &data); err != nil {
- return nil, err
- }
- return &data, err
-
-}
-
-// Config returns a container config
-func (r *LocalRuntime) Config(name string) *libpod.ContainerConfig {
- // TODO the Spec being returned is not populated. Matt and I could not figure out why. Will defer
- // further looking into it for after devconf.
- // The libpod function for this has no errors so we are kind of in a tough
- // spot here. Logging the errors for now.
- reply, err := iopodman.ContainerConfig().Call(r.Conn, name)
- if err != nil {
- logrus.Error("call to container.config failed")
- }
- data := libpod.ContainerConfig{}
- if err := json.Unmarshal([]byte(reply), &data); err != nil {
- logrus.Error("failed to unmarshal container inspect data")
- }
- return &data
-
-}
-
-// PruneImages is the wrapper call for a remote-client to prune images
-func (r *LocalRuntime) PruneImages(all bool) ([]string, error) {
- return iopodman.ImagesPrune().Call(r.Conn, all)
-}
-
-// Export is a wrapper to container export to a tarfile
-func (r *LocalRuntime) Export(name string, path string) error {
- tempPath, err := iopodman.ExportContainer().Call(r.Conn, name, "")
- if err != nil {
- return err
- }
-
- outputFile, err := os.Create(path)
- if err != nil {
- return err
- }
- defer outputFile.Close()
-
- writer := bufio.NewWriter(outputFile)
- defer writer.Flush()
-
- reply, err := iopodman.ReceiveFile().Send(r.Conn, varlink.Upgrade, tempPath, true)
- if err != nil {
- return err
- }
-
- length, _, err := reply()
- if err != nil {
- return errors.Wrap(err, "unable to get file length for transfer")
- }
-
- reader := r.Conn.Reader
- if _, err := io.CopyN(writer, reader, length); err != nil {
- return errors.Wrap(err, "file transer failed")
- }
-
- return nil
-}
-
-// Import implements the remote calls required to import a container image to the store
-func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) {
- // First we send the file to the host
- fs, err := os.Open(source)
- if err != nil {
- return "", err
- }
-
- fileInfo, err := fs.Stat()
- if err != nil {
- return "", err
- }
- reply, err := iopodman.SendFile().Send(r.Conn, varlink.Upgrade, "", int64(fileInfo.Size()))
- if err != nil {
- return "", err
- }
- _, _, err = reply()
- if err != nil {
- return "", err
- }
-
- reader := bufio.NewReader(fs)
- _, err = reader.WriteTo(r.Conn.Writer)
- if err != nil {
- return "", err
- }
- r.Conn.Writer.Flush()
-
- // All was sent, wait for the ACK from the server
- tempFile, err := r.Conn.Reader.ReadString(':')
- if err != nil {
- return "", err
- }
-
- // r.Conn is kaput at this point due to the upgrade
- if err := r.RemoteRuntime.RefreshConnection(); err != nil {
- return "", err
-
- }
- return iopodman.ImportImage().Call(r.Conn, strings.TrimRight(tempFile, ":"), reference, history, changes, true)
-}
-
-// GetAllVolumes retrieves all the volumes
-func (r *LocalRuntime) GetAllVolumes() ([]*libpod.Volume, error) {
- return nil, libpod.ErrNotImplemented
-}
-
-// RemoveVolume removes a volume
-func (r *LocalRuntime) RemoveVolume(ctx context.Context, v *libpod.Volume, force, prune bool) error {
- return libpod.ErrNotImplemented
-}
-
-// GetContainers retrieves all containers from the state
-// Filters can be provided which will determine what containers are included in
-// the output. Multiple filters are handled by ANDing their output, so only
-// containers matching all filters are returned
-func (r *LocalRuntime) GetContainers(filters ...libpod.ContainerFilter) ([]*libpod.Container, error) {
- return nil, libpod.ErrNotImplemented
-}
-
-// RemoveContainer removes the given container
-// If force is specified, the container will be stopped first
-// Otherwise, RemoveContainer will return an error if the container is running
-func (r *LocalRuntime) RemoveContainer(ctx context.Context, c *libpod.Container, force bool) error {
- return libpod.ErrNotImplemented
-}
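
One detail of the deleted remote runtime is worth a sketch: splitStringDate assumes varlink hands back timestamps in Go's default time.Time string form ("2019-02-05 20:28:46.231371054 +0000 UTC" style) and rebuilds an RFC3339Nano value from the first two space-separated fields. A runnable reproduction, with an invented sample timestamp:

package main

import (
	"fmt"
	"strings"
	"time"
)

// splitStringDate mirrors the deleted helper above.
func splitStringDate(d string) (time.Time, error) {
	fields := strings.Fields(d)
	t := fmt.Sprintf("%sT%sZ", fields[0], fields[1])
	return time.ParseInLocation(time.RFC3339Nano, t, time.UTC)
}

func main() {
	// Hypothetical timestamp in the format the varlink API returns.
	created, err := splitStringDate("2019-02-05 20:28:46.231371054 +0000 UTC")
	if err != nil {
		panic(err)
	}
	fmt.Println(created.Format(time.RFC3339Nano)) // 2019-02-05T20:28:46.231371054Z
}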
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 5bc15dd7f..25ef5cd0e 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -783,6 +783,94 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
return ctrs, nil
}
+// RewriteContainerConfig rewrites a container's configuration.
+// WARNING: This function is DANGEROUS. Do not use without reading the full
+// comment on this function in state.go.
+func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return ErrCtrRemoved
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ ctrBkt, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
+ if ctrDB == nil {
+ ctr.valid = false
+ return errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+ }
+
+ if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID())
+ }
+
+ return nil
+ })
+ return err
+}
+
+// RewritePodConfig rewrites a pod's configuration.
+// WARNING: This function is DANGEROUS. Do not use without reading the full
+// comment on this function in state.go.
+func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
+ if !s.valid {
+ return ErrDBClosed
+ }
+
+ if !pod.valid {
+ return ErrPodRemoved
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", pod.ID())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.closeDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ podBkt, err := getPodBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ podDB := podBkt.Bucket([]byte(pod.ID()))
+ if podDB == nil {
+ pod.valid = false
+ return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
+ }
+
+ if err := podDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating pod %s config JSON", pod.ID())
+ }
+
+ return nil
+ })
+ return err
+}
+
// Pod retrieves a pod given its full ID
func (s *BoltState) Pod(id string) (*Pod, error) {
if id == "" {
@@ -1281,10 +1369,6 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
return ErrDBClosed
}
- if !volume.valid {
- return ErrVolumeRemoved
- }
-
volName := []byte(volume.Name())
db, err := s.getDBCon()
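
The new rewrite helpers exist for wholesale config replacement; runtime_renumber.go (added in this diff, see the diffstat) is the intended caller. A hedged sketch of that pattern, written as if inside the libpod package and assuming the lock manager's AllocateLock and the copy semantics of Config(); the loop body is illustrative, not the actual renumbering code:

// Sketch: reallocate a lock for every container and persist the new LockID.
ctrs, err := r.state.AllContainers()
if err != nil {
	return err
}
for _, ctr := range ctrs {
	lock, err := r.lockManager.AllocateLock()
	if err != nil {
		return err
	}
	cfg := ctr.Config() // Config() hands back a copy, safe to mutate
	cfg.LockID = lock.ID()
	if err := r.state.RewriteContainerConfig(ctr, cfg); err != nil {
		return err
	}
}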
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index ea150cfac..3d749849d 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -348,13 +348,6 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
}
- // Get the lock
- lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
- if err != nil {
- return errors.Wrapf(err, "error retrieving lockfile for volume %s", string(name))
- }
- volume.lock = lock
-
volume.runtime = s.runtime
volume.valid = true
diff --git a/libpod/container.go b/libpod/container.go
index b0589be3b..75f4a4a4f 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -358,8 +358,7 @@ type ContainerConfig struct {
ExitCommand []string `json:"exitCommand,omitempty"`
// LocalVolumes are the built-in volumes we get from the --volumes-from flag
// It picks up the built-in volumes of the container used by --volumes-from
- LocalVolumes []string
-
+ LocalVolumes []spec.Mount
// IsInfra is a bool indicating whether this container is an infra container used for
// sharing kernel namespaces in a pod
IsInfra bool `json:"pause"`
@@ -557,8 +556,16 @@ func (c *Container) NewNetNS() bool {
// PortMappings returns the ports that will be mapped into a container if
// a new network namespace is created
// If NewNetNS() is false, this value is unused
-func (c *Container) PortMappings() []ocicni.PortMapping {
- return c.config.PortMappings
+func (c *Container) PortMappings() ([]ocicni.PortMapping, error) {
+ // First check if the container belongs to a network namespace (like a pod)
+ if len(c.config.NetNsCtr) > 0 {
+ netNsCtr, err := c.runtime.LookupContainer(c.config.NetNsCtr)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to lookup network namespace for container %s", c.ID())
+ }
+ return netNsCtr.PortMappings()
+ }
+ return c.config.PortMappings, nil
}
// DNSServers returns DNS servers that will be used in the container's
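
Since PortMappings now chases NetNsCtr (so a pod member reports its infra container's ports), it can fail, and callers gain an error to handle. A minimal sketch; printPorts is a hypothetical external helper:

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod"
)

// printPorts shows the new error-returning signature in use.
func printPorts(c *libpod.Container) error {
	mappings, err := c.PortMappings()
	if err != nil {
		return err
	}
	for _, pm := range mappings {
		fmt.Printf("%s: host %d -> container %d\n", pm.Protocol, pm.HostPort, pm.ContainerPort)
	}
	return nil
}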
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 149867759..09d7f220d 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -7,7 +7,6 @@ import (
"io/ioutil"
"os"
"strconv"
- "strings"
"time"
"github.com/containers/libpod/libpod/driver"
@@ -15,6 +14,7 @@ import (
"github.com/containers/libpod/pkg/lookup"
"github.com/containers/storage/pkg/stringid"
"github.com/docker/docker/daemon/caps"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
@@ -23,6 +23,10 @@ import (
// Init creates a container in the OCI runtime
func (c *Container) Init(ctx context.Context) (err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "containerInit")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -38,24 +42,15 @@ func (c *Container) Init(ctx context.Context) (err error) {
return errors.Wrapf(ErrCtrExists, "container %s has already been created in runtime", c.ID())
}
- notRunning, err := c.checkDependenciesRunning()
- if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
- }
- if len(notRunning) > 0 {
- depString := strings.Join(notRunning, ",")
- return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
+ // Don't recursively start dependencies; just verify they are running
+ if err := c.checkDependenciesAndHandleError(ctx); err != nil {
+ return err
}
- defer func() {
- if err != nil {
- if err2 := c.cleanup(ctx); err2 != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
- }
- }
- }()
-
if err := c.prepare(); err != nil {
+ if err2 := c.cleanup(ctx); err2 != nil {
+ logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
+ }
return err
}
@@ -68,13 +63,18 @@ func (c *Container) Init(ctx context.Context) (err error) {
return c.init(ctx)
}
-// Start starts a container
-// Start can start configured, created or stopped containers
+// Start starts a container.
+// Start can start configured, created or stopped containers.
// For configured containers, the container will be initialized first, then
-// started
+// started.
// Stopped containers will be deleted and re-created in runc, undergoing a fresh
-// Init()
-func (c *Container) Start(ctx context.Context) (err error) {
+// Init().
+// If recursive is set, Start will also start all containers this container depends on.
+func (c *Container) Start(ctx context.Context, recursive bool) (err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "containerStart")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -83,64 +83,26 @@ func (c *Container) Start(ctx context.Context) (err error) {
return err
}
}
-
- // Container must be created or stopped to be started
- if !(c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateCreated ||
- c.state.State == ContainerStateStopped ||
- c.state.State == ContainerStateExited) {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
- }
-
- notRunning, err := c.checkDependenciesRunning()
- if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
- }
- if len(notRunning) > 0 {
- depString := strings.Join(notRunning, ",")
- return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
- }
-
- defer func() {
- if err != nil {
- if err2 := c.cleanup(ctx); err2 != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
- }
- }
- }()
-
- if err := c.prepare(); err != nil {
+ if err := c.prepareToStart(ctx, recursive); err != nil {
return err
}
- if c.state.State == ContainerStateStopped {
- // Reinitialize the container if we need to
- if err := c.reinit(ctx); err != nil {
- return err
- }
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
- // Or initialize it if necessary
- if err := c.init(ctx); err != nil {
- return err
- }
- }
-
// Start the container
return c.start()
}
-// StartAndAttach starts a container and attaches to it
-// StartAndAttach can start configured, created or stopped containers
+// StartAndAttach starts a container and attaches to it.
+// StartAndAttach can start configured, created or stopped containers.
// For configured containers, the container will be initialized first, then
-// started
+// started.
// Stopped containers will be deleted and re-created in runc, undergoing a fresh
-// Init()
+// Init().
// If successful, an error channel will be returned containing the result of the
// attach call.
// The channel will be closed automatically after the result of attach has been
-// sent
-func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize) (attachResChan <-chan error, err error) {
+// sent.
+// If recursive is set, StartAndAttach will also start all containers this container depends on.
+func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, recursive bool) (attachResChan <-chan error, err error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -150,48 +112,10 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams,
}
}
- // Container must be created or stopped to be started
- if !(c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateCreated ||
- c.state.State == ContainerStateStopped ||
- c.state.State == ContainerStateExited) {
- return nil, errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
- }
-
- notRunning, err := c.checkDependenciesRunning()
- if err != nil {
- return nil, errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
- }
- if len(notRunning) > 0 {
- depString := strings.Join(notRunning, ",")
- return nil, errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
- }
-
- defer func() {
- if err != nil {
- if err2 := c.cleanup(ctx); err2 != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
- }
- }
- }()
-
- if err := c.prepare(); err != nil {
+ if err := c.prepareToStart(ctx, recursive); err != nil {
return nil, err
}
- if c.state.State == ContainerStateStopped {
- // Reinitialize the container if we need to
- if err := c.reinit(ctx); err != nil {
- return nil, err
- }
- } else if c.state.State == ContainerStateConfigured ||
- c.state.State == ContainerStateExited {
- // Or initialize it if necessary
- if err := c.init(ctx); err != nil {
- return nil, err
- }
- }
-
attachChan := make(chan error)
// Attach to the container before starting it
@@ -205,6 +129,24 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams,
return attachChan, nil
}
+// RestartWithTimeout restarts a running container with the given timeout in seconds
+func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) (err error) {
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return err
+ }
+ }
+
+ if err = c.checkDependenciesAndHandleError(ctx); err != nil {
+ return err
+ }
+
+ return c.restartWithTimeout(ctx, timeout)
+}
+
// Stop uses the container's stop signal (or SIGTERM if no signal was specified)
// to stop the container, and if it has not stopped after container's stop
// timeout, SIGKILL is used to attempt to forcibly stop the container
@@ -730,28 +672,6 @@ func (c *Container) Sync() error {
return nil
}
-// RestartWithTimeout restarts a running container and takes a given timeout in uint
-func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) (err error) {
- if !c.batched {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- if err := c.syncContainer(); err != nil {
- return err
- }
- }
-
- notRunning, err := c.checkDependenciesRunning()
- if err != nil {
- return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
- }
- if len(notRunning) > 0 {
- depString := strings.Join(notRunning, ",")
- return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
- }
- return c.restartWithTimeout(ctx, timeout)
-}
-
// Refresh refreshes a container's state in the database, restarting the
// container if it is running
func (c *Container) Refresh(ctx context.Context) error {
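
Both Start and StartAndAttach gain the recursive flag; a hedged sketch of how a caller opts in (with recursive=true, starting a container inside a pod also brings up its infra container). startWithDeps is a hypothetical wrapper:

package main

import (
	"context"

	"github.com/containers/libpod/libpod"
)

func startWithDeps(ctx context.Context, c *libpod.Container) error {
	// recursive=true: also start everything this container depends on.
	return c.Start(ctx, true)
}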
diff --git a/libpod/container_attach_linux.go b/libpod/container_attach_linux.go
index 1d6f0bd96..3ff6ddc76 100644
--- a/libpod/container_attach_linux.go
+++ b/libpod/container_attach_linux.go
@@ -109,8 +109,8 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi
case err := <-receiveStdoutError:
return err
case err := <-stdinDone:
- if _, ok := err.(utils.DetachError); ok {
- return nil
+ if err == ErrDetach {
+ return err
}
if streams.AttachOutput || streams.AttachError {
return <-receiveStdoutError
diff --git a/libpod/container_commit.go b/libpod/container_commit.go
index 026611e51..5c4fd1a31 100644
--- a/libpod/container_commit.go
+++ b/libpod/container_commit.go
@@ -162,7 +162,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
importBuilder.SetWorkDir(splitChange[1])
}
}
- candidates, _, err := util.ResolveName(destImage, "", sc, c.runtime.store)
+ candidates, _, _, err := util.ResolveName(destImage, "", sc, c.runtime.store)
if err != nil {
return nil, errors.Wrapf(err, "error resolving name %q", destImage)
}
diff --git a/libpod/container_graph.go b/libpod/container_graph.go
index 44a1f1736..da93be77d 100644
--- a/libpod/container_graph.go
+++ b/libpod/container_graph.go
@@ -1,6 +1,9 @@
package libpod
import (
+ "context"
+ "strings"
+
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -169,3 +172,97 @@ func detectCycles(graph *containerGraph) (bool, error) {
return false, nil
}
+
+// Visit a node on the container graph and start the container, or set an error if
+// a dependency failed to start. If restart is true, startNode restarts the node instead of starting it.
+func startNode(ctx context.Context, node *containerNode, setError bool, ctrErrors map[string]error, ctrsVisited map[string]bool, restart bool) {
+ // First, check if we have already visited the node
+ if ctrsVisited[node.id] {
+ return
+ }
+
+ // If setError is true, a dependency of us failed
+ // Mark us as failed and recurse
+ if setError {
+ // Mark us as visited, and set an error
+ ctrsVisited[node.id] = true
+ ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id)
+
+ // Hit anyone who depends on us, and set errors on them too
+ for _, successor := range node.dependedOn {
+ startNode(ctx, successor, true, ctrErrors, ctrsVisited, restart)
+ }
+
+ return
+ }
+
+ // Have all our dependencies started?
+ // If not, don't visit the node yet
+ depsVisited := true
+ for _, dep := range node.dependsOn {
+ depsVisited = depsVisited && ctrsVisited[dep.id]
+ }
+ if !depsVisited {
+ // Don't visit us yet, all dependencies are not up
+ // We'll hit the dependencies eventually, and when we do it will
+ // recurse here
+ return
+ }
+
+ // Going to try to start the container, mark us as visited
+ ctrsVisited[node.id] = true
+
+ ctrErrored := false
+
+ // Check if dependencies are running
+ // Graph traversal means we should have started them
+ // But they could have died before we got here
+ // Does not require that the container be locked, we only need to lock
+ // the dependencies
+ depsStopped, err := node.container.checkDependenciesRunning()
+ if err != nil {
+ ctrErrors[node.id] = err
+ ctrErrored = true
+ } else if len(depsStopped) > 0 {
+ // Our dependencies are not running
+ depsList := strings.Join(depsStopped, ",")
+ ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
+ ctrErrored = true
+ }
+
+ // Lock before we start
+ node.container.lock.Lock()
+
+ // Sync the container to pick up current state
+ if !ctrErrored {
+ if err := node.container.syncContainer(); err != nil {
+ ctrErrored = true
+ ctrErrors[node.id] = err
+ }
+ }
+
+ // Start the container (only if it is not running)
+ if !ctrErrored {
+ if !restart && node.container.state.State != ContainerStateRunning {
+ if err := node.container.initAndStart(ctx); err != nil {
+ ctrErrored = true
+ ctrErrors[node.id] = err
+ }
+ }
+ if restart && node.container.state.State != ContainerStatePaused && node.container.state.State != ContainerStateUnknown {
+ if err := node.container.restartWithTimeout(ctx, node.container.config.StopTimeout); err != nil {
+ ctrErrored = true
+ ctrErrors[node.id] = err
+ }
+ }
+ }
+
+ node.container.lock.Unlock()
+
+ // Recurse to anyone who depends on us and start them
+ for _, successor := range node.dependedOn {
+ startNode(ctx, successor, ctrErrored, ctrErrors, ctrsVisited, restart)
+ }
+
+ return
+}
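
startNode is driven from the roots of the dependency graph; a sketch of the traversal loop, using package-internal identifiers, which mirrors the startDependencies helper added to container_internal.go later in this diff:

graph, err := buildContainerGraph(ctrs)
if err != nil {
	return err
}
ctrErrors := make(map[string]error)
ctrsVisited := make(map[string]bool)
// Begin at nodes with no dependencies; startNode recurses to dependents,
// accumulating per-container errors instead of aborting the walk.
for _, node := range graph.noDepNodes {
	startNode(ctx, node, false, ctrErrors, ctrsVisited, false)
}
if len(ctrErrors) > 0 {
	return errors.Wrapf(ErrInternal, "error starting some containers")
}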
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index b0dcc853e..e3753d825 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -10,22 +10,18 @@ import (
"path/filepath"
"strconv"
"strings"
- "syscall"
"time"
- "github.com/containers/buildah/imagebuildah"
"github.com/containers/libpod/pkg/ctime"
"github.com/containers/libpod/pkg/hooks"
"github.com/containers/libpod/pkg/hooks/exec"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/mount"
- "github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/text/language"
@@ -252,6 +248,10 @@ func (c *Container) syncContainer() error {
// Create container root filesystem for use
func (c *Container) setupStorage(ctx context.Context) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "setupStorage")
+ span.SetTag("type", "container")
+ defer span.Finish()
+
if !c.valid {
return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID())
}
@@ -489,9 +489,20 @@ func (c *Container) removeConmonFiles() error {
return errors.Wrapf(err, "error removing container %s OOM file", c.ID())
}
+ // Instead of outright deleting the exit file, rename it (if it exists).
+ // We want to retain it so we can get the exit code of containers which
+ // are removed (at least until we have a workable events system)
exitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, c.ID())
- if err := os.Remove(exitFile); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s exit file", c.ID())
+ oldExitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, fmt.Sprintf("%s-old", c.ID()))
+ if _, err := os.Stat(exitFile); err != nil {
+ if !os.IsNotExist(err) {
+ return errors.Wrapf(err, "error running stat on container %s exit file", c.ID())
+ }
+ } else if err == nil {
+ // Rename should replace the old exit file (if it exists)
+ if err := os.Rename(exitFile, oldExitFile); err != nil {
+ return errors.Wrapf(err, "error renaming container %s exit file", c.ID())
+ }
}
return nil
@@ -553,6 +564,160 @@ func (c *Container) save() error {
return nil
}
+// Checks that the container is in the right state, then initializes it in preparation for starting.
+// If recursive is true, each of the container's dependencies will be started.
+// Otherwise, this function returns an error if any dependency of this container is not running.
+func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err error) {
+ // Container must be created or stopped to be started
+ if !(c.state.State == ContainerStateConfigured ||
+ c.state.State == ContainerStateCreated ||
+ c.state.State == ContainerStateStopped ||
+ c.state.State == ContainerStateExited) {
+ return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
+ }
+
+ if !recursive {
+ if err := c.checkDependenciesAndHandleError(ctx); err != nil {
+ return err
+ }
+ } else {
+ if err := c.startDependencies(ctx); err != nil {
+ return err
+ }
+ }
+
+ defer func() {
+ if err != nil {
+ if err2 := c.cleanup(ctx); err2 != nil {
+ logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
+ }
+ }
+ }()
+
+ if err := c.prepare(); err != nil {
+ return err
+ }
+
+ if c.state.State == ContainerStateStopped {
+ // Reinitialize the container if we need to
+ if err := c.reinit(ctx); err != nil {
+ return err
+ }
+ } else if c.state.State == ContainerStateConfigured ||
+ c.state.State == ContainerStateExited {
+ // Or initialize it if necessary
+ if err := c.init(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// checks dependencies are running and prints a helpful message
+func (c *Container) checkDependenciesAndHandleError(ctx context.Context) error {
+ notRunning, err := c.checkDependenciesRunning()
+ if err != nil {
+ return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
+ }
+ if len(notRunning) > 0 {
+ depString := strings.Join(notRunning, ",")
+ return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
+ }
+
+ return nil
+}
+
+// Recursively start all dependencies of a container so the container can be started.
+func (c *Container) startDependencies(ctx context.Context) error {
+ depCtrIDs := c.Dependencies()
+ if len(depCtrIDs) == 0 {
+ return nil
+ }
+
+ depVisitedCtrs := make(map[string]*Container)
+ if err := c.getAllDependencies(depVisitedCtrs); err != nil {
+ return errors.Wrapf(err, "error starting dependency for container %s", c.ID())
+ }
+
+ // Because of how Go passes slices to functions, a slice cannot grow across function calls
+ // without clunky syntax. Work around this by translating the map into a slice for buildContainerGraph
+ depCtrs := make([]*Container, 0)
+ for _, ctr := range depVisitedCtrs {
+ depCtrs = append(depCtrs, ctr)
+ }
+
+ // Build a dependency graph of containers
+ graph, err := buildContainerGraph(depCtrs)
+ if err != nil {
+ return errors.Wrapf(err, "error generating dependency graph for container %s", c.ID())
+ }
+
+ // If there are no containers without dependencies, we can't start
+ // Error out
+ if len(graph.noDepNodes) == 0 {
+ // we have no dependencies that need starting, go ahead and return
+ if len(graph.nodes) == 0 {
+ return nil
+ }
+ return errors.Wrapf(ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID())
+ }
+
+ ctrErrors := make(map[string]error)
+ ctrsVisited := make(map[string]bool)
+
+ // Traverse the graph beginning at nodes with no dependencies
+ for _, node := range graph.noDepNodes {
+ startNode(ctx, node, false, ctrErrors, ctrsVisited, true)
+ }
+
+ if len(ctrErrors) > 0 {
+ logrus.Errorf("error starting some container dependencies")
+ for _, e := range ctrErrors {
+ logrus.Errorf("%q", e)
+ }
+ return errors.Wrapf(ErrInternal, "error starting some containers")
+ }
+ return nil
+}
+
+// getAllDependencies is a precursor to starting dependencies.
+// To start a container with all of its dependencies, we need to recursively find all dependencies
+// a container has, as well as each of those containers' dependencies, and so on.
+// To do so, keep track of containers already visited (so there aren't redundant state lookups),
+// and recursively search until we have reached the leaves of every dependency node.
+// Since we need to start all dependencies for our original container to successfully start, we propagate any errors
+// in looking up dependencies.
+// Note: this function is currently meant as a robust solution to a narrow problem: start an infra-container when
+// a container in the pod is run. It has not been tested for performance past one level, so expansion of recursive start
+// must be tested first.
+func (c *Container) getAllDependencies(visited map[string]*Container) error {
+ depIDs := c.Dependencies()
+ if len(depIDs) == 0 {
+ return nil
+ }
+ for _, depID := range depIDs {
+ if _, ok := visited[depID]; !ok {
+ dep, err := c.runtime.state.LookupContainer(depID)
+ if err != nil {
+ return err
+ }
+ status, err := dep.State()
+ if err != nil {
+ return err
+ }
+ // if the dependency is already running, we can assume its dependencies are also running
+ // so no need to add them to those we need to start
+ if status != ContainerStateRunning {
+ visited[depID] = dep
+ if err := dep.getAllDependencies(visited); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
// Check if a container's dependencies are running
// Returns a []string containing the IDs of dependencies that are not running
func (c *Container) checkDependenciesRunning() ([]string, error) {
@@ -630,6 +795,10 @@ func (c *Container) completeNetworkSetup() error {
// Initialize a container, creating it in the runtime
func (c *Container) init(ctx context.Context) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "init")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
// Generate the OCI spec
spec, err := c.generateSpec(ctx)
if err != nil {
@@ -663,6 +832,10 @@ func (c *Container) init(ctx context.Context) error {
// Deletes the container in the runtime, and resets its state to Exited.
// The container can be restarted cleanly after this.
func (c *Container) cleanupRuntime(ctx context.Context) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "cleanupRuntime")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
// If the container is not ContainerStateStopped, do nothing
if c.state.State != ContainerStateStopped {
return nil
@@ -698,6 +871,10 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
// Not necessary for ContainerStateExited - the container has already been
// removed from the runtime, so init() can proceed freely.
func (c *Container) reinit(ctx context.Context) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "reinit")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
logrus.Debugf("Recreating container %s in OCI runtime", c.ID())
if err := c.cleanupRuntime(ctx); err != nil {
@@ -930,6 +1107,10 @@ func (c *Container) cleanupStorage() error {
func (c *Container) cleanup(ctx context.Context) error {
var lastError error
+ span, _ := opentracing.StartSpanFromContext(ctx, "cleanup")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
logrus.Debugf("Cleaning up container %s", c.ID())
// Clean up network namespace, if present
@@ -961,6 +1142,10 @@ func (c *Container) cleanup(ctx context.Context) error {
// delete deletes the container and runs any configured poststop
// hooks.
func (c *Container) delete(ctx context.Context) (err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "delete")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
if err := c.runtime.ociRuntime.deleteContainer(c); err != nil {
return errors.Wrapf(err, "error removing container %s from runtime", c.ID())
}
@@ -976,6 +1161,10 @@ func (c *Container) delete(ctx context.Context) (err error) {
// the OCI Runtime Specification (which requires them to run
// post-delete, despite the stage name).
func (c *Container) postDeleteHooks(ctx context.Context) (err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "postDeleteHooks")
+ span.SetTag("struct", "container")
+ defer span.Finish()
+
if c.state.ExtensionStageHooks != nil {
extensionHooks, ok := c.state.ExtensionStageHooks["poststop"]
if ok {
@@ -1042,113 +1231,6 @@ func (c *Container) writeStringToRundir(destFile, output string) (string, error)
return filepath.Join(c.state.DestinationRunDir, destFile), nil
}
-func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator, execUser *user.ExecUser) error {
- var uid, gid int
- mountPoint := c.state.Mountpoint
- if !c.state.Mounted {
- return errors.Wrapf(ErrInternal, "container is not mounted")
- }
- newImage, err := c.runtime.imageRuntime.NewFromLocal(c.config.RootfsImageID)
- if err != nil {
- return err
- }
- imageData, err := newImage.Inspect(ctx)
- if err != nil {
- return err
- }
- // Add the built-in volumes of the container passed in to --volumes-from
- for _, vol := range c.config.LocalVolumes {
- if imageData.Config.Volumes == nil {
- imageData.Config.Volumes = map[string]struct{}{
- vol: {},
- }
- } else {
- imageData.Config.Volumes[vol] = struct{}{}
- }
- }
-
- if c.config.User != "" {
- if execUser == nil {
- return errors.Wrapf(ErrInternal, "nil pointer passed to addLocalVolumes for execUser")
- }
- uid = execUser.Uid
- gid = execUser.Gid
- }
-
- for k := range imageData.Config.Volumes {
- mount := spec.Mount{
- Destination: k,
- Type: "bind",
- Options: []string{"private", "bind", "rw"},
- }
- if MountExists(g.Mounts(), k) {
- continue
- }
- volumePath := filepath.Join(c.config.StaticDir, "volumes", k)
-
- // Ensure the symlinks are resolved
- resolvedSymlink, err := imagebuildah.ResolveSymLink(mountPoint, k)
- if err != nil {
- return errors.Wrapf(ErrCtrStateInvalid, "cannot resolve %s in %s for container %s", k, mountPoint, c.ID())
- }
- var srcPath string
- if resolvedSymlink != "" {
- srcPath = filepath.Join(mountPoint, resolvedSymlink)
- } else {
- srcPath = filepath.Join(mountPoint, k)
- }
-
- if _, err := os.Stat(srcPath); os.IsNotExist(err) {
- logrus.Infof("Volume image mount point %s does not exist in root FS, need to create it", k)
- if err = os.MkdirAll(srcPath, 0755); err != nil {
- return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
-
- if err = os.Chown(srcPath, uid, gid); err != nil {
- return errors.Wrapf(err, "error chowning directory %q for volume %q in container %q", srcPath, k, c.ID())
- }
- }
-
- if _, err := os.Stat(volumePath); os.IsNotExist(err) {
- if err = os.MkdirAll(volumePath, 0755); err != nil {
- return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
-
- if err = os.Chown(volumePath, uid, gid); err != nil {
- return errors.Wrapf(err, "error chowning directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
-
- if err = label.Relabel(volumePath, c.config.MountLabel, false); err != nil {
- return errors.Wrapf(err, "error relabeling directory %q for volume %q in container %q", volumePath, k, c.ID())
- }
- if err = chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error populating directory %q for volume %q in container %q using contents of %q", volumePath, k, c.ID(), srcPath)
- }
-
- // Set the volume path with the same owner and permission of source path
- sstat, _ := os.Stat(srcPath)
- st, ok := sstat.Sys().(*syscall.Stat_t)
- if !ok {
- return fmt.Errorf("could not convert to syscall.Stat_t")
- }
- uid := int(st.Uid)
- gid := int(st.Gid)
-
- if err := os.Lchown(volumePath, uid, gid); err != nil {
- return err
- }
- if os.Chmod(volumePath, sstat.Mode()); err != nil {
- return err
- }
-
- }
-
- mount.Source = volumePath
- g.AddMount(mount)
- }
- return nil
-}
-
// Save OCI spec to disk, replacing any existing specs for the container
func (c *Container) saveSpec(spec *spec.Spec) error {
// If the OCI spec already exists, we need to replace it
@@ -1292,3 +1374,30 @@ func getExcludedCGroups() (excludes []string) {
excludes = []string{"rdma"}
return
}
+
+// namedVolumes returns the names of the named volumes used by the container
+func (c *Container) namedVolumes() ([]string, error) {
+ var volumes []string
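+ // Heuristic: a mount whose source lives under the runtime's VolumePath is
+ // treated as a named volume; the first path component below VolumePath is
+ // the volume name, which is then verified against the state.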
+ for _, vol := range c.config.Spec.Mounts {
+ if strings.HasPrefix(vol.Source, c.runtime.config.VolumePath) {
+ volume := strings.TrimPrefix(vol.Source, c.runtime.config.VolumePath+"/")
+ split := strings.Split(volume, "/")
+ volume = split[0]
+ if _, err := c.runtime.state.Volume(volume); err == nil {
+ volumes = append(volumes, volume)
+ }
+ }
+ }
+ return volumes, nil
+}
+
+// copyWithTarFromImage copies the contents of src within the container's
+// image mount to dest on the host.
+// TODO: this should come from chrootarchive.
+func (c *Container) copyWithTarFromImage(src, dest string) error {
+ mountpoint, err := c.mount()
+ if err != nil {
+ return err
+ }
+ a := archive.NewDefaultArchiver()
+ source := filepath.Join(mountpoint, src)
+ return a.CopyWithTar(source, dest)
+}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index bcdfdaee3..b074efa3a 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -26,11 +26,11 @@ import (
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/secrets"
"github.com/containers/storage/pkg/idtools"
- "github.com/mrunalp/fileutils"
"github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@@ -170,10 +170,15 @@ func (c *Container) cleanupNetwork() error {
// Generate spec for a container
// Accepts a map of the container's dependencies
func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "generateSpec")
+ span.SetTag("type", "container")
+ defer span.Finish()
+
execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, nil)
if err != nil {
return nil, err
}
+
g := generate.NewFromSpec(c.config.Spec)
// If network namespace was requested, add it now
@@ -235,13 +240,6 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
}
}
- // Bind builtin image volumes
- if c.config.Rootfs == "" && c.config.ImageVolumes {
- if err := c.addLocalVolumes(ctx, &g, execUser); err != nil {
- return nil, errors.Wrapf(err, "error mounting image volumes")
- }
- }
-
if c.config.User != "" {
// User and Group must go together
g.SetProcessUID(uint32(execUser.Uid))
@@ -483,6 +481,19 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
if c.state.State != ContainerStateRunning {
return errors.Wrapf(ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
}
+
+ // Create the CRIU log file and label it
+ dumpLog := filepath.Join(c.bundlePath(), "dump.log")
+
+ logFile, err := os.OpenFile(dumpLog, os.O_CREATE, 0600)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create CRIU log file %q", dumpLog)
+ }
+ logFile.Close()
+ if err = label.SetFileLabel(dumpLog, c.MountLabel()); err != nil {
+ return errors.Wrapf(err, "failed to label CRIU log file %q", dumpLog)
+ }
+
if err := c.runtime.ociRuntime.checkpointContainer(c, options); err != nil {
return err
}
@@ -678,20 +689,12 @@ func (c *Container) makeBindMounts() error {
// If it doesn't, don't copy them
resolvPath, exists := bindMounts["/etc/resolv.conf"]
if exists {
- resolvDest := filepath.Join(c.state.RunDir, "resolv.conf")
- if err := fileutils.CopyFile(resolvPath, resolvDest); err != nil {
- return errors.Wrapf(err, "error copying resolv.conf from dependency container %s of container %s", depCtr.ID(), c.ID())
- }
- c.state.BindMounts["/etc/resolv.conf"] = resolvDest
- }
+ c.state.BindMounts["/etc/resolv.conf"] = resolvPath
+ }
hostsPath, exists := bindMounts["/etc/hosts"]
if exists {
- hostsDest := filepath.Join(c.state.RunDir, "hosts")
- if err := fileutils.CopyFile(hostsPath, hostsDest); err != nil {
- return errors.Wrapf(err, "error copying hosts file from dependency container %s of container %s", depCtr.ID(), c.ID())
- }
- c.state.BindMounts["/etc/hosts"] = hostsDest
+ c.state.BindMounts["/etc/hosts"] = hostsPath
}
} else {
newResolv, err := c.generateResolvConf()
@@ -706,6 +709,14 @@ func (c *Container) makeBindMounts() error {
}
c.state.BindMounts["/etc/hosts"] = newHosts
}
+
+ if err := label.Relabel(c.state.BindMounts["/etc/hosts"], c.config.MountLabel, true); err != nil {
+ return err
+ }
+
+ if err := label.Relabel(c.state.BindMounts["/etc/resolv.conf"], c.config.MountLabel, true); err != nil {
+ return err
+ }
}
// SHM is always added when we mount the container
@@ -759,8 +770,24 @@ func (c *Container) makeBindMounts() error {
// generateResolvConf generates a container's resolv.conf
func (c *Container) generateResolvConf() (string, error) {
+ resolvConf := "/etc/resolv.conf"
+ for _, ns := range c.config.Spec.Linux.Namespaces {
+ if ns.Type == spec.NetworkNamespace {
+ if ns.Path != "" && !strings.HasPrefix(ns.Path, "/proc/") {
+ definedPath := filepath.Join("/etc/netns", filepath.Base(ns.Path), "resolv.conf")
+ _, err := os.Stat(definedPath)
+ if err == nil {
+ resolvConf = definedPath
+ } else if !os.IsNotExist(err) {
+ return "", errors.Wrapf(err, "failed to stat %s", definedPath)
+ }
+ }
+ break
+ }
+ }
+
// Determine the endpoint for resolv.conf in case it is a symlink
- resolvPath, err := filepath.EvalSymlinks("/etc/resolv.conf")
+ resolvPath, err := filepath.EvalSymlinks(resolvConf)
if err != nil {
return "", err
}
@@ -810,7 +837,7 @@ func (c *Container) generateResolvConf() (string, error) {
}
// Relabel resolv.conf for the container
- if err := label.Relabel(destPath, c.config.MountLabel, false); err != nil {
+ if err := label.Relabel(destPath, c.config.MountLabel, true); err != nil {
return "", err
}
diff --git a/libpod/errors.go b/libpod/errors.go
index d6614141c..dd82d0796 100644
--- a/libpod/errors.go
+++ b/libpod/errors.go
@@ -2,15 +2,21 @@ package libpod
import (
"errors"
+
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/utils"
)
var (
// ErrNoSuchCtr indicates the requested container does not exist
- ErrNoSuchCtr = errors.New("no such container")
+ ErrNoSuchCtr = image.ErrNoSuchCtr
+
// ErrNoSuchPod indicates the requested pod does not exist
- ErrNoSuchPod = errors.New("no such pod")
+ ErrNoSuchPod = image.ErrNoSuchPod
+
// ErrNoSuchImage indicates the requested image does not exist
- ErrNoSuchImage = errors.New("no such image")
+ ErrNoSuchImage = image.ErrNoSuchImage
+
// ErrNoSuchVolume indicates the requested volume does not exist
ErrNoSuchVolume = errors.New("no such volume")
@@ -51,6 +57,10 @@ var (
// ErrInternal indicates an internal library error
ErrInternal = errors.New("internal libpod error")
+ // ErrDetach indicates that an attach session was manually detached by
+ // the user.
+ ErrDetach = utils.ErrDetach
+
// ErrRuntimeStopped indicates that the runtime has already been shut
// down and no further operations can be performed on it
ErrRuntimeStopped = errors.New("runtime has already been stopped")
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 739372e77..b20419d7b 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -5,14 +5,18 @@ import (
"encoding/json"
"fmt"
"io"
+ "os"
"strings"
"syscall"
"time"
types2 "github.com/containernetworking/cni/pkg/types"
cp "github.com/containers/image/copy"
+ "github.com/containers/image/directory"
+ dockerarchive "github.com/containers/image/docker/archive"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ ociarchive "github.com/containers/image/oci/archive"
is "github.com/containers/image/storage"
"github.com/containers/image/tarball"
"github.com/containers/image/transports"
@@ -26,7 +30,9 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/reexec"
digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -126,6 +132,10 @@ func (ir *Runtime) NewFromLocal(name string) (*Image, error) {
// New creates a new image object where the image could be local
// or remote
func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, forcePull bool, label *string) (*Image, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "newImage")
+ span.SetTag("type", "runtime")
+ defer span.Finish()
+
// We don't know if the image is local or not ... check local first
newImage := Image{
InputName: name,
@@ -805,6 +815,10 @@ func (i *Image) imageInspectInfo(ctx context.Context) (*types.ImageInspectInfo,
// Inspect returns an image's inspect data
func (i *Image) Inspect(ctx context.Context) (*inspect.ImageData, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "imageInspect")
+ span.SetTag("type", "image")
+ defer span.Finish()
+
ociv1Img, err := i.ociv1Image(ctx)
if err != nil {
return nil, err
@@ -1075,3 +1089,65 @@ func (i *Image) Comment(ctx context.Context, manifestType string) (string, error
}
return ociv1Img.History[0].Comment, nil
}
+
+// Save writes a container image to the filesystem
+func (i *Image) Save(ctx context.Context, source, format, output string, moreTags []string, quiet, compress bool) error {
+ var (
+ writer io.Writer
+ destRef types.ImageReference
+ manifestType string
+ err error
+ )
+
+ if quiet {
+ writer = os.Stderr
+ }
+ switch format {
+ case "oci-archive":
+ destImageName := imageNameForSaveDestination(i, source)
+ destRef, err = ociarchive.NewReference(output, destImageName) // destImageName may be ""
+ if err != nil {
+ return errors.Wrapf(err, "error getting OCI archive ImageReference for (%q, %q)", output, destImageName)
+ }
+ case "oci-dir":
+ destRef, err = directory.NewReference(output)
+ if err != nil {
+ return errors.Wrapf(err, "error getting directory ImageReference for %q", output)
+ }
+ manifestType = imgspecv1.MediaTypeImageManifest
+ case "docker-dir":
+ destRef, err = directory.NewReference(output)
+ if err != nil {
+ return errors.Wrapf(err, "error getting directory ImageReference for %q", output)
+ }
+ manifestType = manifest.DockerV2Schema2MediaType
+ case "docker-archive", "":
+ dst := output
+ destImageName := imageNameForSaveDestination(i, source)
+ if destImageName != "" {
+ dst = fmt.Sprintf("%s:%s", dst, destImageName)
+ }
+ destRef, err = dockerarchive.ParseReference(dst) // FIXME? Add dockerarchive.NewReference
+ if err != nil {
+ return errors.Wrapf(err, "error getting Docker archive ImageReference for %q", dst)
+ }
+ default:
+ return errors.Errorf("unknown format option %q", format)
+ }
+ // supports saving multiple tags to the same tar archive
+ var additionaltags []reference.NamedTagged
+ if len(moreTags) > 0 {
+ additionaltags, err = GetAdditionalTags(moreTags)
+ if err != nil {
+ return err
+ }
+ }
+ if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", writer, compress, SigningOptions{}, &DockerRegistryOptions{}, additionaltags); err != nil {
+ if err2 := os.Remove(output); err2 != nil {
+ logrus.Errorf("error deleting %q: %v", output, err)
+ }
+ return errors.Wrapf(err, "unable to save %q", source)
+ }
+
+ return nil
+}
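+
+// Usage sketch (illustrative only; error handling elided, and the image name
+// and output path are hypothetical):
+//
+//	img, _ := ir.NewFromLocal("alpine")
+//	_ = img.Save(ctx, "alpine", "docker-archive", "/tmp/alpine.tar",
+//		[]string{"alpine:edge"}, false, false)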
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index 6fef96e37..6b9f7fc67 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -19,6 +19,7 @@ import (
"github.com/containers/image/types"
"github.com/containers/libpod/pkg/registries"
multierror "github.com/hashicorp/go-multierror"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -109,6 +110,9 @@ func (ir *Runtime) getSinglePullRefPairGoal(srcRef types.ImageReference, destNam
// pullGoalFromImageReference returns a pull goal for a single ImageReference, depending on the used transport.
func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.ImageReference, imgName string, sc *types.SystemContext) (*pullGoal, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "pullGoalFromImageReference")
+ defer span.Finish()
+
// supports pulling from docker-archive, oci, and registries
switch srcRef.Transport().Name() {
case DockerArchive:
@@ -194,6 +198,9 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.
// pullImageFromHeuristicSource pulls an image based on inputName, which is heuristically parsed and may involve configured registries.
// Use pullImageFromReference if the source is known precisely.
func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, label *string) ([]string, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "pullImageFromHeuristicSource")
+ defer span.Finish()
+
var goal *pullGoal
sc := GetSystemContext(signaturePolicyPath, authfile, false)
srcRef, err := alltransports.ParseImageName(inputName)
@@ -214,6 +221,9 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s
// pullImageFromReference pulls an image from a types.imageReference.
func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.ImageReference, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions) ([]string, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "pullImageFromReference")
+ defer span.Finish()
+
sc := GetSystemContext(signaturePolicyPath, authfile, false)
goal, err := ir.pullGoalFromImageReference(ctx, srcRef, transports.ImageName(srcRef), sc)
if err != nil {
@@ -224,6 +234,9 @@ func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.Imag
// doPullImage is an internal helper interpreting pullGoal. Almost everyone should call one of the callers of doPullImage instead.
func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, label *string) ([]string, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "doPullImage")
+ defer span.Finish()
+
policyContext, err := getPolicyContext(sc)
if err != nil {
return nil, err
diff --git a/libpod/image/search.go b/libpod/image/search.go
new file mode 100644
index 000000000..212eff00b
--- /dev/null
+++ b/libpod/image/search.go
@@ -0,0 +1,277 @@
+package image
+
+import (
+ "context"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/containers/image/docker"
+ "github.com/containers/image/types"
+ "github.com/containers/libpod/libpod/common"
+ sysreg "github.com/containers/libpod/pkg/registries"
+ "github.com/fatih/camelcase"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sync/semaphore"
+)
+
+const (
+ descriptionTruncLength = 44
+ maxQueries = 25
+ maxParallelSearches = int64(6)
+)
+
+// SearchResult holds image-search related data.
+type SearchResult struct {
+ // Index is the image index (e.g., "docker.io" or "quay.io")
+ Index string
+ // Name is the canonical name of the image (e.g., "docker.io/library/alpine").
+ Name string
+ // Description of the image.
+ Description string
+ // Stars is the number of stars of the image.
+ Stars int
+ // Official indicates if it's an official image.
+ Official string
+ // Automated indicates if the image was created by an automated build.
+ Automated string
+}
+
+// SearchOptions are used to control the behaviour of SearchImages.
+type SearchOptions struct {
+ // Filter allows filtering the results.
+ Filter SearchFilter
+ // Limit limits the number of queries per index (default: 25). Must be
+ // greater than 0 to override the default value.
+ Limit int
+ // NoTrunc prevents the output from being truncated.
+ NoTrunc bool
+ // Authfile is the path to the authentication file.
+ Authfile string
+ // InsecureSkipTLSVerify allows skipping TLS verification.
+ InsecureSkipTLSVerify types.OptionalBool
+}
+
+// SearchFilter allows filtering the results of SearchImages.
+type SearchFilter struct {
+ // Stars describes the minimum number of stars an image must have.
+ Stars int
+ // IsAutomated decides if only images from automated builds are displayed.
+ IsAutomated types.OptionalBool
+ // IsOfficial decides if only official images are displayed.
+ IsOfficial types.OptionalBool
+}
+
+func splitCamelCase(src string) string {
+ entries := camelcase.Split(src)
+ return strings.Join(entries, " ")
+}
+
+// HeaderMap returns the headers of a SearchResult.
+func (s *SearchResult) HeaderMap() map[string]string {
+ v := reflect.Indirect(reflect.ValueOf(s))
+ values := make(map[string]string, v.NumField())
+
+ for i := 0; i < v.NumField(); i++ {
+ key := v.Type().Field(i).Name
+ value := key
+ values[key] = strings.ToUpper(splitCamelCase(value))
+ }
+ return values
+}
+
+// SearchImages searches images based on term and the specified SearchOptions
+// in all registries.
+func SearchImages(term string, options SearchOptions) ([]SearchResult, error) {
+ // Check if search term has a registry in it
+ registry, err := sysreg.GetRegistry(term)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting registry from %q", term)
+ }
+ if registry != "" {
+ term = term[len(registry)+1:]
+ }
+
+ registries, err := getRegistries(registry)
+ if err != nil {
+ return nil, err
+ }
+
+ // searchOutputData is used as a return value for searching in parallel.
+ type searchOutputData struct {
+ data []SearchResult
+ err error
+ }
+
+ // Let's follow Firefox by limiting parallel downloads to 6.
+ sem := semaphore.NewWeighted(maxParallelSearches)
+ wg := sync.WaitGroup{}
+ wg.Add(len(registries))
+ data := make([]searchOutputData, len(registries))
+
+ searchImageInRegistryHelper := func(index int, registry string) {
+ defer sem.Release(1)
+ defer wg.Done()
+ searchOutput, err := searchImageInRegistry(term, registry, options)
+ data[index] = searchOutputData{data: searchOutput, err: err}
+ }
+
+ ctx := context.Background()
+ for i := range registries {
+ sem.Acquire(ctx, 1)
+ go searchImageInRegistryHelper(i, registries[i])
+ }
+
+ wg.Wait()
+ results := []SearchResult{}
+ for _, d := range data {
+ if d.err != nil {
+ return nil, d.err
+ }
+ results = append(results, d.data...)
+ }
+ return results, nil
+}
+
+// getRegistries returns the list of registries to search, depending on an optional registry specification
+func getRegistries(registry string) ([]string, error) {
+ var registries []string
+ if registry != "" {
+ registries = append(registries, registry)
+ } else {
+ var err error
+ registries, err = sysreg.GetRegistries()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting registries to search")
+ }
+ }
+ return registries, nil
+}
+
+func searchImageInRegistry(term string, registry string, options SearchOptions) ([]SearchResult, error) {
+ // Max number of queries by default is 25
+ limit := maxQueries
+ if options.Limit > 0 {
+ limit = options.Limit
+ }
+
+ sc := common.GetSystemContext("", options.Authfile, false)
+ sc.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify
+ // FIXME: Set this more globally. Probably no reason not to have it in
+ // every types.SystemContext, and to compute the value just once in one
+ // place.
+ sc.SystemRegistriesConfPath = sysreg.SystemRegistriesConfPath()
+ results, err := docker.SearchRegistry(context.TODO(), sc, registry, term, limit)
+ if err != nil {
+ logrus.Errorf("error searching registry %q: %v", registry, err)
+ return []SearchResult{}, nil
+ }
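+ // Shorten the displayed index to the last two domain components
+ // (e.g. "registry.fedoraproject.org" -> "fedoraproject.org").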
+ index := registry
+ arr := strings.Split(registry, ".")
+ if len(arr) > 2 {
+ index = strings.Join(arr[len(arr)-2:], ".")
+ }
+
+ // limit is the number of results to output
+ // if the total number of results is less than the limit, output all
+ // if the user set a limit, output at most that many results
+ limit = maxQueries
+ if len(results) < limit {
+ limit = len(results)
+ }
+ if options.Limit != 0 && options.Limit < len(results) {
+ limit = options.Limit
+ }
+
+ paramsArr := []SearchResult{}
+ for i := 0; i < limit; i++ {
+ // Check whether query matches filters
+ if !(options.Filter.matchesAutomatedFilter(results[i]) && options.Filter.matchesOfficialFilter(results[i]) && options.Filter.matchesStarFilter(results[i])) {
+ continue
+ }
+ official := ""
+ if results[i].IsOfficial {
+ official = "[OK]"
+ }
+ automated := ""
+ if results[i].IsAutomated {
+ automated = "[OK]"
+ }
+ description := strings.Replace(results[i].Description, "\n", " ", -1)
+ if len(description) > 44 && !options.NoTrunc {
+ description = description[:descriptionTruncLength] + "..."
+ }
+ name := registry + "/" + results[i].Name
+ if index == "docker.io" && !strings.Contains(results[i].Name, "/") {
+ name = index + "/library/" + results[i].Name
+ }
+ params := SearchResult{
+ Index: index,
+ Name: name,
+ Description: description,
+ Official: official,
+ Automated: automated,
+ Stars: results[i].StarCount,
+ }
+ paramsArr = append(paramsArr, params)
+ }
+ return paramsArr, nil
+}
+
+// ParseSearchFilter turns the filter into a SearchFilter that can be used for
+// searching images.
+func ParseSearchFilter(filter []string) (*SearchFilter, error) {
+ sFilter := new(SearchFilter)
+ for _, f := range filter {
+ arr := strings.Split(f, "=")
+ switch arr[0] {
+ case "stars":
+ if len(arr) < 2 {
+ return nil, errors.Errorf("invalid `stars` filter %q, should be stars=<value>", filter)
+ }
+ stars, err := strconv.Atoi(arr[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "incorrect value type for stars filter")
+ }
+ sFilter.Stars = stars
+ case "is-automated":
+ if len(arr) == 2 && arr[1] == "false" {
+ sFilter.IsAutomated = types.OptionalBoolFalse
+ } else {
+ sFilter.IsAutomated = types.OptionalBoolTrue
+ }
+ case "is-official":
+ if len(arr) == 2 && arr[1] == "false" {
+ sFilter.IsOfficial = types.OptionalBoolFalse
+ } else {
+ sFilter.IsOfficial = types.OptionalBoolTrue
+ }
+ default:
+ return nil, errors.Errorf("invalid filter type %q", f)
+ }
+ }
+ return sFilter, nil
+}
+
+func (f *SearchFilter) matchesStarFilter(result docker.SearchResult) bool {
+ return result.StarCount >= f.Stars
+}
+
+func (f *SearchFilter) matchesAutomatedFilter(result docker.SearchResult) bool {
+ if f.IsAutomated != types.OptionalBoolUndefined {
+ return result.IsAutomated == (f.IsAutomated == types.OptionalBoolTrue)
+ }
+ return true
+}
+
+func (f *SearchFilter) matchesOfficialFilter(result docker.SearchResult) bool {
+ if f.IsOfficial != types.OptionalBoolUndefined {
+ return result.IsOfficial == (f.IsOfficial == types.OptionalBoolTrue)
+ }
+ return true
+}
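+
+// Usage sketch (illustrative only; error handling elided):
+//
+//	filter, _ := ParseSearchFilter([]string{"stars=10", "is-official"})
+//	results, _ := SearchImages("alpine", SearchOptions{Filter: *filter, Limit: 10})
+//	for _, r := range results {
+//		fmt.Println(r.Index, r.Name, r.Stars)
+//	}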
diff --git a/libpod/image/utils.go b/libpod/image/utils.go
index ad027f32a..544796a4b 100644
--- a/libpod/image/utils.go
+++ b/libpod/image/utils.go
@@ -1,6 +1,7 @@
package image
import (
+ "fmt"
"io"
"net/url"
"regexp"
@@ -87,22 +88,29 @@ func hasTransport(image string) bool {
// ReposToMap parses the specified repotags and returns a map with repositories
// as keys and the corresponding arrays of tags as values.
-func ReposToMap(repotags []string) map[string][]string {
+func ReposToMap(repotags []string) (map[string][]string, error) {
// map format is repo -> tag
repos := make(map[string][]string)
for _, repo := range repotags {
var repository, tag string
if len(repo) > 0 {
- li := strings.LastIndex(repo, ":")
- repository = repo[0:li]
- tag = repo[li+1:]
+ named, err := reference.ParseNormalizedNamed(repo)
+ if err != nil {
+ return nil, err
+ }
+ repository = named.Name()
+ if ref, ok := named.(reference.NamedTagged); ok {
+ tag = ref.Tag()
+ } else if ref, ok := named.(reference.Canonical); ok {
+ tag = ref.Digest().String()
+ }
}
repos[repository] = append(repos[repository], tag)
}
if len(repos) == 0 {
repos["<none>"] = []string{"<none>"}
}
- return repos
+ return repos, nil
}
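+
+// For example (illustrative): ReposToMap([]string{"docker.io/library/alpine:latest"})
+// returns map[string][]string{"docker.io/library/alpine": {"latest"}}.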
// GetAdditionalTags returns a list of reference.NamedTagged for the
@@ -141,3 +149,28 @@ func IsValidImageURI(imguri string) (bool, error) {
}
return true, nil
}
+
+// imageNameForSaveDestination returns a Docker-like reference appropriate for saving img,
+// which the user referred to as imgUserInput; or an empty string, if there is no appropriate
+// reference.
+func imageNameForSaveDestination(img *Image, imgUserInput string) string {
+ if strings.Contains(img.ID(), imgUserInput) {
+ return ""
+ }
+
+ prepend := ""
+ localRegistryPrefix := fmt.Sprintf("%s/", DefaultLocalRegistry)
+ if !strings.HasPrefix(imgUserInput, localRegistryPrefix) {
+ // we need to check if localhost was added to the image name in NewFromLocal
+ for _, name := range img.Names() {
+ // If the user is saving an image in the localhost registry, getLocalImage needs
+ // a name that matches the format localhost/<tag1>:<tag2> or localhost/<tag>:latest to correctly
+ // set up the manifest and save.
+ if strings.HasPrefix(name, localRegistryPrefix) && (strings.HasSuffix(name, imgUserInput) || strings.HasSuffix(name, fmt.Sprintf("%s:latest", imgUserInput))) {
+ prepend = localRegistryPrefix
+ break
+ }
+ }
+ }
+ return fmt.Sprintf("%s%s", prepend, imgUserInput)
+}
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 314799309..ab4fc8ba7 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -378,6 +378,58 @@ func (s *InMemoryState) ContainerInUse(ctr *Container) ([]string, error) {
return arr, nil
}
+// AllContainers retrieves all containers from the state
+func (s *InMemoryState) AllContainers() ([]*Container, error) {
+ ctrs := make([]*Container, 0, len(s.containers))
+ for _, ctr := range s.containers {
+ if s.namespace == "" || ctr.config.Namespace == s.namespace {
+ ctrs = append(ctrs, ctr)
+ }
+ }
+
+ return ctrs, nil
+}
+
+// RewriteContainerConfig rewrites a container's configuration.
+// This function is DANGEROUS, even with an in-memory state.
+// Please read the full comment on it in state.go before using it.
+func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
+ if !ctr.valid {
+ return ErrCtrRemoved
+ }
+
+ // If the container does not exist, return error
+ stateCtr, ok := s.containers[ctr.ID()]
+ if !ok {
+ ctr.valid = false
+ return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
+ }
+
+ stateCtr.config = newCfg
+
+ return nil
+}
+
+// RewritePodConfig rewrites a pod's configuration.
+// This function is DANGEROUS, even with in-memory state.
+// Please read the full comment on it in state.go before using it.
+func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
+ if !pod.valid {
+ return ErrPodRemoved
+ }
+
+ // If the pod does not exist, return error
+ statePod, ok := s.pods[pod.ID()]
+ if !ok {
+ pod.valid = false
+ return errors.Wrapf(ErrNoSuchPod, "pod with ID %s not found in state", pod.ID())
+ }
+
+ statePod.config = newCfg
+
+ return nil
+}
+
// Volume retrieves a volume from its full name
func (s *InMemoryState) Volume(name string) (*Volume, error) {
if name == "" {
@@ -486,18 +538,6 @@ func (s *InMemoryState) AllVolumes() ([]*Volume, error) {
return allVols, nil
}
-// AllContainers retrieves all containers from the state
-func (s *InMemoryState) AllContainers() ([]*Container, error) {
- ctrs := make([]*Container, 0, len(s.containers))
- for _, ctr := range s.containers {
- if s.namespace == "" || ctr.config.Namespace == s.namespace {
- ctrs = append(ctrs, ctr)
- }
- }
-
- return ctrs, nil
-}
-
// Pod retrieves a pod from the state from its full ID
func (s *InMemoryState) Pod(id string) (*Pod, error) {
if id == "" {
diff --git a/libpod/kube.go b/libpod/kube.go
index f34805e39..484127870 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -228,7 +228,11 @@ func containerToV1Container(c *Container) (v1.Container, error) {
return kubeContainer, nil
}
- ports, err := ocicniPortMappingToContainerPort(c.PortMappings())
+ portmappings, err := c.PortMappings()
+ if err != nil {
+ return kubeContainer, err
+ }
+ ports, err := ocicniPortMappingToContainerPort(portmappings)
if err != nil {
return kubeContainer, err
}
@@ -401,7 +405,7 @@ func capAddDrop(caps *specs.LinuxCapabilities) (*v1.Capabilities, error) {
func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
priv := c.Privileged()
ro := c.IsReadOnly()
- allowPrivEscalation := !c.Spec().Process.NoNewPrivileges
+ allowPrivEscalation := !c.config.Spec.Process.NoNewPrivileges
newCaps, err := capAddDrop(c.config.Spec.Process.Capabilities)
if err != nil {
@@ -421,7 +425,13 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
}
if c.User() != "" {
- // It is *possible* that
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ }
+ if err := c.syncContainer(); err != nil {
+ return nil, errors.Wrapf(err, "unable to sync container during YAML generation")
+ }
logrus.Debugf("Looking in container for user: %s", c.User())
u, err := lookup.GetUser(c.state.Mountpoint, c.User())
if err != nil {
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
index db8f20e95..7c9605917 100644
--- a/libpod/lock/in_memory_locks.go
+++ b/libpod/lock/in_memory_locks.go
@@ -89,3 +89,14 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
return m.locks[id], nil
}
+
+// FreeAllLocks frees all locks.
+// This function is DANGEROUS. Please read the full comment in lock.go before
+// trying to use it.
+func (m *InMemoryManager) FreeAllLocks() error {
+ for _, lock := range m.locks {
+ lock.allocated = false
+ }
+
+ return nil
+}
diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go
index 1f94171fe..d6841646b 100644
--- a/libpod/lock/lock.go
+++ b/libpod/lock/lock.go
@@ -24,6 +24,20 @@ type Manager interface {
// The underlying lock MUST be the same as any other lock with the
// same UUID.
RetrieveLock(id uint32) (Locker, error)
+ // PLEASE READ FULL DESCRIPTION BEFORE USING.
+ // FreeAllLocks frees all allocated locks, in preparation for lock
+ // reallocation.
+ // As this deallocates all presently-held locks, this can be very
+ // dangerous - if there are other processes running that might be
+ // attempting to allocate new locks and free existing locks, we may
+ // encounter races leading to an inconsistent state.
+ // (This is in addition to the fact that FreeAllLocks instantly makes
+ // the state inconsistent simply by using it, and requires a full
+ // lock renumbering to restore consistency!).
+ // In short, this should only be used as part of unit tests, or lock
+ // renumbering, where reasonable guarantees about other processes can be
+ // made.
+ FreeAllLocks() error
}
// Locker is similar to sync.Locker, but provides a method for freeing the lock
diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c
index 4af58d857..d11fce71a 100644
--- a/libpod/lock/shm/shm_lock.c
+++ b/libpod/lock/shm/shm_lock.c
@@ -203,6 +203,8 @@ shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code) {
// terminating NULL byte.
// Returns a valid pointer on success or NULL on error.
// If an error occurs, negative ERRNO values will be written to error_code.
+// ERANGE is returned for a mismatch between num_locks and the number of locks
+// available in the SHM lock struct.
shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
int shm_fd;
shm_struct_t *shm;
@@ -255,11 +257,11 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
// Need to check the SHM to see if it's actually our locks
if (shm->magic != MAGIC) {
- *error_code = -1 * errno;
+ *error_code = -1 * EBADF;
goto CLEANUP;
}
if (shm->num_locks != (num_bitmaps * BITMAP_SIZE)) {
- *error_code = -1 * errno;
+ *error_code = -1 * ERANGE;
goto CLEANUP;
}
@@ -407,6 +409,36 @@ int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
return 0;
}
+// Deallocate all semaphores unconditionally.
+// Returns negative ERRNO values.
+int32_t deallocate_all_semaphores(shm_struct_t *shm) {
+ int ret_code;
+ unsigned int i;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Lock the mutex controlling access to our shared memory
+ ret_code = take_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ // Iterate through all bitmaps and reset to unused
+ for (i = 0; i < shm->num_bitmaps; i++) {
+ shm->locks[i].bitmap = 0;
+ }
+
+ // Unlock the allocation control mutex
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ return 0;
+}
+
// Lock a given semaphore
// Does not check if the semaphore is allocated - this ensures that, even for
// removed containers, we can still successfully lock to check status (and
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index 87d28e5c1..e70ea8743 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -155,6 +155,22 @@ func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
return nil
}
+// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
+// other containers and pods.
+func (locks *SHMLocks) DeallocateAllSemaphores() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ retCode := C.deallocate_all_semaphores(locks.lockStruct)
+ if retCode < 0 {
+ // Negative errno return from C
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
// LockSemaphore locks the given semaphore.
// If the semaphore is already locked, LockSemaphore will block until the lock
// can be acquired.
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
index 8e7e23fb7..58e4297e2 100644
--- a/libpod/lock/shm/shm_lock.h
+++ b/libpod/lock/shm/shm_lock.h
@@ -40,6 +40,7 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
int32_t close_lock_shm(shm_struct_t *shm);
int64_t allocate_semaphore(shm_struct_t *shm);
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
+int32_t deallocate_all_semaphores(shm_struct_t *shm);
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);
diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go
index 594eb5d8e..830035881 100644
--- a/libpod/lock/shm/shm_lock_test.go
+++ b/libpod/lock/shm/shm_lock_test.go
@@ -4,7 +4,6 @@ import (
"fmt"
"os"
"runtime"
- "syscall"
"testing"
"time"
@@ -53,12 +52,8 @@ func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
}
defer func() {
// Deallocate all locks
- // Ignore ENOENT (lock is not allocated)
- var i uint32
- for i = 0; i < numLocks; i++ {
- if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT {
- t.Fatalf("Error deallocating semaphore %d: %v", i, err)
- }
+ if err := locks.DeallocateAllSemaphores(); err != nil {
+ t.Fatalf("Error deallocating semaphores: %v", err)
}
if err := locks.Close(); err != nil {
@@ -212,6 +207,25 @@ func TestAllocateDeallocateCycle(t *testing.T) {
})
}
+// Test that DeallocateAllSemaphores deallocates all semaphores
+func TestDeallocateAllSemaphoresDeallocatesAll(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // Allocate a lock
+ locks1, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ // Free all locks
+ err = locks.DeallocateAllSemaphores()
+ assert.NoError(t, err)
+
+ // Allocate another lock
+ locks2, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ assert.Equal(t, locks1, locks2)
+ })
+}
+
// Test that locks actually lock
func TestLockSemaphoreActuallyLocks(t *testing.T) {
runLockTest(t, func(t *testing.T, locks *SHMLocks) {
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 94dfd7dd7..8678958ee 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -71,6 +71,13 @@ func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
return lock, nil
}
+// FreeAllLocks frees all locks in the manager.
+// This function is DANGEROUS. Please read the full comment in lock.go before
+// trying to use it.
+func (m *SHMLockManager) FreeAllLocks() error {
+ return m.locks.DeallocateAllSemaphores()
+}
+
// SHMLock is an individual shared memory lock.
type SHMLock struct {
lockID uint32
diff --git a/libpod/lock/shm_lock_manager_unsupported.go b/libpod/lock/shm_lock_manager_unsupported.go
index cbdb2f7bc..1d6e3fcbd 100644
--- a/libpod/lock/shm_lock_manager_unsupported.go
+++ b/libpod/lock/shm_lock_manager_unsupported.go
@@ -27,3 +27,8 @@ func (m *SHMLockManager) AllocateLock() (Locker, error) {
func (m *SHMLockManager) RetrieveLock(id string) (Locker, error) {
return nil, fmt.Errorf("not supported")
}
+
+// FreeAllLocks is not supported on this platform
+func (m *SHMLockManager) FreeAllLocks() error {
+ return fmt.Errorf("not supported")
+}
diff --git a/libpod/oci.go b/libpod/oci.go
index e55bd57dc..26d2c6ef1 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -321,7 +321,6 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBPOD_USERNS_CONFIGURED=%s", os.Getenv("_LIBPOD_USERNS_CONFIGURED")))
cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBPOD_ROOTLESS_UID=%s", os.Getenv("_LIBPOD_ROOTLESS_UID")))
cmd.Env = append(cmd.Env, fmt.Sprintf("HOME=%s", os.Getenv("HOME")))
- cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
if r.reservePorts && !ctr.config.NetMode.IsSlirp4netns() {
ports, err := bindPorts(ctr.config.PortMappings)
diff --git a/libpod/options.go b/libpod/options.go
index d965c058e..e22c81f91 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -11,6 +11,7 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/cri-o/ocicni/pkg/ocicni"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
@@ -148,6 +149,7 @@ func WithOCIRuntime(runtime string) RuntimeOption {
}
rt.config.OCIRuntime = runtime
+ rt.config.RuntimePath = nil
return nil
}
@@ -393,6 +395,22 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption {
}
}
+// WithRenumber instructs libpod to perform a lock renumbering while
+// initializing. This will handle migrations from early versions of libpod with
+// file locks to newer versions with SHM locking, as well as changes in the
+// number of configured locks.
+func WithRenumber() RuntimeOption {
+ return func(rt *Runtime) error {
+ if rt.valid {
+ return ErrRuntimeFinalized
+ }
+
+ rt.doRenumber = true
+
+ return nil
+ }
+}
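+
+// Illustrative only: a renumbering caller would do, e.g.,
+//
+//	rt, err := NewRuntime(WithRenumber())
+//
+// and must not keep using rt afterwards, since renumbering breaks out of
+// normal runtime init and does not return a valid runtime.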
+
// Container Creation Options
// WithShmDir sets the directory that should be mounted on /dev/shm.
@@ -886,10 +904,10 @@ func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netmo
}
ctr.config.PostConfigureNetNS = postConfigureNetNS
- ctr.config.CreateNetNS = true
+ ctr.config.NetMode = namespaces.NetworkMode(netmode)
+ ctr.config.CreateNetNS = !ctr.config.NetMode.IsUserDefined()
ctr.config.PortMappings = portMappings
ctr.config.Networks = networks
- ctr.config.NetMode = namespaces.NetworkMode(netmode)
return nil
}
@@ -1058,7 +1076,7 @@ func WithUserVolumes(volumes []string) CtrCreateOption {
// from a container passed in to the --volumes-from flag.
// This stores the built-in volume information in the Config so we can
// add them when creating the container.
-func WithLocalVolumes(volumes []string) CtrCreateOption {
+func WithLocalVolumes(volumes []spec.Mount) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
return ErrCtrFinalized
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 0f1f115e8..25e4e77d7 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -1,10 +1,8 @@
package libpod
import (
- "context"
"fmt"
"path/filepath"
- "strings"
"time"
"github.com/containers/storage/pkg/stringid"
@@ -85,97 +83,3 @@ func (p *Pod) refresh() error {
// Save changes
return p.save()
}
-
-// Visit a node on a container graph and start the container, or set an error if
-// a dependency failed to start. if restart is true, startNode will restart the node instead of starting it.
-func startNode(ctx context.Context, node *containerNode, setError bool, ctrErrors map[string]error, ctrsVisited map[string]bool, restart bool) {
- // First, check if we have already visited the node
- if ctrsVisited[node.id] {
- return
- }
-
- // If setError is true, a dependency of us failed
- // Mark us as failed and recurse
- if setError {
- // Mark us as visited, and set an error
- ctrsVisited[node.id] = true
- ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id)
-
- // Hit anyone who depends on us, and set errors on them too
- for _, successor := range node.dependedOn {
- startNode(ctx, successor, true, ctrErrors, ctrsVisited, restart)
- }
-
- return
- }
-
- // Have all our dependencies started?
- // If not, don't visit the node yet
- depsVisited := true
- for _, dep := range node.dependsOn {
- depsVisited = depsVisited && ctrsVisited[dep.id]
- }
- if !depsVisited {
- // Don't visit us yet, all dependencies are not up
- // We'll hit the dependencies eventually, and when we do it will
- // recurse here
- return
- }
-
- // Going to try to start the container, mark us as visited
- ctrsVisited[node.id] = true
-
- ctrErrored := false
-
- // Check if dependencies are running
- // Graph traversal means we should have started them
- // But they could have died before we got here
- // Does not require that the container be locked, we only need to lock
- // the dependencies
- depsStopped, err := node.container.checkDependenciesRunning()
- if err != nil {
- ctrErrors[node.id] = err
- ctrErrored = true
- } else if len(depsStopped) > 0 {
- // Our dependencies are not running
- depsList := strings.Join(depsStopped, ",")
- ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
- ctrErrored = true
- }
-
- // Lock before we start
- node.container.lock.Lock()
-
- // Sync the container to pick up current state
- if !ctrErrored {
- if err := node.container.syncContainer(); err != nil {
- ctrErrored = true
- ctrErrors[node.id] = err
- }
- }
-
- // Start the container (only if it is not running)
- if !ctrErrored {
- if !restart && node.container.state.State != ContainerStateRunning {
- if err := node.container.initAndStart(ctx); err != nil {
- ctrErrored = true
- ctrErrors[node.id] = err
- }
- }
- if restart && node.container.state.State != ContainerStatePaused && node.container.state.State != ContainerStateUnknown {
- if err := node.container.restartWithTimeout(ctx, node.container.config.StopTimeout); err != nil {
- ctrErrored = true
- ctrErrors[node.id] = err
- }
- }
- }
-
- node.container.lock.Unlock()
-
- // Recurse to anyone who depends on us and start them
- for _, successor := range node.dependedOn {
- startNode(ctx, successor, ctrErrored, ctrErrors, ctrsVisited, restart)
- }
-
- return
-}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index c975f628b..52f4523ba 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -7,6 +7,7 @@ import (
"os/exec"
"path/filepath"
"sync"
+ "syscall"
"github.com/BurntSushi/toml"
is "github.com/containers/image/storage"
@@ -79,7 +80,8 @@ type RuntimeOption func(*Runtime) error
// Runtime is the core libpod runtime
type Runtime struct {
- config *RuntimeConfig
+ config *RuntimeConfig
+
state State
store storage.Store
storageService *storageService
@@ -88,12 +90,23 @@ type Runtime struct {
netPlugin ocicni.CNIPlugin
ociRuntimePath OCIRuntimePath
conmonPath string
- valid bool
- lock sync.RWMutex
imageRuntime *image.Runtime
firewallBackend firewall.FirewallBackend
lockManager lock.Manager
configuredFrom *runtimeConfiguredFrom
+
+ // doRenumber indicates that the runtime should perform a lock renumber
+ // during initialization.
+ // Once the runtime has been initialized and returned, this variable is
+ // unused.
+ doRenumber bool
+
+ // valid indicates whether the runtime is ready to use.
+ // valid is set to true when a runtime is returned from GetRuntime(),
+ // and remains true until the runtime is shut down (rendering its
+ // storage unusable). When valid is false, the runtime cannot be used.
+ valid bool
+ lock sync.RWMutex
}
// OCIRuntimePath contains information about an OCI runtime.
@@ -130,6 +143,12 @@ type RuntimeConfig struct {
OCIRuntime string `toml:"runtime"`
// OCIRuntimes are the set of configured OCI runtimes (default is runc)
OCIRuntimes map[string][]string `toml:"runtimes"`
+ // RuntimePath is the path to OCI runtime binary for launching
+ // containers.
+ // The first path pointing to a valid file will be used.
+ // This is honored only when OCIRuntime/OCIRuntimes are not defined, and
+ // exists solely for backward compatibility with older versions of Podman.
+ RuntimePath []string `toml:"runtime_path"`
// ConmonPath is the path to the Conmon binary used for managing
// containers
// The first path pointing to a valid file will be used
@@ -389,7 +408,7 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
// If the configuration file was not found but we are running in rootless, a subset of the
// global config file is used.
for _, path := range []string{OverrideConfigPath, ConfigPath} {
- contents, err := ioutil.ReadFile(OverrideConfigPath)
+ contents, err := ioutil.ReadFile(path)
if err != nil {
// Ignore any error, the file might not be readable by us.
continue
@@ -403,6 +422,7 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
runtime.config.ConmonPath = tmpConfig.ConmonPath
runtime.config.ConmonEnvVars = tmpConfig.ConmonEnvVars
runtime.config.OCIRuntimes = tmpConfig.OCIRuntimes
+ runtime.config.RuntimePath = tmpConfig.RuntimePath
runtime.config.CNIPluginDir = tmpConfig.CNIPluginDir
runtime.config.NoPivotRoot = tmpConfig.NoPivotRoot
break
@@ -485,12 +505,37 @@ func NewRuntimeFromConfig(configPath string, options ...RuntimeOption) (runtime
// Make a new runtime based on the given configuration
// Sets up containers/storage, state store, OCI runtime
func makeRuntime(runtime *Runtime) (err error) {
+
+ // Backward compatibility for `runtime_path`
+ if runtime.config.RuntimePath != nil {
+ // Don't print twice in rootless mode.
+ if os.Geteuid() == 0 {
+ logrus.Warningf("The configuration is using `runtime_path`, which is deprecated and will be removed in future. Please use `runtimes` and `runtime`")
+ logrus.Warningf("If you are using both `runtime_path` and `runtime`, the configuration from `runtime_path` is used")
+ }
+
+ // Transform `runtime_path` into `runtimes` and `runtime`.
+ name := filepath.Base(runtime.config.RuntimePath[0])
+ runtime.config.OCIRuntime = name
+ runtime.config.OCIRuntimes = map[string][]string{name: runtime.config.RuntimePath}
+ }
+
// Find a working OCI runtime binary
foundRuntime := false
// If runtime is an absolute path, then use it as it is.
- if runtime.config.OCIRuntime[0] == '/' {
+ if runtime.config.OCIRuntime != "" && runtime.config.OCIRuntime[0] == '/' {
foundRuntime = true
runtime.ociRuntimePath = OCIRuntimePath{Name: filepath.Base(runtime.config.OCIRuntime), Paths: []string{runtime.config.OCIRuntime}}
+ stat, err := os.Stat(runtime.config.OCIRuntime)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return errors.Wrapf(err, "the specified OCI runtime %s does not exist", runtime.config.OCIRuntime)
+ }
+ return errors.Wrapf(err, "cannot stat the OCI runtime path %s", runtime.config.OCIRuntime)
+ }
+ if !stat.Mode().IsRegular() {
+ return fmt.Errorf("the specified OCI runtime %s is not a valid file", runtime.config.OCIRuntime)
+ }
} else {
// If not, look it up in the configuration.
paths := runtime.config.OCIRuntimes[runtime.config.OCIRuntime]
@@ -731,6 +776,7 @@ func makeRuntime(runtime *Runtime) (err error) {
aliveLock.Unlock()
}
}()
+
_, err = os.Stat(runtimeAliveFile)
if err != nil {
// If the file doesn't exist, we need to refresh the state
@@ -756,12 +802,35 @@ func makeRuntime(runtime *Runtime) (err error) {
if err != nil {
return err
}
+ } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
+ logrus.Debugf("Number of locks does not match - removing old locks")
+
+ // ERANGE indicates a lock numbering mismatch.
+ // Since we're renumbering, this is not fatal.
+ // Remove the earlier set of locks and recreate.
+ if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
+ return errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
+ }
+
+ manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ return err
+ }
} else {
return err
}
}
runtime.lockManager = manager
+ // If we're renumbering locks, do it now.
+ // It breaks out of normal runtime init, and will not return a valid
+ // runtime.
+ if runtime.doRenumber {
+ if err := runtime.renumberLocks(); err != nil {
+ return err
+ }
+ }
+
// If we need to refresh the state, do it now - things are guaranteed to
// be set up by now.
if doRefresh {
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 9afdef7b6..2ec8d0795 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -10,9 +10,12 @@ import (
"strings"
"time"
+ "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/storage"
"github.com/containers/storage/pkg/stringid"
spec "github.com/opencontainers/runtime-spec/specs-go"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/ulule/deepcopier"
@@ -44,6 +47,10 @@ func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (c *Container, err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "newContainer")
+ span.SetTag("type", "runtime")
+ defer span.Finish()
+
if rSpec == nil {
return nil, errors.Wrapf(ErrInvalidArg, "must provide a valid runtime spec to create container")
}
@@ -175,9 +182,12 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
if err != nil {
newVol, err := r.newVolume(ctx, WithVolumeName(vol.Source))
if err != nil {
- logrus.Errorf("error creating named volume %q: %v", vol.Source, err)
+ return nil, errors.Wrapf(err, "error creating named volume %q", vol.Source)
}
ctr.config.Spec.Mounts[i].Source = newVol.MountPoint()
+ if err := ctr.copyWithTarFromImage(ctr.config.Spec.Mounts[i].Destination, ctr.config.Spec.Mounts[i].Source); err != nil && !os.IsNotExist(err) {
+ return nil, errors.Wrapf(err, "Failed to copy content into new volume mount %q", vol.Source)
+ }
continue
}
ctr.config.Spec.Mounts[i].Source = volInfo.MountPoint()
@@ -223,17 +233,23 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
// RemoveContainer removes the given container
// If force is specified, the container will be stopped first
+// If removeVolume is specified, named volumes used by the container will
+// also be removed, but only if the container is their sole user.
// Otherwise, RemoveContainer will return an error if the container is running
-func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool) error {
+func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool, removeVolume bool) error {
r.lock.Lock()
defer r.lock.Unlock()
- return r.removeContainer(ctx, c, force)
+ return r.removeContainer(ctx, c, force, removeVolume)
}
// Internal function to remove a container
// Locks the container, but does not lock the runtime
-func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool) error {
+func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, removeVolume bool) error {
+ span, _ := opentracing.StartSpanFromContext(ctx, "removeContainer")
+ span.SetTag("type", "runtime")
+ defer span.Finish()
+
if !c.valid {
if ok, _ := r.state.HasContainer(c.ID()); !ok {
// Container probably already removed
@@ -246,6 +262,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
// To avoid races around removing a container and the pod it is in
var pod *Pod
var err error
+ runtime := c.runtime
if c.config.Pod != "" {
pod, err = r.state.Pod(c.config.Pod)
if err != nil {
@@ -331,6 +348,13 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
return errors.Wrapf(ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
}
+ var volumes []string
+ if removeVolume {
+ volumes, err = c.namedVolumes()
+ if err != nil {
+ logrus.Errorf("unable to retrieve builtin volumes for container %v: %v", c.ID(), err)
+ }
+ }
var cleanupErr error
// Remove the container from the state
if c.config.Pod != "" {
@@ -395,6 +419,14 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
}
}
+ for _, v := range volumes {
+ if volume, err := runtime.state.Volume(v); err == nil {
+ if err := runtime.removeVolume(ctx, volume, false); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
+ logrus.Errorf("cleanup volume (%s): %v", v, err)
+ }
+ }
+ }
+
return cleanupErr
}
@@ -564,3 +596,16 @@ func (r *Runtime) Export(name string, path string) error {
return ctr.Export(path)
}
+
+// RemoveContainersFromStorage attempts to remove containers from storage that do not exist in the libpod database
+func (r *Runtime) RemoveContainersFromStorage(ctrs []string) {
+ for _, i := range ctrs {
+ // if the container does not exist in the database, attempt to remove it from storage
+ if _, err := r.LookupContainer(i); err != nil && errors.Cause(err) == image.ErrNoSuchCtr {
+ r.storageService.UnmountContainerImage(i, true)
+ if err := r.storageService.DeleteContainer(i); err != nil && errors.Cause(err) != storage.ErrContainerUnknown {
+ logrus.Errorf("Failed to remove container %q from storage: %s", i, err)
+ }
+ }
+ }
+}
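+
+// Illustrative only (hypothetical ID): r.RemoveContainersFromStorage([]string{"badc0ffee"})
+// unmounts and deletes the storage container only when libpod has no record of it.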
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index c20aa77a3..451c2ebe7 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -3,7 +3,6 @@ package libpod
import (
"context"
"fmt"
- "github.com/opencontainers/image-spec/specs-go/v1"
"io"
"io/ioutil"
"net/http"
@@ -15,6 +14,11 @@ import (
"github.com/containers/libpod/pkg/util"
"github.com/containers/storage"
"github.com/pkg/errors"
+
+ "github.com/containers/image/directory"
+ dockerarchive "github.com/containers/image/docker/archive"
+ ociarchive "github.com/containers/image/oci/archive"
+ "github.com/opencontainers/image-spec/specs-go/v1"
)
// Runtime API
@@ -43,7 +47,7 @@ func (r *Runtime) RemoveImage(ctx context.Context, img *image.Image, force bool)
if len(imageCtrs) > 0 && len(img.Names()) <= 1 {
if force {
for _, ctr := range imageCtrs {
- if err := r.removeContainer(ctx, ctr, true); err != nil {
+ if err := r.removeContainer(ctx, ctr, true, false); err != nil {
return "", errors.Wrapf(err, "error removing image %s: container %s using image could not be removed", img.ID(), ctr.ID())
}
}
@@ -211,3 +215,41 @@ func downloadFromURL(source string) (string, error) {
return outFile.Name(), nil
}
+
+// LoadImage loads a container image into local storage
+func (r *Runtime) LoadImage(ctx context.Context, name, inputFile string, writer io.Writer, signaturePolicy string) (string, error) {
+ var newImages []*image.Image
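+ // Try the supported archive formats in order: docker-archive first, then
+ // oci-archive, then a directory layout; the first reference that both
+ // parses and loads successfully wins.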
+ src, err := dockerarchive.ParseReference(inputFile) // FIXME? We should add dockerarchive.NewReference()
+ if err == nil {
+ newImages, err = r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ }
+ if err != nil {
+ // generate full src name with specified image:tag
+ src, err := ociarchive.NewReference(inputFile, name) // name may be ""
+ if err == nil {
+ newImages, err = r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ }
+ if err != nil {
+ src, err := directory.NewReference(inputFile)
+ if err == nil {
+ newImages, err = r.ImageRuntime().LoadFromArchiveReference(ctx, src, signaturePolicy, writer)
+ }
+ if err != nil {
+ return "", errors.Wrapf(err, "error pulling %q", name)
+ }
+ }
+ }
+ return getImageNames(newImages), nil
+}
+
+func getImageNames(images []*image.Image) string {
+ var names string
+ for i := range images {
+ if i == 0 {
+ names = images[i].InputName
+ } else {
+ names += ", " + images[i].InputName
+ }
+ }
+ return names
+}
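
The LoadImage fallback chain above tries the docker-archive transport first, then oci-archive, then a plain directory source, so one entry point serves all three layouts. A minimal sketch of loading a tarball through it (the path is hypothetical; the empty name and signature policy fall back to defaults):

    names, err := runtime.LoadImage(ctx, "", "/tmp/myimage.tar", os.Stdout, "")
    if err != nil {
        logrus.Error(err)
        return
    }
    // names is the comma-separated list assembled by getImageNames above.
    fmt.Println("Loaded images:", names)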
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index c6d497c0c..c378d18e4 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -117,15 +117,6 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
if err := pod.save(); err != nil {
return nil, err
}
-
- // Once the pod infra container has been created, we start it
- if err := ctr.Start(ctx); err != nil {
- // If the infra container does not start, we need to tear the pod down.
- if err2 := r.removePod(ctx, pod, true, true); err2 != nil {
- logrus.Errorf("Error removing pod after infra container failed to start: %v", err2)
- }
- return nil, errors.Wrapf(err, "error starting Infra Container")
- }
}
return pod, nil
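
With the auto-start removed, NewPod no longer starts the infra container; starting the pod is now the caller's responsibility. A sketch of the now-explicit flow, assuming Pod.Start keeps its usual (map[string]error, error) shape:

    pod, err := runtime.NewPod(ctx, options...)
    if err != nil {
        return err
    }
    // Starting the pod (and with it the infra container) is a separate step.
    if ctrErrs, err := pod.Start(ctx); err != nil {
        logrus.Errorf("starting pod %s: %v (per-container errors: %v)", pod.ID(), err, ctrErrs)
    }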
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
new file mode 100644
index 000000000..125cf0825
--- /dev/null
+++ b/libpod/runtime_renumber.go
@@ -0,0 +1,57 @@
+package libpod
+
+import (
+ "github.com/pkg/errors"
+)
+
+// renumberLocks reassigns lock numbers for all containers and pods in the
+// state.
+// TODO: It would be desirable to make it impossible to call this until all
+// other libpod sessions are dead.
+// Possibly use a read-write file lock, with all non-renumber podman processes
+// holding a read lock and renumber attempting to take the write lock?
+// The alternative is some sort of session tracking, and I don't know how
+// reliable that can be.
+func (r *Runtime) renumberLocks() error {
+ // Start off by deallocating all locks
+ if err := r.lockManager.FreeAllLocks(); err != nil {
+ return err
+ }
+
+ allCtrs, err := r.state.AllContainers()
+ if err != nil {
+ return err
+ }
+ for _, ctr := range allCtrs {
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return errors.Wrapf(err, "error allocating lock for container %s", ctr.ID())
+ }
+
+ ctr.config.LockID = lock.ID()
+
+ // Write the new lock ID
+ if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
+ return err
+ }
+ }
+ allPods, err := r.state.AllPods()
+ if err != nil {
+ return err
+ }
+ for _, pod := range allPods {
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return errors.Wrapf(err, "error allocating lock for pod %s", pod.ID())
+ }
+
+ pod.config.LockID = lock.ID()
+
+ // Write the new lock ID
+ if err := r.state.RewritePodConfig(pod, pod.config); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
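
Because renumbering is only safe while no other libpod process is live, it is meant to run during runtime construction, before anything else touches the locks. A sketch of how a caller might request it, assuming a renumber option along the lines of the options.go changes in this series (the option name is a guess):

    // WithRenumber is assumed here; see the libpod/options.go changes in this
    // series for the actual wiring.
    r, err := libpod.NewRuntime(libpod.WithRenumber())
    if err != nil {
        logrus.Fatal(err)
    }
    defer r.Shutdown(false)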
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index 3921758ee..11f37ad4b 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -2,6 +2,9 @@ package libpod
import (
"context"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "strings"
)
// Contains the public Runtime API for volumes
@@ -16,7 +19,7 @@ type VolumeCreateOption func(*Volume) error
type VolumeFilter func(*Volume) bool
// RemoveVolume removes a volume
-func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error {
r.lock.Lock()
defer r.lock.Unlock()
@@ -32,10 +35,39 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool
}
}
- v.lock.Lock()
- defer v.lock.Unlock()
+ return r.removeVolume(ctx, v, force)
+}
+
+// RemoveVolumes removes the given volumes, or all volumes when all is set; force removes volumes even if they are in use
+func (r *Runtime) RemoveVolumes(ctx context.Context, volumes []string, all, force bool) ([]string, error) {
+ var (
+ vols []*Volume
+ err error
+ deletedVols []string
+ )
+ if all {
+ vols, err = r.Volumes()
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to get all volumes")
+ }
+ } else {
+ for _, i := range volumes {
+ vol, err := r.GetVolume(i)
+ if err != nil {
+ return nil, err
+ }
+ vols = append(vols, vol)
+ }
+ }
- return r.removeVolume(ctx, v, force, prune)
+ for _, vol := range vols {
+ if err := r.RemoveVolume(ctx, vol, force); err != nil {
+ return deletedVols, err
+ }
+ logrus.Debugf("removed volume %s", vol.Name())
+ deletedVols = append(deletedVols, vol.Name())
+ }
+ return deletedVols, nil
}
// GetVolume retrieves a volume by its name
@@ -47,7 +79,21 @@ func (r *Runtime) GetVolume(name string) (*Volume, error) {
return nil, ErrRuntimeStopped
}
- return r.state.Volume(name)
+ vol, err := r.state.Volume(name)
+ if err == nil {
+ return vol, err
+ }
+
+ vols, err := r.GetAllVolumes()
+ if err != nil {
+ return nil, err
+ }
+ for _, v := range vols {
+ if strings.HasPrefix(v.Name(), name) {
+ return v, nil
+ }
+ }
+ return nil, errors.Errorf("unable to find volume %s", name)
}
// HasVolume checks to see if a volume with the given name exists
@@ -105,3 +151,27 @@ func (r *Runtime) GetAllVolumes() ([]*Volume, error) {
return r.state.AllVolumes()
}
+
+// PruneVolumes removes unused volumes from the system
+func (r *Runtime) PruneVolumes(ctx context.Context) ([]string, []error) {
+ var (
+ prunedIDs []string
+ pruneErrors []error
+ )
+ vols, err := r.GetAllVolumes()
+ if err != nil {
+ pruneErrors = append(pruneErrors, err)
+ return nil, pruneErrors
+ }
+
+ for _, vol := range vols {
+ if err := r.RemoveVolume(ctx, vol, false); err != nil {
+ if errors.Cause(err) != ErrVolumeBeingUsed && errors.Cause(err) != ErrVolumeRemoved {
+ pruneErrors = append(pruneErrors, err)
+ }
+ continue
+ }
+ prunedIDs = append(prunedIDs, vol.Name())
+ }
+ return prunedIDs, pruneErrors
+}
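
Taken together, the volume API now covers prefix lookup, batch removal, and pruning. A short usage sketch (the volume names are hypothetical):

    // Remove two specific volumes: all=false, force=false.
    deleted, err := runtime.RemoveVolumes(ctx, []string{"volA", "volB"}, false, false)
    if err != nil {
        logrus.Errorf("removed %v before failing: %v", deleted, err)
    }

    // Prune whatever remains unused; in-use volumes are skipped, not errors.
    pruned, pruneErrs := runtime.PruneVolumes(ctx)
    logrus.Debugf("pruned %v (%d errors)", pruned, len(pruneErrs))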
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 0727cfedf..838c0167a 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -67,13 +67,6 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
volume.config.MountPoint = fullVolPath
- lock, err := r.lockManager.AllocateLock()
- if err != nil {
- return nil, errors.Wrapf(err, "error allocating lock for new volume")
- }
- volume.lock = lock
- volume.config.LockID = volume.lock.ID()
-
volume.valid = true
// Add the volume to state
@@ -85,9 +78,12 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
// removeVolume removes the specified volume from the state, as well as tearing down its mountpoint and storage
-func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
if !v.valid {
- return ErrNoSuchVolume
+ if ok, _ := r.state.HasVolume(v.Name()); !ok {
+ return nil
+ }
+ return ErrVolumeRemoved
}
deps, err := r.state.VolumeInUse(v)
@@ -95,9 +91,6 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
return err
}
if len(deps) != 0 {
- if prune {
- return ErrVolumeBeingUsed
- }
depsStr := strings.Join(deps, ", ")
if !force {
return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
@@ -112,18 +105,20 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
}
}
- // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
- if err := v.teardownStorage(); err != nil {
- return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
- }
+ // Set volume as invalid so it can no longer be used
+ v.valid = false
// Remove the volume from the state
if err := r.state.RemoveVolume(v); err != nil {
return errors.Wrapf(err, "error removing volume %s", v.Name())
}
- // Set volume as invalid so it can no longer be used
- v.valid = false
+ // Delete the mountpoint path of the volume, that is, delete the volume from /var/lib/containers/storage/volumes
+ if err := v.teardownStorage(); err != nil {
+ return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+ }
+
+ logrus.Debugf("Removed volume %s", v.Name())
return nil
}
diff --git a/libpod/state.go b/libpod/state.go
index 88d89f673..98282fc83 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -97,6 +97,30 @@ type State interface {
// returned.
AllContainers() ([]*Container, error)
+ // PLEASE READ FULL DESCRIPTION BEFORE USING.
+ // Rewrite a container's configuration.
+ // This function breaks libpod's normal prohibition on a read-only
+ // configuration, and as such should be used EXTREMELY SPARINGLY and
+ // only in very specific circumstances.
+ // Specifically, it is ONLY safe to use this function to make changes
+ // that result in a functionally identical configuration (migrating to
+ // newer, but identical, configuration fields), or during libpod init
+ // WHILE HOLDING THE ALIVE LOCK (to prevent other libpod instances from
+ // being initialized).
+ // Most things in config can be changed by this, but container ID and
+ // name ABSOLUTELY CANNOT BE ALTERED. If you do so, there is a high
+ // potential for database corruption.
+ // There are a lot of capital letters and conditions here, but the short
+ // answer is this: use this only very sparingly, and only if you really
+ // know what you're doing.
+ RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error
+ // PLEASE READ THE ABOVE DESCRIPTION BEFORE USING.
+ // This function is identical to RewriteContainerConfig, save for the
+ // fact that it is used with pods instead.
+ // It is subject to the same conditions as RewriteContainerConfig.
+ // Please do not use this unless you know what you're doing.
+ RewritePodConfig(pod *Pod, newCfg *PodConfig) error
+
// Accepts full ID of pod.
// If the pod given is not in the set namespace, an error will be
// returned.
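
The renumbering code earlier in this series is the canonical caller of these rewrite methods, and the tests below exercise them directly. In miniature, a safe rewrite touches only functionally neutral fields, as with lock IDs:

    // Sketch: only the lock ID changes; container ID and name are untouched.
    ctr.config.LockID = newLock.ID()
    if err := state.RewriteContainerConfig(ctr, ctr.config); err != nil {
        return err
    }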
diff --git a/libpod/state_test.go b/libpod/state_test.go
index 4bd00ab55..be68a2d69 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -1298,6 +1298,78 @@ func TestCannotUseBadIDAsGenericDependency(t *testing.T) {
})
}
+func TestRewriteContainerConfigDoesNotExist(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ err := state.RewriteContainerConfig(&Container{}, &ContainerConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewriteContainerConfigNotInState(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+ err = state.RewriteContainerConfig(testCtr, &ContainerConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewriteContainerConfigRewritesConfig(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ err = state.AddContainer(testCtr)
+ assert.NoError(t, err)
+
+ testCtr.config.LogPath = "/another/path/"
+
+ err = state.RewriteContainerConfig(testCtr, testCtr.config)
+ assert.NoError(t, err)
+
+ testCtrFromState, err := state.Container(testCtr.ID())
+ assert.NoError(t, err)
+
+ testContainersEqual(t, testCtrFromState, testCtr, true)
+ })
+}
+
+func TestRewritePodConfigDoesNotExist(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ err := state.RewritePodConfig(&Pod{}, &PodConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewritePodConfigNotInState(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
+ assert.NoError(t, err)
+ err = state.RewritePodConfig(testPod, &PodConfig{})
+ assert.Error(t, err)
+ })
+}
+
+func TestRewritePodConfigRewritesConfig(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
+ assert.NoError(t, err)
+
+ err = state.AddPod(testPod)
+ assert.NoError(t, err)
+
+ testPod.config.CgroupParent = "/another_cgroup_parent"
+
+ err = state.RewritePodConfig(testPod, testPod.config)
+ assert.NoError(t, err)
+
+ testPodFromState, err := state.Pod(testPod.ID())
+ assert.NoError(t, err)
+
+ testPodsEqual(t, testPodFromState, testPod, true)
+ })
+}
+
func TestGetPodDoesNotExist(t *testing.T) {
runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.Pod("doesnotexist")
diff --git a/libpod/storage.go b/libpod/storage.go
index 17d231171..1a7b13da6 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -8,6 +8,7 @@ import (
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/opencontainers/image-spec/specs-go/v1"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -62,6 +63,10 @@ func (metadata *RuntimeContainerMetadata) SetMountLabel(mountLabel string) {
// CreateContainerStorage creates the storage end of things. We already have the container spec created
// TO-DO We should be passing in an Image object in the future.
func (r *storageService) CreateContainerStorage(ctx context.Context, systemContext *types.SystemContext, imageName, imageID, containerName, containerID string, options storage.ContainerOptions) (cinfo ContainerInfo, err error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "createContainerStorage")
+ span.SetTag("type", "storageService")
+ defer span.Finish()
+
var imageConfig *v1.Image
if imageName != "" {
var ref types.ImageReference
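
The same span pattern could instrument other storageService methods; a sketch with a hypothetical method name (DeleteContainerStorage is illustrative only, not an existing function):

    func (r *storageService) DeleteContainerStorage(ctx context.Context, idOrName string) error {
        // Same opentracing pattern as CreateContainerStorage above.
        span, _ := opentracing.StartSpanFromContext(ctx, "deleteContainerStorage")
        span.SetTag("type", "storageService")
        defer span.Finish()

        // ... actual storage removal elided ...
        return nil
    }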
diff --git a/libpod/volume.go b/libpod/volume.go
index 026a3bf49..74878b6a4 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -1,7 +1,5 @@
package libpod
-import "github.com/containers/libpod/libpod/lock"
-
// Volume is the type used to create named volumes
// TODO: all volumes should be created using this and the Volume API
type Volume struct {
@@ -9,7 +7,6 @@ type Volume struct {
valid bool
runtime *Runtime
- lock lock.Locker
}
// VolumeConfig holds the volume's config information
@@ -17,8 +14,6 @@ type Volume struct {
type VolumeConfig struct {
// Name of the volume
Name string `json:"name"`
- // ID of this volume's lock
- LockID uint32 `json:"lockID"`
Labels map[string]string `json:"labels"`
MountPoint string `json:"mountPoint"`
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 800e6d106..35f0ca19d 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -5,10 +5,6 @@ import (
"path/filepath"
)
-// VolumePath is the path under which all volumes that are created using the
-// local driver will be created
-// const VolumePath = "/var/lib/containers/storage/volumes"
-
// Creates a new volume
func newVolume(runtime *Runtime) (*Volume, error) {
volume := new(Volume)
@@ -22,8 +18,5 @@ func newVolume(runtime *Runtime) (*Volume, error) {
// teardownStorage deletes the volume from volumePath
func (v *Volume) teardownStorage() error {
- if !v.valid {
- return ErrNoSuchVolume
- }
return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name()))
}