Diffstat (limited to 'libpod')
-rw-r--r--  libpod/boltdb_state.go | 363
-rw-r--r--  libpod/boltdb_state_internal.go | 95
-rw-r--r--  libpod/container.go | 157
-rw-r--r--  libpod/container_api.go | 16
-rw-r--r--  libpod/container_config.go | 14
-rw-r--r--  libpod/container_inspect.go | 17
-rw-r--r--  libpod/container_internal.go | 70
-rw-r--r--  libpod/container_internal_linux.go | 122
-rw-r--r--  libpod/container_validate.go | 21
-rw-r--r--  libpod/define/container_inspect.go | 2
-rw-r--r--  libpod/define/errors.go | 19
-rw-r--r--  libpod/define/pod_inspect.go | 2
-rw-r--r--  libpod/define/podstate.go | 7
-rw-r--r--  libpod/events.go | 12
-rw-r--r--  libpod/events/config.go | 8
-rw-r--r--  libpod/events/events.go | 8
-rw-r--r--  libpod/events/journal_linux.go | 6
-rw-r--r--  libpod/events/logfile.go | 2
-rw-r--r--  libpod/filters/containers.go | 161
-rw-r--r--  libpod/filters/pods.go | 90
-rw-r--r--  libpod/healthcheck.go | 2
-rw-r--r--  libpod/healthcheck_linux.go | 4
-rw-r--r--  libpod/image/image.go | 28
-rw-r--r--  libpod/image/manifests.go | 36
-rw-r--r--  libpod/image/pull.go | 129
-rw-r--r--  libpod/image/pull_test.go | 67
-rw-r--r--  libpod/image/utils.go | 3
-rw-r--r--  libpod/in_memory_state.go | 262
-rw-r--r--  libpod/kube.go | 2
-rw-r--r--  libpod/network/config.go | 15
-rw-r--r--  libpod/network/create.go | 172
-rw-r--r--  libpod/network/create_test.go | 131
-rw-r--r--  libpod/network/files.go | 6
-rw-r--r--  libpod/network/netconflist.go | 28
-rw-r--r--  libpod/network/netconflist_test.go | 70
-rw-r--r--  libpod/network/network.go | 28
-rw-r--r--  libpod/network/subnet.go | 14
-rw-r--r--  libpod/network/subnet_test.go | 62
-rw-r--r--  libpod/networking_linux.go | 289
-rw-r--r--  libpod/oci_conmon_exec_linux.go | 5
-rw-r--r--  libpod/oci_conmon_linux.go | 19
-rw-r--r--  libpod/options.go | 35
-rw-r--r--  libpod/pod_api.go | 4
-rw-r--r--  libpod/pod_status.go | 8
-rw-r--r--  libpod/rootless_cni_linux.go | 38
-rw-r--r--  libpod/runtime.go | 4
-rw-r--r--  libpod/runtime_ctr.go | 57
-rw-r--r--  libpod/runtime_migrate.go | 2
-rw-r--r--  libpod/runtime_pod_infra_linux.go | 1
-rw-r--r--  libpod/runtime_volume.go | 12
-rw-r--r--  libpod/runtime_volume_linux.go | 2
-rw-r--r--  libpod/state.go | 13
-rw-r--r--  libpod/state_test.go | 26
-rw-r--r--  libpod/util.go | 8
54 files changed, 2230 insertions, 544 deletions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 9dd5ca465..dcb2ff751 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -50,10 +50,12 @@ type BoltState struct {
// containers in the pod.
// - allPodsBkt: Map of ID to name containing only pods. Used for pod lookup
// operations.
-// - execBkt: Map of exec session ID to exec session - contains a sub-bucket for
-// each exec session in the DB.
-// - execRegistryBkt: Map of exec session ID to nothing. Contains one entry for
-// each exec session. Used for iterating through all exec sessions.
+// - execBkt: Map of exec session ID to container ID - used for resolving
+// exec session IDs to the containers that hold the exec session.
+// - aliasesBkt - Contains a bucket for each CNI network, which contain a map of
+// network alias (an extra name for containers in DNS) to the ID of the
+// container holding the alias. Aliases must be unique per-network, and cannot
+// conflict with names registered in nameRegistryBkt.
// - runtimeConfigBkt: Contains configuration of the libpod instance that
// initially created the database. This must match for any further instances
// that access the database, to ensure that state mismatches with
@@ -969,6 +971,359 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
return ctrs, nil
}
+// GetNetworks returns the CNI networks this container is a part of.
+func (s *BoltState) GetNetworks(ctr *Container) ([]string, error) {
+ if !s.valid {
+ return nil, define.ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return nil, define.ErrCtrRemoved
+ }
+
+ if s.namespace != "" && s.namespace != ctr.config.Namespace {
+ return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ }
+
+ ctrID := []byte(ctr.ID())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ networks := []string{}
+
+ err = db.View(func(tx *bolt.Tx) error {
+ ctrBucket, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ dbCtr := ctrBucket.Bucket(ctrID)
+ if dbCtr == nil {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ }
+
+ ctrNetworkBkt := dbCtr.Bucket(networksBkt)
+ if ctrNetworkBkt == nil {
+ return errors.Wrapf(define.ErrNoSuchNetwork, "container %s is not joined to any CNI networks", ctr.ID())
+ }
+
+ return ctrNetworkBkt.ForEach(func(network, v []byte) error {
+ networks = append(networks, string(network))
+ return nil
+ })
+ })
+ if err != nil {
+ return nil, err
+ }
+ return networks, nil
+}
+
+// GetNetworkAliases retrieves the network aliases for the given container in
+// the given CNI network.
+func (s *BoltState) GetNetworkAliases(ctr *Container, network string) ([]string, error) {
+ if !s.valid {
+ return nil, define.ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return nil, define.ErrCtrRemoved
+ }
+
+ if network == "" {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ if s.namespace != "" && s.namespace != ctr.config.Namespace {
+ return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ }
+
+ ctrID := []byte(ctr.ID())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ aliases := []string{}
+
+ err = db.View(func(tx *bolt.Tx) error {
+ ctrBucket, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ dbCtr := ctrBucket.Bucket(ctrID)
+ if dbCtr == nil {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ }
+
+ ctrNetworkBkt := dbCtr.Bucket(networksBkt)
+ if ctrNetworkBkt == nil {
+ // No networks joined, so no aliases
+ return nil
+ }
+
+ inNetwork := ctrNetworkBkt.Get([]byte(network))
+ if inNetwork == nil {
+ return errors.Wrapf(define.ErrNoAliases, "container %s is not part of network %s, no aliases found", ctr.ID(), network)
+ }
+
+ ctrAliasesBkt := dbCtr.Bucket(aliasesBkt)
+ if ctrAliasesBkt == nil {
+ // No aliases
+ return nil
+ }
+
+ netAliasesBkt := ctrAliasesBkt.Bucket([]byte(network))
+ if netAliasesBkt == nil {
+ // No aliases for this specific network.
+ return nil
+ }
+
+ return netAliasesBkt.ForEach(func(alias, v []byte) error {
+ aliases = append(aliases, string(alias))
+ return nil
+ })
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return aliases, nil
+}
+
+// GetAllNetworkAliases retrieves the network aliases for the given container in
+// all CNI networks.
+func (s *BoltState) GetAllNetworkAliases(ctr *Container) (map[string][]string, error) {
+ if !s.valid {
+ return nil, define.ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return nil, define.ErrCtrRemoved
+ }
+
+ if s.namespace != "" && s.namespace != ctr.config.Namespace {
+ return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ }
+
+ ctrID := []byte(ctr.ID())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ aliases := make(map[string][]string)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ ctrBucket, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ dbCtr := ctrBucket.Bucket(ctrID)
+ if dbCtr == nil {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ }
+
+ ctrAliasesBkt := dbCtr.Bucket(aliasesBkt)
+ if ctrAliasesBkt == nil {
+ // No aliases present
+ return nil
+ }
+
+ ctrNetworkBkt := dbCtr.Bucket(networksBkt)
+ if ctrNetworkBkt == nil {
+ // No networks joined, so no aliases
+ return nil
+ }
+
+ return ctrNetworkBkt.ForEach(func(network, v []byte) error {
+ netAliasesBkt := ctrAliasesBkt.Bucket(network)
+ if netAliasesBkt == nil {
+ return nil
+ }
+
+ netAliases := []string{}
+
+ _ = netAliasesBkt.ForEach(func(alias, v []byte) error {
+ netAliases = append(netAliases, string(alias))
+ return nil
+ })
+
+ aliases[string(network)] = netAliases
+ return nil
+ })
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return aliases, nil
+}
+
+// NetworkConnect adds the given container to the given network. If aliases are
+// specified, those will be added to the given network.
+func (s *BoltState) NetworkConnect(ctr *Container, network string, aliases []string) error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if network == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ if s.namespace != "" && s.namespace != ctr.config.Namespace {
+ return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ }
+
+ ctrID := []byte(ctr.ID())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ return db.Update(func(tx *bolt.Tx) error {
+ ctrBucket, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ dbCtr := ctrBucket.Bucket(ctrID)
+ if dbCtr == nil {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ }
+
+ ctrAliasesBkt, err := dbCtr.CreateBucketIfNotExists(aliasesBkt)
+ if err != nil {
+ return errors.Wrapf(err, "error creating aliases bucket for container %s", ctr.ID())
+ }
+
+ ctrNetworksBkt := dbCtr.Bucket(networksBkt)
+ if ctrNetworksBkt == nil {
+ ctrNetworksBkt, err = dbCtr.CreateBucket(networksBkt)
+ if err != nil {
+ return errors.Wrapf(err, "error creating networks bucket for container %s", ctr.ID())
+ }
+ ctrNetworks := ctr.config.Networks
+ if len(ctrNetworks) == 0 {
+ ctrNetworks = []string{ctr.runtime.netPlugin.GetDefaultNetworkName()}
+ }
+ // Copy in all the container's CNI networks
+ for _, net := range ctrNetworks {
+ if err := ctrNetworksBkt.Put([]byte(net), ctrID); err != nil {
+ return errors.Wrapf(err, "error adding container %s network %s to DB", ctr.ID(), net)
+ }
+ }
+ }
+ netConnected := ctrNetworksBkt.Get([]byte(network))
+ if netConnected != nil {
+ return errors.Wrapf(define.ErrNetworkExists, "container %s is already connected to CNI network %q", ctr.ID(), network)
+ }
+
+ // Add the network
+ if err := ctrNetworksBkt.Put([]byte(network), ctrID); err != nil {
+ return errors.Wrapf(err, "error adding container %s to network %s in DB", ctr.ID(), network)
+ }
+
+ ctrNetAliasesBkt, err := ctrAliasesBkt.CreateBucketIfNotExists([]byte(network))
+ if err != nil {
+ return errors.Wrapf(err, "error adding container %s network aliases bucket for network %s", ctr.ID(), network)
+ }
+ for _, alias := range aliases {
+ if err := ctrNetAliasesBkt.Put([]byte(alias), ctrID); err != nil {
+ return errors.Wrapf(err, "error adding container %s network alias %s for network %s", ctr.ID(), alias, network)
+ }
+ }
+ return nil
+ })
+}
+
+// NetworkDisconnect disconnects the container from the given network, also
+// removing any aliases in the network.
+func (s *BoltState) NetworkDisconnect(ctr *Container, network string) error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if network == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ if s.namespace != "" && s.namespace != ctr.config.Namespace {
+ return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ }
+
+ ctrID := []byte(ctr.ID())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ return db.Update(func(tx *bolt.Tx) error {
+ ctrBucket, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ dbCtr := ctrBucket.Bucket(ctrID)
+ if dbCtr == nil {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ }
+
+ ctrAliasesBkt := dbCtr.Bucket(aliasesBkt)
+ ctrNetworksBkt := dbCtr.Bucket(networksBkt)
+ if ctrNetworksBkt == nil {
+ return errors.Wrapf(define.ErrNoSuchNetwork, "container %s is not connected to any CNI networks, so cannot disconnect", ctr.ID())
+ }
+ netConnected := ctrNetworksBkt.Get([]byte(network))
+ if netConnected == nil {
+ return errors.Wrapf(define.ErrNoSuchNetwork, "container %s is not connected to CNI network %q", ctr.ID(), network)
+ }
+
+ if err := ctrNetworksBkt.Delete([]byte(network)); err != nil {
+ return errors.Wrapf(err, "error removing container %s from network %s", ctr.ID(), network)
+ }
+
+ if ctrAliasesBkt != nil {
+ bktExists := ctrAliasesBkt.Bucket([]byte(network))
+ if bktExists == nil {
+ return nil
+ }
+
+ if err := ctrAliasesBkt.DeleteBucket([]byte(network)); err != nil {
+ return errors.Wrapf(err, "error removing container %s network aliases for network %s", ctr.ID(), network)
+ }
+ }
+
+ return nil
+ })
+}
+
// GetContainerConfig returns a container config from the database by full ID
func (s *BoltState) GetContainerConfig(id string) (*ContainerConfig, error) {
if len(id) == 0 {
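The new per-container sub-buckets are easiest to picture as a tree: ctr/<container ID>/networks/<network name> and ctr/<container ID>/aliases/<network name>/<alias>, with every leaf value holding the container ID. The following standalone sketch (not part of the patch) walks that layout directly with go.etcd.io/bbolt; the database path and the literal bucket names "ctr" and "aliases" are assumptions taken from the constants shown in boltdb_state_internal.go below.

package main

import (
    "fmt"
    "log"

    bolt "go.etcd.io/bbolt"
)

func main() {
    // Assumed location of libpod's BoltDB state; adjust for your storage root.
    db, err := bolt.Open("/var/lib/containers/storage/libpod/bolt_state.db", 0600, &bolt.Options{ReadOnly: true})
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    err = db.View(func(tx *bolt.Tx) error {
        ctrBkt := tx.Bucket([]byte("ctr")) // top-level container bucket
        if ctrBkt == nil {
            return fmt.Errorf("no container bucket in DB")
        }
        // Walk each container, then each network's alias sub-bucket.
        return ctrBkt.ForEach(func(id, _ []byte) error {
            dbCtr := ctrBkt.Bucket(id)
            if dbCtr == nil {
                return nil
            }
            aliases := dbCtr.Bucket([]byte("aliases"))
            if aliases == nil {
                return nil // container has no aliases bucket
            }
            return aliases.ForEach(func(network, _ []byte) error {
                netBkt := aliases.Bucket(network)
                if netBkt == nil {
                    return nil
                }
                return netBkt.ForEach(func(alias, _ []byte) error {
                    fmt.Printf("%s: network %s -> alias %s\n", id, network, alias)
                    return nil
                })
            })
        })
    })
    if err != nil {
        log.Fatal(err)
    }
}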
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index e195ca314..c06fedd3e 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -3,6 +3,7 @@ package libpod
import (
"bytes"
"os"
+ "path/filepath"
"runtime"
"strings"
@@ -25,6 +26,7 @@ const (
volName = "vol"
allVolsName = "allVolumes"
execName = "exec"
+ aliasesName = "aliases"
runtimeConfigName = "runtime-config"
configName = "config"
@@ -35,6 +37,7 @@ const (
containersName = "containers"
podIDName = "pod-id"
namespaceName = "namespace"
+ networksName = "networks"
staticDirName = "static-dir"
tmpDirName = "tmp-dir"
@@ -46,26 +49,28 @@ const (
)
var (
- idRegistryBkt = []byte(idRegistryName)
- nameRegistryBkt = []byte(nameRegistryName)
- nsRegistryBkt = []byte(nsRegistryName)
- ctrBkt = []byte(ctrName)
- allCtrsBkt = []byte(allCtrsName)
- podBkt = []byte(podName)
- allPodsBkt = []byte(allPodsName)
- volBkt = []byte(volName)
- allVolsBkt = []byte(allVolsName)
- execBkt = []byte(execName)
- runtimeConfigBkt = []byte(runtimeConfigName)
-
- configKey = []byte(configName)
- stateKey = []byte(stateName)
+ idRegistryBkt = []byte(idRegistryName)
+ nameRegistryBkt = []byte(nameRegistryName)
+ nsRegistryBkt = []byte(nsRegistryName)
+ ctrBkt = []byte(ctrName)
+ allCtrsBkt = []byte(allCtrsName)
+ podBkt = []byte(podName)
+ allPodsBkt = []byte(allPodsName)
+ volBkt = []byte(volName)
+ allVolsBkt = []byte(allVolsName)
+ execBkt = []byte(execName)
+ aliasesBkt = []byte(aliasesName)
+ runtimeConfigBkt = []byte(runtimeConfigName)
dependenciesBkt = []byte(dependenciesName)
volDependenciesBkt = []byte(volCtrDependencies)
- netNSKey = []byte(netNSName)
- containersBkt = []byte(containersName)
- podIDKey = []byte(podIDName)
- namespaceKey = []byte(namespaceName)
+ networksBkt = []byte(networksName)
+
+ configKey = []byte(configName)
+ stateKey = []byte(stateName)
+ netNSKey = []byte(netNSName)
+ containersBkt = []byte(containersName)
+ podIDKey = []byte(podIDName)
+ namespaceKey = []byte(namespaceName)
staticDirKey = []byte(staticDirName)
tmpDirKey = []byte(tmpDirName)
@@ -104,25 +109,25 @@ func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error {
},
{
"libpod root directory (staticdir)",
- rt.config.Engine.StaticDir,
+ filepath.Clean(rt.config.Engine.StaticDir),
staticDirKey,
"",
},
{
"libpod temporary files directory (tmpdir)",
- rt.config.Engine.TmpDir,
+ filepath.Clean(rt.config.Engine.TmpDir),
tmpDirKey,
"",
},
{
"storage temporary directory (runroot)",
- rt.StorageConfig().RunRoot,
+ filepath.Clean(rt.StorageConfig().RunRoot),
runRootKey,
storeOpts.RunRoot,
},
{
"storage graph root directory (graphroot)",
- rt.StorageConfig().GraphRoot,
+ filepath.Clean(rt.StorageConfig().GraphRoot),
graphRootKey,
storeOpts.GraphRoot,
},
@@ -617,6 +622,23 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
return errors.Wrapf(err, "name \"%s\" is in use", ctr.Name())
}
+ allNets := make(map[string]bool)
+
+ // Check that we don't have any empty network names
+ for _, net := range ctr.config.Networks {
+ if net == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names cannot be an empty string")
+ }
+ allNets[net] = true
+ }
+
+ // Each network we have aliases for, must exist in networks
+ for net := range ctr.config.NetworkAliases {
+ if !allNets[net] {
+ return errors.Wrapf(define.ErrNoSuchNetwork, "container %s has network aliases for network %q but is not part of that network", ctr.ID(), net)
+ }
+ }
+
// No overlapping containers
// Add the new container to the DB
if err := idsBucket.Put(ctrID, ctrName); err != nil {
@@ -660,6 +682,35 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
return errors.Wrapf(err, "error adding container %s netns path to DB", ctr.ID())
}
}
+ if ctr.config.Networks != nil {
+ ctrNetworksBkt, err := newCtrBkt.CreateBucket(networksBkt)
+ if err != nil {
+ return errors.Wrapf(err, "error creating networks bucket for container %s", ctr.ID())
+ }
+ for _, network := range ctr.config.Networks {
+ if err := ctrNetworksBkt.Put([]byte(network), ctrID); err != nil {
+ return errors.Wrapf(err, "error adding network %q to networks bucket for container %s", network, ctr.ID())
+ }
+ }
+ }
+ if ctr.config.NetworkAliases != nil {
+ ctrAliasesBkt, err := newCtrBkt.CreateBucket(aliasesBkt)
+ if err != nil {
+ return errors.Wrapf(err, "error creating network aliases bucket for container %s", ctr.ID())
+ }
+ for net, aliases := range ctr.config.NetworkAliases {
+ netAliasesBkt, err := ctrAliasesBkt.CreateBucket([]byte(net))
+ if err != nil {
+ return errors.Wrapf(err, "error creating network aliases bucket for network %q in container %s", net, ctr.ID())
+ }
+ for _, alias := range aliases {
+ if err := netAliasesBkt.Put([]byte(alias), ctrID); err != nil {
+ return errors.Wrapf(err, "error creating network alias %q in network %q for container %s", alias, net, ctr.ID())
+ }
+ }
+ }
+ }
+
if _, err := newCtrBkt.CreateBucket(dependenciesBkt); err != nil {
return errors.Wrapf(err, "error creating dependencies bucket for container %s", ctr.ID())
}
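The filepath.Clean calls added to checkRuntimeConfig above mean the stored and configured directories are compared after normalization, so a trailing slash or a redundant path element no longer reads as a mismatch against what an earlier Podman instance wrote into the database. A minimal illustration of what Clean normalizes (the paths are made up):

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    // Each pair names the same directory but compares unequal byte-for-byte;
    // after filepath.Clean both sides match.
    pairs := [][2]string{
        {"/var/lib/containers/storage/", "/var/lib/containers/storage"},
        {"/run/libpod/./tmp", "/run/libpod/tmp"},
        {"/var/lib//containers/storage", "/var/lib/containers/storage"},
    }
    for _, p := range pairs {
        fmt.Println(filepath.Clean(p[0]) == filepath.Clean(p[1])) // true
    }
}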
diff --git a/libpod/container.go b/libpod/container.go
index 01419500e..e954d84eb 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -1,26 +1,24 @@
package libpod
import (
+ "bytes"
"fmt"
"io/ioutil"
"net"
"os"
- "path/filepath"
- "strings"
"time"
"github.com/containernetworking/cni/pkg/types"
cnitypes "github.com/containernetworking/cni/pkg/types/current"
- "github.com/containers/common/pkg/config"
"github.com/containers/image/v5/manifest"
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/lock"
"github.com/containers/podman/v2/pkg/rootless"
- "github.com/containers/podman/v2/utils"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// CgroupfsDefaultCgroupParent is the cgroup parent for CGroupFS in libpod
@@ -210,6 +208,10 @@ type ContainerState struct {
// and not delegated to the OCI runtime.
ExtensionStageHooks map[string][]spec.Hook `json:"extensionStageHooks,omitempty"`
+ // NetInterfaceDescriptions describes the relationship between a CNI
+ // network and an interface name
+ NetInterfaceDescriptions ContainerNetworkDescriptions `json:"networkDescriptions,omitempty"`
+
// containerPlatformState holds platform-specific container state.
containerPlatformState
}
@@ -235,6 +237,23 @@ type ContainerOverlayVolume struct {
Source string `json:"source,omitempty"`
}
+// ContainerImageVolume is a volume based on a container image. The container
+// image is first mounted on the host and is then bind-mounted into the
+// container.
+type ContainerImageVolume struct {
+ // Source is the source of the image volume. The image can be referred
+ // to by name and by ID.
+ Source string `json:"source"`
+ // Dest is the absolute path of the mount in the container.
+ Dest string `json:"dest"`
+ // ReadWrite sets the volume writable.
+ ReadWrite bool `json:"rw"`
+}
+
+// ContainerNetworkDescriptions describes the relationship between the CNI
+// network and the ethN interface name, where N is an integer
+type ContainerNetworkDescriptions map[string]int
+
// Config accessors
// Unlocked
@@ -899,44 +918,43 @@ func (c *Container) CgroupManager() string {
// CGroupPath returns a cgroups "path" for a given container.
func (c *Container) CGroupPath() (string, error) {
- cgroupManager := c.CgroupManager()
-
- switch {
- case c.config.NoCgroups || c.config.CgroupsMode == "disabled":
+ if c.config.NoCgroups || c.config.CgroupsMode == "disabled" {
return "", errors.Wrapf(define.ErrNoCgroups, "this container is not creating cgroups")
- case c.config.CgroupsMode == cgroupSplit:
- if c.config.CgroupParent != "" {
- return "", errors.Errorf("cannot specify cgroup-parent with cgroup-mode %q", cgroupSplit)
- }
- cg, err := utils.GetCgroupProcess(c.state.ConmonPID)
- if err != nil {
- return "", err
+ }
+
+ // Read /proc/[PID]/cgroup and find the *longest* cgroup entry. That's
+ // needed to account for hacks in cgroups v1, where each line in the
+ // file could potentially point to a cgroup. The longest one, however,
+ // is the libpod-specific one we're looking for.
+ //
+ // See #8397 on the need for the longest-path look up.
+ procPath := fmt.Sprintf("/proc/%d/cgroup", c.state.PID)
+ lines, err := ioutil.ReadFile(procPath)
+ if err != nil {
+ return "", err
+ }
+
+ var cgroupPath string
+ for _, line := range bytes.Split(lines, []byte("\n")) {
+ // cgroups(7) nails it down to three fields with the 3rd
+ // pointing to the cgroup's path which works both on v1 and v2.
+ fields := bytes.Split(line, []byte(":"))
+ if len(fields) != 3 {
+ logrus.Debugf("Error parsing cgroup: expected 3 fields but got %d: %s", len(fields), procPath)
+ continue
}
- // Use the conmon cgroup for two reasons: we validate the container
- // delegation was correct, and the conmon cgroup doesn't change at runtime
- // while we are not sure about the container that can create sub cgroups.
- if !strings.HasSuffix(cg, "supervisor") {
- return "", errors.Errorf("invalid cgroup for conmon %q", cg)
+ path := string(fields[2])
+ if len(path) > len(cgroupPath) {
+ cgroupPath = path
}
- return strings.TrimSuffix(cg, "/supervisor") + "/container", nil
- case cgroupManager == config.CgroupfsCgroupsManager:
- return filepath.Join(c.config.CgroupParent, fmt.Sprintf("libpod-%s", c.ID())), nil
- case cgroupManager == config.SystemdCgroupsManager:
- if rootless.IsRootless() {
- uid := rootless.GetRootlessUID()
- parts := strings.SplitN(c.config.CgroupParent, "/", 2)
-
- dir := ""
- if len(parts) > 1 {
- dir = parts[1]
- }
- return filepath.Join(parts[0], fmt.Sprintf("user-%d.slice/user@%d.service/user.slice/%s", uid, uid, dir), createUnitName("libpod", c.ID())), nil
- }
- return filepath.Join(c.config.CgroupParent, createUnitName("libpod", c.ID())), nil
- default:
- return "", errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager %s in use", cgroupManager)
}
+
+ if len(cgroupPath) == 0 {
+ return "", errors.Errorf("could not find any cgroup in %q", procPath)
+ }
+
+ return cgroupPath, nil
}
// RootFsSize returns the root FS size of the container
@@ -1072,3 +1090,68 @@ func (c *Container) Timezone() string {
func (c *Container) Umask() string {
return c.config.Umask
}
+
+// Networks gets all the networks this container is connected to.
+// Please do NOT use ctr.config.Networks, as this can be changed from those
+// values at runtime via network connect and disconnect.
+// If the container is configured to use CNI and this function returns an empty
+// array, the container will still be connected to the default network.
+// The second return parameter, a bool, indicates that the container
+// is joining the default CNI network - the network name will be included in the
+// returned array of network names, but the container did not explicitly join
+// this network.
+func (c *Container) Networks() ([]string, bool, error) {
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return nil, false, err
+ }
+ }
+
+ return c.networks()
+}
+
+// Unlocked accessor for networks
+func (c *Container) networks() ([]string, bool, error) {
+ networks, err := c.runtime.state.GetNetworks(c)
+ if err != nil && errors.Cause(err) == define.ErrNoSuchNetwork {
+ if len(c.config.Networks) == 0 && !rootless.IsRootless() {
+ return []string{c.runtime.netPlugin.GetDefaultNetworkName()}, true, nil
+ }
+ return c.config.Networks, false, nil
+ }
+
+ return networks, false, err
+}
+
+// networksByNameIndex provides us with a map of container networks where key
+// is network name and value is the index position
+func (c *Container) networksByNameIndex() (map[string]int, error) {
+ networks, _, err := c.networks()
+ if err != nil {
+ return nil, err
+ }
+ networkNamesByIndex := make(map[string]int, len(networks))
+ for index, name := range networks {
+ networkNamesByIndex[name] = index
+ }
+ return networkNamesByIndex, nil
+}
+
+// add puts the new given CNI network name into the tracking map
+// and assigns it a new integer based on the map length
+func (d ContainerNetworkDescriptions) add(networkName string) {
+ d[networkName] = len(d)
+}
+
+// getInterfaceByName returns a formatted interface name for a given
+// network along with a bool as to whether the network existed
+func (d ContainerNetworkDescriptions) getInterfaceByName(networkName string) (string, bool) {
+ val, exists := d[networkName]
+ if !exists {
+ return "", exists
+ }
+ return fmt.Sprintf("eth%d", val), exists
+}
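ContainerNetworkDescriptions is just an ordered assignment of CNI network names to ethN indexes: the first network recorded becomes eth0, the next eth1, and so on. A small standalone sketch of the same idea, with the type and method names paraphrased for illustration:

package main

import "fmt"

// netDescriptions mirrors libpod's ContainerNetworkDescriptions:
// CNI network name -> interface index.
type netDescriptions map[string]int

// add assigns the next free ethN index to a network.
func (d netDescriptions) add(networkName string) { d[networkName] = len(d) }

// interfaceByName formats the interface name for a known network.
func (d netDescriptions) interfaceByName(networkName string) (string, bool) {
    idx, ok := d[networkName]
    if !ok {
        return "", false
    }
    return fmt.Sprintf("eth%d", idx), true
}

func main() {
    d := netDescriptions{}
    d.add("podman")  // becomes eth0
    d.add("backend") // becomes eth1

    if iface, ok := d.interfaceByName("backend"); ok {
        fmt.Println(iface) // eth1
    }
}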
diff --git a/libpod/container_api.go b/libpod/container_api.go
index aef37dd59..6a7ddc421 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -249,7 +249,7 @@ func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-
// attaching, and I really do not want to do that right now.
// Send a SIGWINCH after attach succeeds so that most programs will
// redraw the screen for the new attach session.
- attachRdy := make(chan bool)
+ attachRdy := make(chan bool, 1)
if c.config.Spec.Process != nil && c.config.Spec.Process.Terminal {
go func() {
<-attachRdy
@@ -714,3 +714,17 @@ func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOpti
defer c.newContainerEvent(events.Restore)
return c.restore(ctx, options)
}
+
+// Indicate whether or not the container should restart
+func (c *Container) ShouldRestart(ctx context.Context) bool {
+ logrus.Debugf("Checking if container %s should restart", c.ID())
+ if !c.batched {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if err := c.syncContainer(); err != nil {
+ return false
+ }
+ }
+ return c.shouldRestart()
+}
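The attach change above gives attachRdy a capacity of 1 so the sender never blocks waiting for the SIGWINCH goroutine to pick the value up. A minimal, self-contained illustration of why the buffer matters when the receiver may never run:

package main

import "fmt"

func main() {
    // Capacity 1: the send completes even if nothing ever receives, so the
    // sending goroutine cannot hang on a reader that never shows up.
    ready := make(chan bool, 1)
    ready <- true // on an unbuffered channel with no receiver this would block forever
    fmt.Println("send completed without a receiver")

    // A receiver that arrives later still gets the buffered value.
    fmt.Println(<-ready) // true
}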
diff --git a/libpod/container_config.go b/libpod/container_config.go
index e264da4da..cc3ad25ea 100644
--- a/libpod/container_config.go
+++ b/libpod/container_config.go
@@ -134,6 +134,8 @@ type ContainerRootFSConfig struct {
NamedVolumes []*ContainerNamedVolume `json:"namedVolumes,omitempty"`
// OverlayVolumes lists the overlay volumes to mount into the container.
OverlayVolumes []*ContainerOverlayVolume `json:"overlayVolumes,omitempty"`
+ // ImageVolumes lists the image volumes to mount into the container.
+ ImageVolumes []*ContainerImageVolume `json:"imageVolumes,omitempty"`
// CreateWorkingDir indicates that Libpod should create the container's
// working directory if it does not exist. Some OCI runtimes do this by
// default, but others do not.
@@ -234,11 +236,23 @@ type ContainerNetworkConfig struct {
// Will be appended to host's host file
HostAdd []string `json:"hostsAdd,omitempty"`
// Network names (CNI) to add container to. Empty to use default network.
+ // Please note that these can be altered at runtime. The actual list is
+ // stored in the DB and should be retrieved from there; this is only the
+ // set of networks the container was *created* with.
Networks []string `json:"networks,omitempty"`
// Network mode specified for the default network.
NetMode namespaces.NetworkMode `json:"networkMode,omitempty"`
// NetworkOptions are additional options for each network
NetworkOptions map[string][]string `json:"network_options,omitempty"`
+ // NetworkAliases are aliases that will be added to each network.
+ // These are additional names that this container can be accessed as via
+ // DNS when the CNI dnsname plugin is in use.
+ // Please note that these can be altered at runtime. As such, the actual
+ // list is stored in the database and should be retrieved from there;
+ // this is only the set of aliases the container was *created with*.
+ // Formatted as map of network name to aliases. All network names must
+ // be present in the Networks list above.
+ NetworkAliases map[string][]string `json:"network_alises,omitempty"`
}
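Per the comments above, NetworkAliases is keyed by network name and every key has to appear in Networks (the check itself lands in container_validate.go further down). A hedged sketch of a consistent configuration, using a trimmed stand-in struct and invented network names:

package main

import "fmt"

// networkConfig is a trimmed stand-in for the two fields discussed above.
type networkConfig struct {
    Networks       []string
    NetworkAliases map[string][]string
}

// checkAliases enforces the rule that aliases may only be set for joined networks.
func checkAliases(cfg networkConfig) error {
    nets := make(map[string]bool, len(cfg.Networks))
    for _, n := range cfg.Networks {
        nets[n] = true
    }
    for n := range cfg.NetworkAliases {
        if !nets[n] {
            return fmt.Errorf("aliases set for network %q but the container does not join it", n)
        }
    }
    return nil
}

func main() {
    cfg := networkConfig{
        Networks: []string{"frontend", "backend"},
        NetworkAliases: map[string][]string{
            "backend": {"db", "db1"}, // extra DNS names on "backend" via the dnsname plugin
        },
    }
    fmt.Println(checkAliases(cfg)) // <nil>
}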
// ContainerImageConfig is an embedded sub-config providing image configuration
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index b8bce1272..f78d74ef7 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -90,7 +90,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
}
namedVolumes, mounts := c.sortUserVolumes(ctrSpec)
- inspectMounts, err := c.getInspectMounts(namedVolumes, mounts)
+ inspectMounts, err := c.getInspectMounts(namedVolumes, c.config.ImageVolumes, mounts)
if err != nil {
return nil, err
}
@@ -192,7 +192,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
// Get inspect-formatted mounts list.
// Only includes user-specified mounts. Only includes bind mounts and named
// volumes, not tmpfs volumes.
-func (c *Container) getInspectMounts(namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) ([]define.InspectMount, error) {
+func (c *Container) getInspectMounts(namedVolumes []*ContainerNamedVolume, imageVolumes []*ContainerImageVolume, mounts []spec.Mount) ([]define.InspectMount, error) {
inspectMounts := []define.InspectMount{}
// No mounts, return early
@@ -219,6 +219,17 @@ func (c *Container) getInspectMounts(namedVolumes []*ContainerNamedVolume, mount
inspectMounts = append(inspectMounts, mountStruct)
}
+
+ for _, volume := range imageVolumes {
+ mountStruct := define.InspectMount{}
+ mountStruct.Type = "image"
+ mountStruct.Destination = volume.Dest
+ mountStruct.Source = volume.Source
+ mountStruct.RW = volume.ReadWrite
+
+ inspectMounts = append(inspectMounts, mountStruct)
+ }
+
for _, mount := range mounts {
// It's a mount.
// Is it a tmpfs? If so, discard.
@@ -259,7 +270,7 @@ func parseMountOptionsForInspect(options []string, mount *define.InspectMount) {
isRW = false
case "rw":
// Do nothing, silently discard
- case "shared", "slave", "private", "rshared", "rslave", "rprivate":
+ case "shared", "slave", "private", "rshared", "rslave", "rprivate", "unbindable", "runbindable":
mountProp = opt
case "z", "Z":
zZ = opt
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 4ae571de6..b6a3244ea 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -206,37 +206,39 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error {
return nil
}
-// Handle container restart policy.
-// This is called when a container has exited, and was not explicitly stopped by
-// an API call to stop the container or pod it is in.
-func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr error) {
- // If we did not get a restart policy match, exit immediately.
+func (c *Container) shouldRestart() bool {
+ // If we did not get a restart policy match, return false
// Do the same if we're not a policy that restarts.
if !c.state.RestartPolicyMatch ||
c.config.RestartPolicy == RestartPolicyNo ||
c.config.RestartPolicy == RestartPolicyNone {
- return false, nil
+ return false
}
// If we're RestartPolicyOnFailure, we need to check retries and exit
// code.
if c.config.RestartPolicy == RestartPolicyOnFailure {
if c.state.ExitCode == 0 {
- return false, nil
+ return false
}
// If we don't have a max retries set, continue
if c.config.RestartRetries > 0 {
- if c.state.RestartCount < c.config.RestartRetries {
- logrus.Debugf("Container %s restart policy trigger: on retry %d (of %d)",
- c.ID(), c.state.RestartCount, c.config.RestartRetries)
- } else {
- logrus.Debugf("Container %s restart policy trigger: retries exhausted", c.ID())
- return false, nil
+ if c.state.RestartCount >= c.config.RestartRetries {
+ return false
}
}
}
+ return true
+}
+// Handle container restart policy.
+// This is called when a container has exited, and was not explicitly stopped by
+// an API call to stop the container or pod it is in.
+func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr error) {
+ if !c.shouldRestart() {
+ return false, nil
+ }
logrus.Debugf("Restarting container %s due to restart policy %s", c.ID(), c.config.RestartPolicy)
// Need to check if dependencies are alive.
@@ -578,10 +580,10 @@ func (c *Container) refresh() error {
if len(c.config.IDMappings.UIDMap) != 0 || len(c.config.IDMappings.GIDMap) != 0 {
info, err := os.Stat(c.runtime.config.Engine.TmpDir)
if err != nil {
- return errors.Wrapf(err, "cannot stat `%s`", c.runtime.config.Engine.TmpDir)
+ return err
}
if err := os.Chmod(c.runtime.config.Engine.TmpDir, info.Mode()|0111); err != nil {
- return errors.Wrapf(err, "cannot chmod `%s`", c.runtime.config.Engine.TmpDir)
+ return err
}
root := filepath.Join(c.runtime.config.Engine.TmpDir, "containers-root", c.ID())
if err := os.MkdirAll(root, 0755); err != nil {
@@ -641,13 +643,13 @@ func (c *Container) removeIPv4Allocations() error {
cniDefaultNetwork = c.runtime.netPlugin.GetDefaultNetworkName()
}
- switch {
- case len(c.config.Networks) > 0 && len(c.config.Networks) != len(c.state.NetworkStatus):
- return errors.Wrapf(define.ErrInternal, "network mismatch: asked to join %d CNI networks but got %d CNI results", len(c.config.Networks), len(c.state.NetworkStatus))
- case len(c.config.Networks) == 0 && len(c.state.NetworkStatus) != 1:
- return errors.Wrapf(define.ErrInternal, "network mismatch: did not specify CNI networks but joined more than one (%d)", len(c.state.NetworkStatus))
- case len(c.config.Networks) == 0 && cniDefaultNetwork == "":
- return errors.Wrapf(define.ErrInternal, "could not retrieve name of CNI default network")
+ networks, _, err := c.networks()
+ if err != nil {
+ return err
+ }
+
+ if len(networks) != len(c.state.NetworkStatus) {
+ return errors.Wrapf(define.ErrInternal, "network mismatch: asked to join %d CNI networks but got %d CNI results", len(networks), len(c.state.NetworkStatus))
}
for index, result := range c.state.NetworkStatus {
@@ -656,11 +658,11 @@ func (c *Container) removeIPv4Allocations() error {
continue
}
candidate := ""
- if len(c.config.Networks) > 0 {
+ if len(networks) > 0 {
// CNI returns networks in order we passed them.
// So our index into results should be our index
// into networks.
- candidate = filepath.Join(cniNetworksDir, c.config.Networks[index], ctrIP.Address.IP.String())
+ candidate = filepath.Join(cniNetworksDir, networks[index], ctrIP.Address.IP.String())
} else {
candidate = filepath.Join(cniNetworksDir, cniDefaultNetwork, ctrIP.Address.IP.String())
}
@@ -1503,6 +1505,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
// config.
// Returns the volume that was mounted.
func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) (*Volume, error) {
+ logrus.Debugf("Going to mount named volume %s", v.Name)
vol, err := c.runtime.state.Volume(v.Name)
if err != nil {
return nil, errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
@@ -1734,6 +1737,25 @@ func (c *Container) cleanup(ctx context.Context) error {
}
}
+ // Unmount image volumes
+ for _, v := range c.config.ImageVolumes {
+ img, err := c.runtime.ImageRuntime().NewFromLocal(v.Source)
+ if err != nil {
+ if lastError == nil {
+ lastError = err
+ continue
+ }
+ logrus.Errorf("error unmounting image volume %q:%q :%v", v.Source, v.Dest, err)
+ }
+ if err := img.Unmount(false); err != nil {
+ if lastError == nil {
+ lastError = err
+ continue
+ }
+ logrus.Errorf("error unmounting image volume %q:%q :%v", v.Source, v.Dest, err)
+ }
+ }
+
return lastError
}
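shouldRestart() now carries the whole restart decision: no restart without a policy match, and for on-failure only while the exit code is non-zero and RestartCount is still below RestartRetries. A standalone sketch of that decision, with field and policy names paraphrased for illustration:

package main

import "fmt"

// restartInput paraphrases the config/state fields the real check reads.
type restartInput struct {
    Policy       string // "no", "always", "on-failure", ...
    PolicyMatch  bool   // did the exit match the restart policy?
    ExitCode     int32
    RestartCount uint
    MaxRetries   uint // 0 means unlimited
}

func shouldRestart(in restartInput) bool {
    if !in.PolicyMatch || in.Policy == "no" || in.Policy == "" {
        return false
    }
    if in.Policy == "on-failure" {
        if in.ExitCode == 0 {
            return false // clean exit, nothing to retry
        }
        if in.MaxRetries > 0 && in.RestartCount >= in.MaxRetries {
            return false // retry budget exhausted
        }
    }
    return true
}

func main() {
    fmt.Println(shouldRestart(restartInput{Policy: "on-failure", PolicyMatch: true, ExitCode: 1, RestartCount: 2, MaxRetries: 3})) // true
    fmt.Println(shouldRestart(restartInput{Policy: "on-failure", PolicyMatch: true, ExitCode: 1, RestartCount: 3, MaxRetries: 3})) // false
    fmt.Println(shouldRestart(restartInput{Policy: "on-failure", PolicyMatch: true, ExitCode: 0}))                                 // false
}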
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index a1b4334fb..b81f3f716 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -39,6 +39,7 @@ import (
"github.com/containers/storage/pkg/idtools"
securejoin "github.com/cyphar/filepath-securejoin"
runcuser "github.com/opencontainers/runc/libcontainer/user"
+ "github.com/opencontainers/runtime-spec/specs-go"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -308,7 +309,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
fallthrough
case "Z":
if err := label.Relabel(m.Source, c.MountLabel(), label.IsShared(o)); err != nil {
- return nil, errors.Wrapf(err, "relabel failed %q", m.Source)
+ return nil, err
}
default:
@@ -343,7 +344,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
Type: "bind",
Source: srcPath,
Destination: dstPath,
- Options: []string{"bind", "private"},
+ Options: []string{"bind", "rprivate"},
}
if c.IsReadOnly() && dstPath != "/dev/shm" {
newMount.Options = append(newMount.Options, "ro", "nosuid", "noexec", "nodev")
@@ -359,11 +360,40 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
for _, overlayVol := range c.config.OverlayVolumes {
contentDir, err := overlay.TempDir(c.config.StaticDir, c.RootUID(), c.RootGID())
if err != nil {
- return nil, errors.Wrapf(err, "failed to create TempDir in the %s directory", c.config.StaticDir)
+ return nil, err
}
overlayMount, err := overlay.Mount(contentDir, overlayVol.Source, overlayVol.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions())
if err != nil {
- return nil, errors.Wrapf(err, "creating overlay failed %q", overlayVol.Source)
+ return nil, errors.Wrapf(err, "mounting overlay failed %q", overlayVol.Source)
+ }
+ g.AddMount(overlayMount)
+ }
+
+ // Add image volumes as overlay mounts
+ for _, volume := range c.config.ImageVolumes {
+ // Mount the specified image.
+ img, err := c.runtime.ImageRuntime().NewFromLocal(volume.Source)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating image volume %q:%q", volume.Source, volume.Dest)
+ }
+ mountPoint, err := img.Mount(nil, "")
+ if err != nil {
+ return nil, errors.Wrapf(err, "error mounting image volume %q:%q", volume.Source, volume.Dest)
+ }
+
+ contentDir, err := overlay.TempDir(c.config.StaticDir, c.RootUID(), c.RootGID())
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create TempDir in the %s directory", c.config.StaticDir)
+ }
+
+ var overlayMount specs.Mount
+ if volume.ReadWrite {
+ overlayMount, err = overlay.Mount(contentDir, mountPoint, volume.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions())
+ } else {
+ overlayMount, err = overlay.MountReadOnly(contentDir, mountPoint, volume.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions())
+ }
+ if err != nil {
+ return nil, errors.Wrapf(err, "creating overlay mount for image %q failed", volume.Source)
}
g.AddMount(overlayMount)
}
@@ -668,11 +698,31 @@ func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) erro
}
g.AddMount(systemdMnt)
} else {
+ mountOptions := []string{"bind", "rprivate"}
+
+ var statfs unix.Statfs_t
+ if err := unix.Statfs("/sys/fs/cgroup/systemd", &statfs); err != nil {
+ mountOptions = append(mountOptions, "nodev", "noexec", "nosuid")
+ } else {
+ if statfs.Flags&unix.MS_NODEV == unix.MS_NODEV {
+ mountOptions = append(mountOptions, "nodev")
+ }
+ if statfs.Flags&unix.MS_NOEXEC == unix.MS_NOEXEC {
+ mountOptions = append(mountOptions, "noexec")
+ }
+ if statfs.Flags&unix.MS_NOSUID == unix.MS_NOSUID {
+ mountOptions = append(mountOptions, "nosuid")
+ }
+ if statfs.Flags&unix.MS_RDONLY == unix.MS_RDONLY {
+ mountOptions = append(mountOptions, "ro")
+ }
+ }
+
systemdMnt := spec.Mount{
Destination: "/sys/fs/cgroup/systemd",
Type: "bind",
Source: "/sys/fs/cgroup/systemd",
- Options: []string{"bind", "nodev", "noexec", "nosuid", "rprivate"},
+ Options: mountOptions,
}
g.AddMount(systemdMnt)
g.AddLinuxMaskedPaths("/sys/fs/cgroup/systemd/release_agent")
@@ -781,7 +831,7 @@ func (c *Container) exportCheckpoint(dest string, ignoreRootfs bool) error {
return errors.Wrapf(err, "error creating delete files list file %q", deleteFilesList)
}
if err := ioutil.WriteFile(deleteFilesList, formatJSON, 0600); err != nil {
- return errors.Wrapf(err, "error creating delete files list file %q", deleteFilesList)
+ return errors.Wrap(err, "error creating delete files list file")
}
includeFiles = append(includeFiles, "deleted.files")
@@ -805,7 +855,7 @@ func (c *Container) exportCheckpoint(dest string, ignoreRootfs bool) error {
defer outFile.Close()
if err := os.Chmod(dest, 0600); err != nil {
- return errors.Wrapf(err, "cannot chmod %q", dest)
+ return err
}
_, err = io.Copy(outFile, input)
@@ -1029,7 +1079,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if n.Sandbox != "" {
MAC, err = net.ParseMAC(n.Mac)
if err != nil {
- return errors.Wrapf(err, "failed to parse MAC %v", n.Mac)
+ return err
}
break
}
@@ -1133,14 +1183,14 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return errors.Wrapf(err, "failed to read deleted files file")
}
if err := json.Unmarshal(deletedFilesJSON, &deletedFiles); err != nil {
- return errors.Wrapf(err, "failed to read deleted files file %s", deletedFilesPath)
+ return errors.Wrapf(err, "failed to unmarshal deleted files file %s", deletedFilesPath)
}
for _, deleteFile := range deletedFiles {
// Using RemoveAll as deletedFiles, which is generated from 'podman diff'
// lists completely deleted directories as a single entry: 'D /root'.
err = os.RemoveAll(filepath.Join(c.state.Mountpoint, deleteFile))
if err != nil {
- return errors.Wrapf(err, "failed to delete file %s from container %s during restore", deletedFilesPath, c.ID())
+ return errors.Wrapf(err, "failed to delete files from container %s during restore", c.ID())
}
}
}
@@ -1179,7 +1229,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Make standard bind mounts to include in the container
func (c *Container) makeBindMounts() error {
if err := os.Chown(c.state.RunDir, c.RootUID(), c.RootGID()); err != nil {
- return errors.Wrapf(err, "cannot chown run directory %s", c.state.RunDir)
+ return errors.Wrap(err, "cannot chown run directory")
}
if c.state.BindMounts == nil {
@@ -1197,13 +1247,13 @@ func (c *Container) makeBindMounts() error {
if c.config.NetNsCtr == "" {
if resolvePath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
if err := os.Remove(resolvePath); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s resolv.conf", c.ID())
+ return errors.Wrapf(err, "container %s", c.ID())
}
delete(c.state.BindMounts, "/etc/resolv.conf")
}
if hostsPath, ok := c.state.BindMounts["/etc/hosts"]; ok {
if err := os.Remove(hostsPath); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing container %s hosts", c.ID())
+ return errors.Wrapf(err, "container %s", c.ID())
}
delete(c.state.BindMounts, "/etc/hosts")
}
@@ -1304,6 +1354,14 @@ func (c *Container) makeBindMounts() error {
return err
}
}
+ } else {
+ if !c.config.UseImageHosts && c.state.BindMounts["/etc/hosts"] == "" {
+ newHosts, err := c.generateHosts("/etc/hosts")
+ if err != nil {
+ return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
+ }
+ c.state.BindMounts["/etc/hosts"] = newHosts
+ }
}
// SHM is always added when we mount the container
@@ -1403,7 +1461,7 @@ func (c *Container) generateResolvConf() (string, error) {
if err == nil {
resolvConf = definedPath
} else if !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "failed to stat %s", definedPath)
+ return "", err
}
}
break
@@ -1425,7 +1483,7 @@ func (c *Container) generateResolvConf() (string, error) {
contents, err := ioutil.ReadFile(resolvPath)
// resolv.conf doesn't have to exists
if err != nil && !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "unable to read %s", resolvPath)
+ return "", err
}
// Ensure that the container's /etc/resolv.conf is compatible with its
@@ -1494,7 +1552,7 @@ func (c *Container) generateResolvConf() (string, error) {
destPath := filepath.Join(c.state.RunDir, "resolv.conf")
if err := os.Remove(destPath); err != nil && !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "error removing resolv.conf for container %s", c.ID())
+ return "", errors.Wrapf(err, "container %s", c.ID())
}
// Build resolv.conf
@@ -1514,7 +1572,7 @@ func (c *Container) generateResolvConf() (string, error) {
func (c *Container) generateHosts(path string) (string, error) {
orig, err := ioutil.ReadFile(path)
if err != nil {
- return "", errors.Wrapf(err, "unable to read %s", path)
+ return "", err
}
hosts := string(orig)
hosts += c.getHosts()
@@ -1564,14 +1622,11 @@ func (c *Container) getHosts() string {
}
if !hasNetNS {
// 127.0.1.1 and host's hostname to match Docker
- osHostname, err := os.Hostname()
- if err != nil {
- osHostname = c.Hostname()
- }
- hosts += fmt.Sprintf("127.0.1.1 %s\n", osHostname)
+ osHostname, _ := os.Hostname()
+ hosts += fmt.Sprintf("127.0.1.1 %s %s %s\n", osHostname, c.Hostname(), c.config.Name)
}
if netNone {
- hosts += fmt.Sprintf("127.0.1.1 %s\n", c.Hostname())
+ hosts += fmt.Sprintf("127.0.1.1 %s %s\n", c.Hostname(), c.config.Name)
}
}
}
@@ -1917,7 +1972,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
}
orig, err := ioutil.ReadFile(originPasswdFile)
if err != nil && !os.IsNotExist(err) {
- return "", "", errors.Wrapf(err, "unable to read passwd file %s", originPasswdFile)
+ return "", "", err
}
passwdFile, err := c.writeStringToStaticDir("passwd", string(orig)+passwdEntry)
if err != nil {
@@ -1936,7 +1991,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
f, err := os.OpenFile(containerPasswd, os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
- return "", "", errors.Wrapf(err, "error opening container %s /etc/passwd", c.ID())
+ return "", "", errors.Wrapf(err, "container %s", c.ID())
}
defer f.Close()
@@ -1963,7 +2018,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
}
orig, err := ioutil.ReadFile(originGroupFile)
if err != nil && !os.IsNotExist(err) {
- return "", "", errors.Wrapf(err, "unable to read group file %s", originGroupFile)
+ return "", "", err
}
groupFile, err := c.writeStringToStaticDir("group", string(orig)+groupEntry)
if err != nil {
@@ -1982,7 +2037,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
f, err := os.OpenFile(containerGroup, os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
- return "", "", errors.Wrapf(err, "error opening container %s /etc/group", c.ID())
+ return "", "", errors.Wrapf(err, "container %s", c.ID())
}
defer f.Close()
@@ -2003,13 +2058,13 @@ func (c *Container) copyOwnerAndPerms(source, dest string) error {
if os.IsNotExist(err) {
return nil
}
- return errors.Wrapf(err, "cannot stat `%s`", dest)
+ return err
}
if err := os.Chmod(dest, info.Mode()); err != nil {
- return errors.Wrapf(err, "cannot chmod `%s`", dest)
+ return err
}
if err := os.Chown(dest, int(info.Sys().(*syscall.Stat_t).Uid), int(info.Sys().(*syscall.Stat_t).Gid)); err != nil {
- return errors.Wrapf(err, "cannot chown `%s`", dest)
+ return err
}
return nil
}
@@ -2041,10 +2096,7 @@ func (c *Container) getOCICgroupPath() (string, error) {
logrus.Debugf("Setting CGroups for container %s to %s", c.ID(), systemdCgroups)
return systemdCgroups, nil
case cgroupManager == config.CgroupfsCgroupsManager:
- cgroupPath, err := c.CGroupPath()
- if err != nil {
- return "", err
- }
+ cgroupPath := filepath.Join(c.config.CgroupParent, fmt.Sprintf("libpod-%s", c.ID()))
logrus.Debugf("Setting CGroup path for container %s to %s", c.ID(), cgroupPath)
return cgroupPath, nil
default:
@@ -2100,7 +2152,7 @@ func (c *Container) checkFileExistsInRootfs(file string) (bool, error) {
if os.IsNotExist(err) {
return false, nil
}
- return false, errors.Wrapf(err, "error accessing container %s file %q", c.ID(), file)
+ return false, errors.Wrapf(err, "container %s", c.ID())
}
if stat.IsDir() {
return false, nil
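The setupSystemd hunk above probes the host's /sys/fs/cgroup/systemd mount with Statfs and mirrors whichever of nodev/noexec/nosuid/ro are already set, falling back to the restrictive defaults if the probe fails. A standalone sketch of that probe (Linux only, golang.org/x/sys/unix); the path passed in main is only an example:

//go:build linux

package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

// hostMountOptions returns bind-mount options mirroring the flags of the
// filesystem backing path, with a restrictive fallback if the probe fails -
// the same shape as the patch above.
func hostMountOptions(path string) []string {
    opts := []string{"bind", "rprivate"}

    var st unix.Statfs_t
    if err := unix.Statfs(path, &st); err != nil {
        return append(opts, "nodev", "noexec", "nosuid")
    }
    if st.Flags&unix.MS_NODEV == unix.MS_NODEV {
        opts = append(opts, "nodev")
    }
    if st.Flags&unix.MS_NOEXEC == unix.MS_NOEXEC {
        opts = append(opts, "noexec")
    }
    if st.Flags&unix.MS_NOSUID == unix.MS_NOSUID {
        opts = append(opts, "nosuid")
    }
    if st.Flags&unix.MS_RDONLY == unix.MS_RDONLY {
        opts = append(opts, "ro")
    }
    return opts
}

func main() {
    fmt.Println(hostMountOptions("/sys/fs/cgroup"))
}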
diff --git a/libpod/container_validate.go b/libpod/container_validate.go
index b78168cd1..fa809436e 100644
--- a/libpod/container_validate.go
+++ b/libpod/container_validate.go
@@ -88,7 +88,7 @@ func (c *Container) validate() error {
return errors.Wrapf(define.ErrInvalidArg, "cannot add to /etc/hosts if using image's /etc/hosts")
}
- // Check named volume and overlay volumes destination conflits
+ // Check named volume, overlay volume and image volume destination conflicts
destinations := make(map[string]bool)
for _, vol := range c.config.NamedVolumes {
// Don't check if they already exist.
@@ -106,6 +106,25 @@ func (c *Container) validate() error {
}
destinations[vol.Dest] = true
}
+ for _, vol := range c.config.ImageVolumes {
+ // Don't check if they already exist.
+ // If they don't we will automatically create them.
+ if _, ok := destinations[vol.Dest]; ok {
+ return errors.Wrapf(define.ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
+ }
+ destinations[vol.Dest] = true
+ }
+
+ // Check that networks and network aliases match up.
+ ctrNets := make(map[string]bool)
+ for _, net := range c.config.Networks {
+ ctrNets[net] = true
+ }
+ for net := range c.config.NetworkAliases {
+ if _, ok := ctrNets[net]; !ok {
+ return errors.Wrapf(define.ErrNoSuchNetwork, "container tried to set network aliases for network %s but is not connected to the network", net)
+ }
+ }
return nil
}
diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go
index 38b3a6686..775965477 100644
--- a/libpod/define/container_inspect.go
+++ b/libpod/define/container_inspect.go
@@ -575,6 +575,8 @@ type InspectAdditionalNetwork struct {
// Links is presently unused and maintained exclusively for
// compatibility.
Links []string `json:"Links"`
+ // Aliases are any network aliases the container has in this network.
+ Aliases []string `json:"Aliases,omitempty"`
}
// InspectNetworkSettings holds information about the network settings of the
diff --git a/libpod/define/errors.go b/libpod/define/errors.go
index 627928ef7..b96d36429 100644
--- a/libpod/define/errors.go
+++ b/libpod/define/errors.go
@@ -14,6 +14,9 @@ var (
// ErrNoSuchImage indicates the requested image does not exist
ErrNoSuchImage = errors.New("no such image")
+ // ErrMultipleImages found multiple name and tag matches
+ ErrMultipleImages = errors.New("found multiple name and tag matches")
+
// ErrNoSuchTag indicates the requested image tag does not exist
ErrNoSuchTag = errors.New("no such tag")
@@ -27,6 +30,10 @@ var (
// not exist.
ErrNoSuchExecSession = errors.New("no such exec session")
+ // ErrNoAliases indicates that the container does not have any network
+ // aliases.
+ ErrNoAliases = errors.New("no aliases for container")
+
// ErrCtrExists indicates a container with the same name or ID already
// exists
ErrCtrExists = errors.New("container already exists")
@@ -39,6 +46,9 @@ var (
// ErrExecSessionExists indicates an exec session with the same ID
// already exists.
ErrExecSessionExists = errors.New("exec session already exists")
+ // ErrNetworkExists indicates that a network with the given name already
+ // exists.
+ ErrNetworkExists = errors.New("network already exists")
// ErrCtrStateInvalid indicates a container is in an improper state for
// the requested operation
@@ -138,15 +148,15 @@ var (
// ErrOCIRuntimePermissionDenied indicates the OCI runtime attempted to invoke a command that returned
// a permission denied error
- ErrOCIRuntimePermissionDenied = errors.New("OCI runtime permission denied error")
+ ErrOCIRuntimePermissionDenied = errors.New("OCI permission denied")
// ErrOCIRuntimeNotFound indicates the OCI runtime attempted to invoke a command
// that was not found
- ErrOCIRuntimeNotFound = errors.New("OCI runtime command not found error")
+ ErrOCIRuntimeNotFound = errors.New("OCI not found")
// ErrOCIRuntimeUnavailable indicates that the OCI runtime associated to a container
// could not be found in the configuration
- ErrOCIRuntimeUnavailable = errors.New("OCI runtime not available in the current configuration")
+ ErrOCIRuntimeUnavailable = errors.New("OCI unavailable")
// ErrConmonOutdated indicates the version of conmon found (whether via the configuration or $PATH)
// is out of date for the current podman version
@@ -168,4 +178,7 @@ var (
// ErrStoreNotInitialized indicates that the container storage was never
// initialized.
ErrStoreNotInitialized = errors.New("the container storage was never initialized")
+
+ // ErrNoNetwork indicates that a container has no net namespace, like network=none
+ ErrNoNetwork = errors.New("container has no network namespace")
)
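The new sentinels (ErrMultipleImages, ErrNoAliases, ErrNetworkExists, ErrNoNetwork) follow the same pattern as the existing ones: callers wrap them with github.com/pkg/errors for context and branch on errors.Cause, as Container.networks() does with ErrNoSuchNetwork earlier in this diff. A brief sketch of that round trip, with a locally defined sentinel standing in for the libpod ones:

package main

import (
    "fmt"

    "github.com/pkg/errors"
)

// errNoSuchNetwork stands in for a libpod/define sentinel such as ErrNoSuchNetwork.
var errNoSuchNetwork = errors.New("network not found")

func lookup(name string) error {
    // Callers add context with Wrapf; the sentinel stays recoverable underneath.
    return errors.Wrapf(errNoSuchNetwork, "container abc123 is not joined to %q", name)
}

func main() {
    err := lookup("backend")
    fmt.Println(err)                                   // wrapped, human-readable message
    fmt.Println(errors.Cause(err) == errNoSuchNetwork) // true - how the diff branches on sentinels
}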
diff --git a/libpod/define/pod_inspect.go b/libpod/define/pod_inspect.go
index a4115eb92..2fa91166f 100644
--- a/libpod/define/pod_inspect.go
+++ b/libpod/define/pod_inspect.go
@@ -67,7 +67,7 @@ type InspectPodInfraConfig struct {
StaticIP net.IP
// StaticMAC is a static MAC address that will be assigned to the infra
// container and then used by the pod.
- StaticMAC net.HardwareAddr
+ StaticMAC string
// NoManageResolvConf indicates that the pod will not manage resolv.conf
// and instead each container will handle their own.
NoManageResolvConf bool
diff --git a/libpod/define/podstate.go b/libpod/define/podstate.go
index 2b59aabfb..e02671972 100644
--- a/libpod/define/podstate.go
+++ b/libpod/define/podstate.go
@@ -10,9 +10,12 @@ const (
PodStateExited = "Exited"
// PodStatePaused indicates the pod has been paused
PodStatePaused = "Paused"
- // PodStateRunning indicates that one or more of the containers in
- // the pod is running
+ // PodStateRunning indicates that all of the containers in the pod are
+ // running.
PodStateRunning = "Running"
+ // PodStateDegraded indicates that at least one, but not all, of the
+ // containers in the pod are running.
+ PodStateDegraded = "Degraded"
// PodStateStopped indicates all of the containers belonging to the pod
// are stopped.
PodStateStopped = "Stopped"
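With the comment change above, Running now means every container in the pod is running and Degraded covers the some-but-not-all case. A small sketch of deriving a pod status from container states under that definition; the real computation lives in libpod's pod status code, this is only an illustration:

package main

import "fmt"

func podStatus(ctrStates []string) string {
    if len(ctrStates) == 0 {
        return "Created"
    }
    running := 0
    for _, s := range ctrStates {
        if s == "running" {
            running++
        }
    }
    switch {
    case running == len(ctrStates):
        return "Running" // all containers running
    case running > 0:
        return "Degraded" // at least one, but not all
    default:
        return "Stopped"
    }
}

func main() {
    fmt.Println(podStatus([]string{"running", "running"})) // Running
    fmt.Println(podStatus([]string{"running", "exited"}))  // Degraded
    fmt.Println(podStatus([]string{"exited", "exited"}))   // Stopped
}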
diff --git a/libpod/events.go b/libpod/events.go
index 95317eb01..e199a3846 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -50,6 +50,18 @@ func (c *Container) newContainerExitedEvent(exitCode int32) {
}
}
+// newNetworkEvent creates a new event based on a network connect/disconnect
+func (c *Container) newNetworkEvent(status events.Status, netName string) {
+ e := events.NewEvent(status)
+ e.ID = c.ID()
+ e.Name = c.Name()
+ e.Type = events.Network
+ e.Network = netName
+ if err := c.runtime.eventer.Write(e); err != nil {
+ logrus.Errorf("unable to write network event: %q", err)
+ }
+}
+
// newPodEvent creates a new event for a libpod pod
func (p *Pod) newPodEvent(status events.Status) {
e := events.NewEvent(status)
diff --git a/libpod/events/config.go b/libpod/events/config.go
index 2ec3111fe..af09a65ae 100644
--- a/libpod/events/config.go
+++ b/libpod/events/config.go
@@ -30,6 +30,8 @@ type Event struct {
Image string `json:",omitempty"`
// Name where applicable
Name string `json:",omitempty"`
+ // Network is the network name in a network event
+ Network string `json:"network,omitempty"`
// Status describes the event that occurred
Status Status
// Time the event occurred
@@ -101,6 +103,8 @@ const (
Container Type = "container"
// Image - event is related to images
Image Type = "image"
+ // Network - event is related to networks
+ Network Type = "network"
// Pod - event is related to pods
Pod Type = "pod"
// System - event is related to Podman whole and not to any specific
@@ -141,6 +145,10 @@ const (
LoadFromArchive Status = "loadfromarchive"
// Mount ...
Mount Status = "mount"
+ // NetworkConnect
+ NetworkConnect Status = "connect"
+ // NetworkDisconnect
+ NetworkDisconnect Status = "disconnect"
// Pause ...
Pause Status = "pause"
// Prune ...
diff --git a/libpod/events/events.go b/libpod/events/events.go
index 42939d64c..4e7267af3 100644
--- a/libpod/events/events.go
+++ b/libpod/events/events.go
@@ -77,6 +77,8 @@ func (e *Event) ToHumanReadable() string {
}
}
humanFormat += ")"
+ case Network:
+ humanFormat = fmt.Sprintf("%s %s %s %s (container=%s, name=%s)", e.Time, e.Type, e.Status, e.ID, e.ID, e.Network)
case Image:
humanFormat = fmt.Sprintf("%s %s %s %s %s", e.Time, e.Type, e.Status, e.ID, e.Name)
case System:
@@ -115,6 +117,8 @@ func StringToType(name string) (Type, error) {
return Container, nil
case Image.String():
return Image, nil
+ case Network.String():
+ return Network, nil
case Pod.String():
return Pod, nil
case System.String():
@@ -162,6 +166,10 @@ func StringToStatus(name string) (Status, error) {
return LoadFromArchive, nil
case Mount.String():
return Mount, nil
+ case NetworkConnect.String():
+ return NetworkConnect, nil
+ case NetworkDisconnect.String():
+ return NetworkDisconnect, nil
case Pause.String():
return Pause, nil
case Prune.String():
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 5e3be8009..9a514e302 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -56,6 +56,9 @@ func (e EventJournalD) Write(ee Event) error {
}
m["PODMAN_LABELS"] = string(b)
}
+ case Network:
+ m["PODMAN_ID"] = ee.ID
+ m["PODMAN_NETWORK_NAME"] = ee.Network
case Volume:
m["PODMAN_NAME"] = ee.Name
}
@@ -197,6 +200,9 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { /
newEvent.Details = Details{Attributes: labels}
}
}
+ case Network:
+ newEvent.ID = entry.Fields["PODMAN_ID"]
+ newEvent.Network = entry.Fields["PODMAN_NETWORK_NAME"]
case Image:
newEvent.ID = entry.Fields["PODMAN_ID"]
}
diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go
index b70102450..57e38b815 100644
--- a/libpod/events/logfile.go
+++ b/libpod/events/logfile.go
@@ -76,7 +76,7 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
return err
}
switch event.Type {
- case Image, Volume, Pod, System, Container:
+ case Image, Volume, Pod, System, Container, Network:
// no-op
default:
return errors.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath)
diff --git a/libpod/filters/containers.go b/libpod/filters/containers.go
index e38e024d2..2520c4f30 100644
--- a/libpod/filters/containers.go
+++ b/libpod/filters/containers.go
@@ -1,7 +1,6 @@
package lpfilters
import (
- "regexp"
"strconv"
"strings"
"time"
@@ -14,96 +13,130 @@ import (
)
// GenerateContainerFilterFuncs return ContainerFilter functions based of filter.
-func GenerateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime) (func(container *libpod.Container) bool, error) {
+func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpod.Runtime) (func(container *libpod.Container) bool, error) {
switch filter {
case "id":
+ // we only have to match one ID
return func(c *libpod.Container) bool {
- return strings.Contains(c.ID(), filterValue)
+ return util.StringMatchRegexSlice(c.ID(), filterValues)
}, nil
case "label":
- var filterArray = strings.SplitN(filterValue, "=", 2)
- var filterKey = filterArray[0]
- if len(filterArray) > 1 {
- filterValue = filterArray[1]
- } else {
- filterValue = ""
- }
+		// we have to match that all given labels exist on that container
return func(c *libpod.Container) bool {
- for labelKey, labelValue := range c.Labels() {
- if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
- return true
+ labels := c.Labels()
+ for _, filterValue := range filterValues {
+ matched := false
+ filterArray := strings.SplitN(filterValue, "=", 2)
+ filterKey := filterArray[0]
+ if len(filterArray) > 1 {
+ filterValue = filterArray[1]
+ } else {
+ filterValue = ""
+ }
+ for labelKey, labelValue := range labels {
+ if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ return false
}
}
- return false
+ return true
}, nil
case "name":
+ // we only have to match one name
return func(c *libpod.Container) bool {
- match, err := regexp.MatchString(filterValue, c.Name())
- if err != nil {
- return false
- }
- return match
+ return util.StringMatchRegexSlice(c.Name(), filterValues)
}, nil
case "exited":
- exitCode, err := strconv.ParseInt(filterValue, 10, 32)
- if err != nil {
- return nil, errors.Wrapf(err, "exited code out of range %q", filterValue)
+ var exitCodes []int32
+ for _, exitCode := range filterValues {
+ ec, err := strconv.ParseInt(exitCode, 10, 32)
+ if err != nil {
+ return nil, errors.Wrapf(err, "exited code out of range %q", ec)
+ }
+ exitCodes = append(exitCodes, int32(ec))
}
return func(c *libpod.Container) bool {
ec, exited, err := c.ExitCode()
- if ec == int32(exitCode) && err == nil && exited {
- return true
+ if err == nil && exited {
+ for _, exitCode := range exitCodes {
+ if ec == exitCode {
+ return true
+ }
+ }
}
return false
}, nil
case "status":
- if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) {
- return nil, errors.Errorf("%s is not a valid status", filterValue)
+ for _, filterValue := range filterValues {
+ if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) {
+ return nil, errors.Errorf("%s is not a valid status", filterValue)
+ }
}
return func(c *libpod.Container) bool {
status, err := c.State()
if err != nil {
return false
}
- if filterValue == "stopped" {
- filterValue = "exited"
- }
state := status.String()
if status == define.ContainerStateConfigured {
state = "created"
} else if status == define.ContainerStateStopped {
state = "exited"
}
- return state == filterValue
+ for _, filterValue := range filterValues {
+ if filterValue == "stopped" {
+ filterValue = "exited"
+ }
+ if state == filterValue {
+ return true
+ }
+ }
+ return false
}, nil
case "ancestor":
// This needs to refine to match docker
// - ancestor=(<image-name>[:tag]|<image-id>| ⟨image@digest⟩) - containers created from an image or a descendant.
return func(c *libpod.Container) bool {
- containerConfig := c.Config()
- if strings.Contains(containerConfig.RootfsImageID, filterValue) || strings.Contains(containerConfig.RootfsImageName, filterValue) {
- return true
+ for _, filterValue := range filterValues {
+ containerConfig := c.Config()
+ if strings.Contains(containerConfig.RootfsImageID, filterValue) || strings.Contains(containerConfig.RootfsImageName, filterValue) {
+ return true
+ }
}
return false
}, nil
case "before":
- ctr, err := r.LookupContainer(filterValue)
- if err != nil {
- return nil, errors.Errorf("unable to find container by name or id of %s", filterValue)
+ var createTime time.Time
+ for _, filterValue := range filterValues {
+ ctr, err := r.LookupContainer(filterValue)
+ if err != nil {
+ return nil, err
+ }
+ containerConfig := ctr.Config()
+ if createTime.IsZero() || createTime.After(containerConfig.CreatedTime) {
+ createTime = containerConfig.CreatedTime
+ }
}
- containerConfig := ctr.Config()
- createTime := containerConfig.CreatedTime
return func(c *libpod.Container) bool {
cc := c.Config()
return createTime.After(cc.CreatedTime)
}, nil
case "since":
- ctr, err := r.LookupContainer(filterValue)
- if err != nil {
- return nil, errors.Errorf("unable to find container by name or id of %s", filterValue)
+ var createTime time.Time
+ for _, filterValue := range filterValues {
+ ctr, err := r.LookupContainer(filterValue)
+ if err != nil {
+ return nil, err
+ }
+ containerConfig := ctr.Config()
+ if createTime.IsZero() || createTime.After(containerConfig.CreatedTime) {
+ createTime = containerConfig.CreatedTime
+ }
}
- containerConfig := ctr.Config()
- createTime := containerConfig.CreatedTime
return func(c *libpod.Container) bool {
cc := c.Config()
return createTime.Before(cc.CreatedTime)
@@ -113,17 +146,27 @@ func GenerateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime)
return func(c *libpod.Container) bool {
containerConfig := c.Config()
var dest string
- arr := strings.Split(filterValue, ":")
- source := arr[0]
- if len(arr) == 2 {
- dest = arr[1]
- }
- for _, mount := range containerConfig.Spec.Mounts {
- if dest != "" && (mount.Source == source && mount.Destination == dest) {
- return true
+ for _, filterValue := range filterValues {
+ arr := strings.SplitN(filterValue, ":", 2)
+ source := arr[0]
+ if len(arr) == 2 {
+ dest = arr[1]
}
- if dest == "" && mount.Source == source {
- return true
+ for _, mount := range containerConfig.Spec.Mounts {
+ if dest != "" && (mount.Source == source && mount.Destination == dest) {
+ return true
+ }
+ if dest == "" && mount.Source == source {
+ return true
+ }
+ }
+ for _, vname := range containerConfig.NamedVolumes {
+ if dest != "" && (vname.Name == source && vname.Dest == dest) {
+ return true
+ }
+ if dest == "" && vname.Name == source {
+ return true
+ }
}
}
return false
@@ -134,10 +177,18 @@ func GenerateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime)
if err != nil {
return false
}
- return hcStatus == filterValue
+ for _, filterValue := range filterValues {
+ if hcStatus == filterValue {
+ return true
+ }
+ }
+ return false
}, nil
case "until":
- ts, err := timetype.GetTimestamp(filterValue, time.Now())
+ if len(filterValues) != 1 {
+ return nil, errors.Errorf("specify exactly one timestamp for %s", filter)
+ }
+ ts, err := timetype.GetTimestamp(filterValues[0], time.Now())
if err != nil {
return nil, err
}
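Both the container filters above and the pod filters below now delegate matching to util.StringMatchRegexSlice. A plausible sketch of that helper, assuming each filter value is treated as a regular expression and any match wins (the real implementation lives in pkg/util and is not part of this diff):

package util

import "regexp"

// StringMatchRegexSlice reports whether s matches any of the given
// regular-expression patterns; patterns that fail to compile are skipped.
func StringMatchRegexSlice(s string, patterns []string) bool {
	for _, p := range patterns {
		if ok, err := regexp.MatchString(p, s); err == nil && ok {
			return true
		}
	}
	return false
}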
diff --git a/libpod/filters/pods.go b/libpod/filters/pods.go
index adce9784c..17b3f3ca9 100644
--- a/libpod/filters/pods.go
+++ b/libpod/filters/pods.go
@@ -13,7 +13,7 @@ import (
// GeneratePodFilterFunc takes a filter and filtervalue (key, value)
// and generates a libpod function that can be used to filter
// pods
-func GeneratePodFilterFunc(filter, filterValue string) (
+func GeneratePodFilterFunc(filter string, filterValues []string) (
func(pod *libpod.Pod) bool, error) {
switch filter {
case "ctr-ids":
@@ -22,7 +22,10 @@ func GeneratePodFilterFunc(filter, filterValue string) (
if err != nil {
return false
}
- return util.StringInSlice(filterValue, ctrIds)
+			for _, id := range ctrIds {
+				if util.StringMatchRegexSlice(id, filterValues) {
+					return true
+				}
+			}
+			return false
}, nil
case "ctr-names":
return func(p *libpod.Pod) bool {
@@ -31,9 +34,7 @@ func GeneratePodFilterFunc(filter, filterValue string) (
return false
}
for _, ctr := range ctrs {
- if filterValue == ctr.Name() {
- return true
- }
+				if util.StringMatchRegexSlice(ctr.Name(), filterValues) {
+					return true
+				}
}
return false
}, nil
@@ -43,18 +44,22 @@ func GeneratePodFilterFunc(filter, filterValue string) (
if err != nil {
return false
}
-
- fVint, err2 := strconv.Atoi(filterValue)
- if err2 != nil {
- return false
+ for _, filterValue := range filterValues {
+ fVint, err2 := strconv.Atoi(filterValue)
+ if err2 != nil {
+ return false
+ }
+ if len(ctrIds) == fVint {
+ return true
+ }
}
- return len(ctrIds) == fVint
+ return false
}, nil
case "ctr-status":
- if !util.StringInSlice(filterValue,
- []string{"created", "restarting", "running", "paused",
- "exited", "unknown"}) {
- return nil, errors.Errorf("%s is not a valid status", filterValue)
+ for _, filterValue := range filterValues {
+ if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) {
+ return nil, errors.Errorf("%s is not a valid status", filterValue)
+ }
}
return func(p *libpod.Pod) bool {
ctrStatuses, err := p.Status()
@@ -65,50 +70,69 @@ func GeneratePodFilterFunc(filter, filterValue string) (
state := ctrStatus.String()
if ctrStatus == define.ContainerStateConfigured {
state = "created"
+ } else if ctrStatus == define.ContainerStateStopped {
+ state = "exited"
}
- if state == filterValue {
- return true
+ for _, filterValue := range filterValues {
+ if filterValue == "stopped" {
+ filterValue = "exited"
+ }
+ if state == filterValue {
+ return true
+ }
}
}
return false
}, nil
case "id":
return func(p *libpod.Pod) bool {
- return strings.Contains(p.ID(), filterValue)
+ return util.StringMatchRegexSlice(p.ID(), filterValues)
}, nil
case "name":
return func(p *libpod.Pod) bool {
- return strings.Contains(p.Name(), filterValue)
+ return util.StringMatchRegexSlice(p.Name(), filterValues)
}, nil
case "status":
- if !util.StringInSlice(filterValue, []string{"stopped", "running", "paused", "exited", "dead", "created"}) {
- return nil, errors.Errorf("%s is not a valid pod status", filterValue)
+ for _, filterValue := range filterValues {
+ if !util.StringInSlice(filterValue, []string{"stopped", "running", "paused", "exited", "dead", "created", "degraded"}) {
+ return nil, errors.Errorf("%s is not a valid pod status", filterValue)
+ }
}
return func(p *libpod.Pod) bool {
status, err := p.GetPodStatus()
if err != nil {
return false
}
- if strings.ToLower(status) == filterValue {
- return true
+ for _, filterValue := range filterValues {
+ if strings.ToLower(status) == filterValue {
+ return true
+ }
}
return false
}, nil
case "label":
- var filterArray = strings.SplitN(filterValue, "=", 2)
- var filterKey = filterArray[0]
- if len(filterArray) > 1 {
- filterValue = filterArray[1]
- } else {
- filterValue = ""
- }
return func(p *libpod.Pod) bool {
- for labelKey, labelValue := range p.Labels() {
- if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
- return true
+ labels := p.Labels()
+ for _, filterValue := range filterValues {
+ matched := false
+ filterArray := strings.SplitN(filterValue, "=", 2)
+ filterKey := filterArray[0]
+ if len(filterArray) > 1 {
+ filterValue = filterArray[1]
+ } else {
+ filterValue = ""
+ }
+ for labelKey, labelValue := range labels {
+ if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ return false
}
}
- return false
+ return true
}, nil
}
return nil, errors.Errorf("%s is an invalid filter", filter)
diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go
index bd55b852e..f77075893 100644
--- a/libpod/healthcheck.go
+++ b/libpod/healthcheck.go
@@ -223,7 +223,7 @@ func (c *Container) GetHealthCheckLog() (define.HealthCheckResults, error) {
}
b, err := ioutil.ReadFile(c.healthCheckLogPath())
if err != nil {
- return healthCheck, errors.Wrapf(err, "failed to read health check log file %s", c.healthCheckLogPath())
+ return healthCheck, errors.Wrap(err, "failed to read health check log file")
}
if err := json.Unmarshal(b, &healthCheck); err != nil {
return healthCheck, errors.Wrapf(err, "failed to unmarshal existing healthcheck results in %s", c.healthCheckLogPath())
diff --git a/libpod/healthcheck_linux.go b/libpod/healthcheck_linux.go
index b0f1ff35d..0ad15da09 100644
--- a/libpod/healthcheck_linux.go
+++ b/libpod/healthcheck_linux.go
@@ -26,6 +26,10 @@ func (c *Container) createTimer() error {
if rootless.IsRootless() {
cmd = append(cmd, "--user")
}
+ path := os.Getenv("PATH")
+ if path != "" {
+ cmd = append(cmd, "--setenv=PATH="+path)
+ }
cmd = append(cmd, "--unit", c.ID(), fmt.Sprintf("--on-unit-inactive=%s", c.HealthCheckConfig().Interval.String()), "--timer-property=AccuracySec=1s", podman, "healthcheck", "run", c.ID())
conn, err := systemd.ConnectToDBUS()
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 301954703..cecd64eb7 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -24,6 +24,7 @@ import (
"github.com/containers/image/v5/manifest"
ociarchive "github.com/containers/image/v5/oci/archive"
"github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/pkg/shortnames"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/tarball"
"github.com/containers/image/v5/transports"
@@ -164,7 +165,7 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile
}
imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions, &retry.RetryOptions{MaxRetry: maxRetry}, label)
if err != nil {
- return nil, errors.Wrapf(err, "unable to pull %s", name)
+ return nil, err
}
newImage, err := ir.NewFromLocal(imageName[0])
@@ -318,10 +319,8 @@ func (ir *Runtime) LoadAllImagesFromDockerArchive(ctx context.Context, fileName
}
goal := pullGoal{
- pullAllPairs: true,
- usedSearchRegistries: false,
- refPairs: refPairs,
- searchedRegistries: nil,
+ pullAllPairs: true,
+ refPairs: refPairs,
}
defer goal.cleanUp()
@@ -456,22 +455,19 @@ func (ir *Runtime) getLocalImage(inputName string) (string, *storage.Image, erro
return "", nil, errors.Wrapf(ErrNoSuchImage, imageError)
}
- // "Short-name image", so let's try out certain prefixes:
- // 1) DefaultLocalRegistry (i.e., "localhost/)
- // 2) Unqualified-search registries from registries.conf
- unqualifiedSearchRegistries, err := registries.GetRegistries()
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: registries.SystemRegistriesConfPath(),
+ }
+
+ candidates, err := shortnames.ResolveLocally(sys, inputName)
if err != nil {
return "", nil, err
}
- for _, candidate := range append([]string{DefaultLocalRegistry}, unqualifiedSearchRegistries...) {
- ref, err := decomposedImage.referenceWithRegistry(candidate)
- if err != nil {
- return "", nil, err
- }
- img, err := ir.store.Image(reference.TagNameOnly(ref).String())
+ for _, candidate := range candidates {
+ img, err := ir.store.Image(candidate.String())
if err == nil {
- return ref.String(), img, nil
+ return candidate.String(), img, nil
}
}
diff --git a/libpod/image/manifests.go b/libpod/image/manifests.go
index 59678fdb2..14f7c2f83 100644
--- a/libpod/image/manifests.go
+++ b/libpod/image/manifests.go
@@ -2,13 +2,14 @@ package image
import (
"context"
+ "fmt"
"github.com/containers/buildah/manifests"
+ "github.com/containers/image/v5/docker"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// Options for adding a manifest
@@ -69,19 +70,10 @@ func CreateManifestList(rt *Runtime, systemContext types.SystemContext, names []
list := manifests.Create()
opts := ManifestAddOpts{Images: names, All: all}
for _, img := range imgs {
- var ref types.ImageReference
- newImage, err := rt.NewFromLocal(img)
- if err == nil {
- ir, err := newImage.toImageRef(context.Background())
- if err != nil {
- return "", err
- }
- if ir == nil {
- return "", errors.New("unable to convert image to ImageReference")
- }
- ref = ir.Reference()
- } else {
- ref, err = alltransports.ParseImageName(img)
+ ref, err := alltransports.ParseImageName(img)
+ if err != nil {
+ dockerPrefix := fmt.Sprintf("%s://", docker.Transport.Name())
+ ref, err = alltransports.ParseImageName(fmt.Sprintf("%s%s", dockerPrefix, img))
if err != nil {
return "", err
}
@@ -134,18 +126,10 @@ func addManifestToList(ref types.ImageReference, list manifests.List, systemCont
// AddManifest adds a manifest to a given manifest list.
func (i *Image) AddManifest(systemContext types.SystemContext, opts ManifestAddOpts) (string, error) {
- var (
- ref types.ImageReference
- )
- newImage, err := i.imageruntime.NewFromLocal(opts.Images[0])
- if err == nil {
- ir, err := newImage.toImageRef(context.Background())
- if err != nil {
- return "", err
- }
- ref = ir.Reference()
- } else {
- ref, err = alltransports.ParseImageName(opts.Images[0])
+ ref, err := alltransports.ParseImageName(opts.Images[0])
+ if err != nil {
+ dockerPrefix := fmt.Sprintf("%s://", docker.Transport.Name())
+ ref, err = alltransports.ParseImageName(fmt.Sprintf("%s%s", dockerPrefix, opts.Images[0]))
if err != nil {
return "", err
}
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index 65acdf427..2a2d16252 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"io"
+ "os"
"path/filepath"
"strings"
@@ -15,13 +16,14 @@ import (
dockerarchive "github.com/containers/image/v5/docker/archive"
ociarchive "github.com/containers/image/v5/oci/archive"
oci "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/pkg/shortnames"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v2/libpod/events"
+ "github.com/containers/podman/v2/pkg/errorhandling"
"github.com/containers/podman/v2/pkg/registries"
- "github.com/hashicorp/go-multierror"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -56,9 +58,10 @@ var (
// pullRefPair records a pair of prepared image references to pull.
type pullRefPair struct {
- image string
- srcRef types.ImageReference
- dstRef types.ImageReference
+ image string
+ srcRef types.ImageReference
+ dstRef types.ImageReference
+ resolvedShortname *shortnames.PullCandidate // if set, must be recorded after successful pull
}
// cleanUpFunc is a function prototype for clean-up functions.
@@ -66,11 +69,11 @@ type cleanUpFunc func() error
// pullGoal represents the prepared image references and decided behavior to be executed by imagePull
type pullGoal struct {
- refPairs []pullRefPair
- pullAllPairs bool // Pull all refPairs instead of stopping on first success.
- usedSearchRegistries bool // refPairs construction has depended on registries.GetRegistries()
- searchedRegistries []string // The list of search registries used; set only if usedSearchRegistries
- cleanUpFuncs []cleanUpFunc // Mainly used to close long-lived objects (e.g., an archive.Reader)
+ refPairs []pullRefPair
+ pullAllPairs bool // Pull all refPairs instead of stopping on first success.
+ cleanUpFuncs []cleanUpFunc // Mainly used to close long-lived objects (e.g., an archive.Reader)
+ shortName string // Set when pulling a short name
+ resolved *shortnames.Resolved // Set when pulling a short name
}
// cleanUp invokes all cleanUpFuncs. Certain resources may not be available
@@ -86,10 +89,8 @@ func (p *pullGoal) cleanUp() {
// singlePullRefPairGoal returns a no-frills pull goal for the specified reference pair.
func singlePullRefPairGoal(rp pullRefPair) *pullGoal {
return &pullGoal{
- refPairs: []pullRefPair{rp},
- pullAllPairs: false, // Does not really make a difference.
- usedSearchRegistries: false,
- searchedRegistries: nil,
+ refPairs: []pullRefPair{rp},
+ pullAllPairs: false, // Does not really make a difference.
}
}
@@ -193,11 +194,9 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.
}
return &pullGoal{
- pullAllPairs: true,
- usedSearchRegistries: false,
- refPairs: pairs,
- searchedRegistries: nil,
- cleanUpFuncs: []cleanUpFunc{reader.Close},
+ pullAllPairs: true,
+ refPairs: pairs,
+ cleanUpFuncs: []cleanUpFunc{reader.Close},
}, nil
case OCIArchive:
@@ -267,7 +266,7 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s
if srcTransport != nil && srcTransport.Name() != DockerTransport {
return nil, err
}
- goal, err = ir.pullGoalFromPossiblyUnqualifiedName(inputName)
+ goal, err = ir.pullGoalFromPossiblyUnqualifiedName(sc, writer, inputName)
if err != nil {
return nil, errors.Wrap(err, "error getting default registries to try")
}
@@ -325,7 +324,7 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
var (
images []string
- pullErrors *multierror.Error
+ pullErrors []error
)
for _, imageInfo := range goal.refPairs {
@@ -348,12 +347,17 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
_, err = cp.Image(ctx, policyContext, imageInfo.dstRef, imageInfo.srcRef, copyOptions)
return err
}, retryOptions); err != nil {
- pullErrors = multierror.Append(pullErrors, err)
+ pullErrors = append(pullErrors, err)
logrus.Debugf("Error pulling image ref %s: %v", imageInfo.srcRef.StringWithinTransport(), err)
if writer != nil {
_, _ = io.WriteString(writer, cleanErrorMessage(err))
}
} else {
+ if imageInfo.resolvedShortname != nil {
+ if err := imageInfo.resolvedShortname.Record(); err != nil {
+ logrus.Errorf("Error recording short-name alias %q: %v", imageInfo.resolvedShortname.Value.String(), err)
+ }
+ }
if !goal.pullAllPairs {
ir.newImageEvent(events.Pull, "")
return []string{imageInfo.image}, nil
@@ -361,68 +365,75 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
images = append(images, imageInfo.image)
}
}
- // If no image was found, we should handle. Lets be nicer to the user and see if we can figure out why.
+	// If no image was found, we should handle the error. Let's be nicer to
+	// the user and see if we can figure out why.
if len(images) == 0 {
- if goal.usedSearchRegistries && len(goal.searchedRegistries) == 0 {
- return nil, errors.Errorf("image name provided is a short name and no search registries are defined in the registries config file.")
- }
- // If the image passed in was fully-qualified, we will have 1 refpair. Bc the image is fq'd, we don't need to yap about registries.
- if !goal.usedSearchRegistries {
- if pullErrors != nil && len(pullErrors.Errors) > 0 { // this should always be true
- return nil, pullErrors.Errors[0]
- }
- return nil, errors.Errorf("unable to pull image, or you do not have pull access")
+ if goal.resolved != nil {
+ return nil, goal.resolved.FormatPullErrors(pullErrors)
}
- return nil, errors.Cause(pullErrors)
- }
- if len(images) > 0 {
- ir.newImageEvent(events.Pull, images[0])
+ return nil, errorhandling.JoinErrors(pullErrors)
}
+
+ ir.newImageEvent(events.Pull, images[0])
return images, nil
}
+// getShortNameMode looks up the `CONTAINERS_SHORT_NAME_ALIASING` environment
+// variable. If it's "on", return `nil` to use the defaults from
+// containers/image and the registries.conf files on the system. If it's
+// "off", empty or unset, return types.ShortNameModeDisabled to turn off
+// short-name aliasing by default.
+//
+// TODO: remove this function once we want to default to short-name aliasing.
+func getShortNameMode() *types.ShortNameMode {
+ env := os.Getenv("CONTAINERS_SHORT_NAME_ALIASING")
+ if strings.ToLower(env) == "on" {
+ return nil // default to whatever registries.conf and c/image decide
+ }
+ mode := types.ShortNameModeDisabled
+ return &mode
+}
+
// pullGoalFromPossiblyUnqualifiedName looks at inputName and determines the possible
// image references to try pulling in combination with the registries.conf file as well
-func (ir *Runtime) pullGoalFromPossiblyUnqualifiedName(inputName string) (*pullGoal, error) {
- decomposedImage, err := decompose(inputName)
+func (ir *Runtime) pullGoalFromPossiblyUnqualifiedName(sys *types.SystemContext, writer io.Writer, inputName string) (*pullGoal, error) {
+ if sys == nil {
+ sys = &types.SystemContext{}
+ }
+ sys.ShortNameMode = getShortNameMode()
+
+ resolved, err := shortnames.Resolve(sys, inputName)
if err != nil {
return nil, err
}
- if decomposedImage.hasRegistry {
- srcRef, err := docker.ParseReference("//" + inputName)
- if err != nil {
- return nil, errors.Wrapf(err, "unable to parse '%s'", inputName)
+ if desc := resolved.Description(); len(desc) > 0 {
+ logrus.Debug(desc)
+ if writer != nil {
+ if _, err := writer.Write([]byte(desc + "\n")); err != nil {
+ return nil, err
+ }
}
- return ir.getSinglePullRefPairGoal(srcRef, inputName)
}
- searchRegistries, err := registries.GetRegistries()
- if err != nil {
- return nil, err
- }
- refPairs := make([]pullRefPair, 0, len(searchRegistries))
- for _, registry := range searchRegistries {
- ref, err := decomposedImage.referenceWithRegistry(registry)
+ refPairs := []pullRefPair{}
+ for i, candidate := range resolved.PullCandidates {
+ srcRef, err := docker.NewReference(candidate.Value)
if err != nil {
return nil, err
}
- imageName := ref.String()
- srcRef, err := docker.ParseReference("//" + imageName)
- if err != nil {
- return nil, errors.Wrapf(err, "unable to parse '%s'", imageName)
- }
- ps, err := ir.getPullRefPair(srcRef, imageName)
+ ps, err := ir.getPullRefPair(srcRef, candidate.Value.String())
if err != nil {
return nil, err
}
+ ps.resolvedShortname = &resolved.PullCandidates[i]
refPairs = append(refPairs, ps)
}
return &pullGoal{
- refPairs: refPairs,
- pullAllPairs: false,
- usedSearchRegistries: true,
- searchedRegistries: searchRegistries,
+ refPairs: refPairs,
+ pullAllPairs: false,
+ shortName: inputName,
+ resolved: resolved,
}, nil
}
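A hedged usage sketch of the new short-name flow: resolve a short name into fully-qualified pull candidates with c/image's shortnames package, honoring the CONTAINERS_SHORT_NAME_ALIASING switch described above (image and registry names are placeholders):

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/containers/image/v5/pkg/shortnames"
	"github.com/containers/image/v5/types"
)

func main() {
	sys := &types.SystemContext{}
	// Mirror getShortNameMode: aliasing stays disabled unless explicitly enabled.
	if strings.ToLower(os.Getenv("CONTAINERS_SHORT_NAME_ALIASING")) != "on" {
		mode := types.ShortNameModeDisabled
		sys.ShortNameMode = &mode
	}

	resolved, err := shortnames.Resolve(sys, "busybox")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Each candidate is a fully-qualified reference built from the
	// unqualified-search registries in registries.conf (or a recorded alias).
	for _, candidate := range resolved.PullCandidates {
		fmt.Println(candidate.Value.String())
	}
}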
diff --git a/libpod/image/pull_test.go b/libpod/image/pull_test.go
index 6cb80e8b5..2e1464ad3 100644
--- a/libpod/image/pull_test.go
+++ b/libpod/image/pull_test.go
@@ -278,15 +278,11 @@ func TestPullGoalFromImageReference(t *testing.T) {
assert.Equal(t, e.dstName, storageReferenceWithoutLocation(res.refPairs[i].dstRef), testDescription)
}
assert.Equal(t, c.expectedPullAllPairs, res.pullAllPairs, c.srcName)
- assert.False(t, res.usedSearchRegistries, c.srcName)
- assert.Nil(t, res.searchedRegistries, c.srcName)
}
}
}
-const registriesConfWithSearch = `[registries.search]
-registries = ['example.com', 'docker.io']
-`
+const registriesConfWithSearch = `unqualified-search-registries = ['example.com', 'docker.io']`
func TestPullGoalFromPossiblyUnqualifiedName(t *testing.T) {
const digestSuffix = "@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
@@ -303,69 +299,58 @@ func TestPullGoalFromPossiblyUnqualifiedName(t *testing.T) {
ir, cleanup := newTestRuntime(t)
defer cleanup()
- // Environment is per-process, so this looks very unsafe; actually it seems fine because tests are not
- // run in parallel unless they opt in by calling t.Parallel(). So don’t do that.
- oldRCP, hasRCP := os.LookupEnv("REGISTRIES_CONFIG_PATH")
- defer func() {
- if hasRCP {
- os.Setenv("REGISTRIES_CONFIG_PATH", oldRCP)
- } else {
- os.Unsetenv("REGISTRIES_CONFIG_PATH")
- }
- }()
- os.Setenv("REGISTRIES_CONFIG_PATH", registriesConf.Name())
+ sc := GetSystemContext("", "", false)
+
+ aliasesConf, err := ioutil.TempFile("", "short-name-aliases.conf")
+ require.NoError(t, err)
+ defer aliasesConf.Close()
+ defer os.Remove(aliasesConf.Name())
+ sc.UserShortNameAliasConfPath = aliasesConf.Name()
+ sc.SystemRegistriesConfPath = registriesConf.Name()
for _, c := range []struct {
- input string
- expected []pullRefStrings
- expectedUsedSearchRegistries bool
+ input string
+ expected []pullRefStrings
}{
- {"#", nil, false}, // Clearly invalid.
+ {"#", nil}, // Clearly invalid.
{ // Fully-explicit docker.io, name-only.
"docker.io/library/busybox",
// (The docker:// representation is shortened by c/image/docker.Reference but it refers to "docker.io/library".)
- []pullRefStrings{{"docker.io/library/busybox", "docker://busybox:latest", "docker.io/library/busybox:latest"}},
- false,
+ []pullRefStrings{{"docker.io/library/busybox:latest", "docker://busybox:latest", "docker.io/library/busybox:latest"}},
},
{ // docker.io with implied /library/, name-only.
"docker.io/busybox",
// (The docker:// representation is shortened by c/image/docker.Reference but it refers to "docker.io/library".)
- []pullRefStrings{{"docker.io/busybox", "docker://busybox:latest", "docker.io/library/busybox:latest"}},
- false,
+ []pullRefStrings{{"docker.io/library/busybox:latest", "docker://busybox:latest", "docker.io/library/busybox:latest"}},
},
{ // Qualified example.com, name-only.
"example.com/ns/busybox",
- []pullRefStrings{{"example.com/ns/busybox", "docker://example.com/ns/busybox:latest", "example.com/ns/busybox:latest"}},
- false,
+ []pullRefStrings{{"example.com/ns/busybox:latest", "docker://example.com/ns/busybox:latest", "example.com/ns/busybox:latest"}},
},
{ // Qualified example.com, name:tag.
"example.com/ns/busybox:notlatest",
[]pullRefStrings{{"example.com/ns/busybox:notlatest", "docker://example.com/ns/busybox:notlatest", "example.com/ns/busybox:notlatest"}},
- false,
},
{ // Qualified example.com, name@digest.
"example.com/ns/busybox" + digestSuffix,
[]pullRefStrings{{"example.com/ns/busybox" + digestSuffix, "docker://example.com/ns/busybox" + digestSuffix,
"example.com/ns/busybox" + digestSuffix}},
- false,
},
// Qualified example.com, name:tag@digest. This code is happy to try, but .srcRef parsing currently rejects such input.
- {"example.com/ns/busybox:notlatest" + digestSuffix, nil, false},
+ {"example.com/ns/busybox:notlatest" + digestSuffix, nil},
{ // Unqualified, single-name, name-only
"busybox",
[]pullRefStrings{
- {"example.com/busybox", "docker://example.com/busybox:latest", "example.com/busybox:latest"},
+ {"example.com/busybox:latest", "docker://example.com/busybox:latest", "example.com/busybox:latest"},
// (The docker:// representation is shortened by c/image/docker.Reference but it refers to "docker.io/library".)
- {"docker.io/library/busybox", "docker://busybox:latest", "docker.io/library/busybox:latest"},
+ {"docker.io/library/busybox:latest", "docker://busybox:latest", "docker.io/library/busybox:latest"},
},
- true,
},
{ // Unqualified, namespaced, name-only
"ns/busybox",
[]pullRefStrings{
- {"example.com/ns/busybox", "docker://example.com/ns/busybox:latest", "example.com/ns/busybox:latest"},
+ {"example.com/ns/busybox:latest", "docker://example.com/ns/busybox:latest", "example.com/ns/busybox:latest"},
},
- true,
},
{ // Unqualified, name:tag
"busybox:notlatest",
@@ -374,7 +359,6 @@ func TestPullGoalFromPossiblyUnqualifiedName(t *testing.T) {
// (The docker:// representation is shortened by c/image/docker.Reference but it refers to "docker.io/library".)
{"docker.io/library/busybox:notlatest", "docker://busybox:notlatest", "docker.io/library/busybox:notlatest"},
},
- true,
},
{ // Unqualified, name@digest
"busybox" + digestSuffix,
@@ -383,29 +367,22 @@ func TestPullGoalFromPossiblyUnqualifiedName(t *testing.T) {
// (The docker:// representation is shortened by c/image/docker.Reference but it refers to "docker.io/library".)
{"docker.io/library/busybox" + digestSuffix, "docker://busybox" + digestSuffix, "docker.io/library/busybox" + digestSuffix},
},
- true,
},
// Unqualified, name:tag@digest. This code is happy to try, but .srcRef parsing currently rejects such input.
- {"busybox:notlatest" + digestSuffix, nil, false},
+ {"busybox:notlatest" + digestSuffix, nil},
} {
- res, err := ir.pullGoalFromPossiblyUnqualifiedName(c.input)
+ res, err := ir.pullGoalFromPossiblyUnqualifiedName(sc, nil, c.input)
if len(c.expected) == 0 {
assert.Error(t, err, c.input)
} else {
assert.NoError(t, err, c.input)
for i, e := range c.expected {
- testDescription := fmt.Sprintf("%s #%d", c.input, i)
+ testDescription := fmt.Sprintf("%s #%d (%v)", c.input, i, res.refPairs)
assert.Equal(t, e.image, res.refPairs[i].image, testDescription)
assert.Equal(t, e.srcRef, transports.ImageName(res.refPairs[i].srcRef), testDescription)
assert.Equal(t, e.dstName, storageReferenceWithoutLocation(res.refPairs[i].dstRef), testDescription)
}
assert.False(t, res.pullAllPairs, c.input)
- assert.Equal(t, c.expectedUsedSearchRegistries, res.usedSearchRegistries, c.input)
- if !c.expectedUsedSearchRegistries {
- assert.Nil(t, res.searchedRegistries, c.input)
- } else {
- assert.Equal(t, []string{"example.com", "docker.io"}, res.searchedRegistries, c.input)
- }
}
}
}
diff --git a/libpod/image/utils.go b/libpod/image/utils.go
index 2538f429b..7429a7f10 100644
--- a/libpod/image/utils.go
+++ b/libpod/image/utils.go
@@ -11,6 +11,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/types"
+ "github.com/containers/podman/v2/libpod/define"
"github.com/containers/storage"
"github.com/pkg/errors"
)
@@ -42,7 +43,7 @@ func findImageInRepotags(search imageParts, images []*Image) (*storage.Image, er
if len(results) == 0 {
return &storage.Image{}, errors.Errorf("unable to find a name and tag match for %s in repotags", searchName)
} else if len(results) > 1 {
- return &storage.Image{}, errors.Errorf("found multiple name and tag matches for %s in repotags", searchName)
+ return &storage.Image{}, errors.Wrapf(define.ErrMultipleImages, searchName)
}
return results[0], nil
}
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 0de25a6ef..6c0cde531 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -31,6 +31,9 @@ type InMemoryState struct {
ctrExecSessions map[string][]string
// Maps pod ID to a map of container ID to container struct.
podContainers map[string]map[string]*Container
+	// Maps container ID to the networks the container is joined to.
+	ctrNetworks map[string][]string
+ // Maps container ID to network name to list of aliases.
+ ctrNetworkAliases map[string]map[string][]string
// Global name registry - ensures name uniqueness and performs lookups.
nameIndex *registrar.Registrar
// Global ID registry - ensures ID uniqueness and performs lookups.
@@ -65,6 +68,9 @@ func NewInMemoryState() (State, error) {
state.podContainers = make(map[string]map[string]*Container)
+ state.ctrNetworks = make(map[string][]string)
+ state.ctrNetworkAliases = make(map[string]map[string][]string)
+
state.nameIndex = registrar.NewRegistrar()
state.idIndex = truncindex.NewTruncIndex([]string{})
@@ -278,6 +284,27 @@ func (s *InMemoryState) AddContainer(ctr *Container) error {
return err
}
+ // Check networks
+ for _, net := range ctr.config.Networks {
+ if net == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names cannot be empty")
+ }
+ }
+
+ // Check network aliases
+ for network := range ctr.config.NetworkAliases {
+ inNet := false
+ for _, net := range ctr.config.Networks {
+ if net == network {
+ inNet = true
+ break
+ }
+ }
+ if !inNet {
+ return errors.Wrapf(define.ErrInvalidArg, "container %s has network aliases for network %q but is not joined to network", ctr.ID(), network)
+ }
+ }
+
// There are potential race conditions with this
// But in-memory state is intended purely for testing and not production
// use, so this should be fine.
@@ -334,6 +361,19 @@ func (s *InMemoryState) AddContainer(ctr *Container) error {
s.addCtrToVolDependsMap(ctr.ID(), vol.Name)
}
+ // Add networks
+ newNets := make([]string, 0, len(ctr.config.Networks))
+ for _, net := range ctr.config.Networks {
+ if net == "" {
+ return define.ErrInvalidArg
+ }
+ newNets = append(newNets, net)
+ }
+ s.ctrNetworks[ctr.ID()] = newNets
+
+ // Add network aliases
+ s.ctrNetworkAliases[ctr.ID()] = ctr.config.NetworkAliases
+
return nil
}
@@ -396,6 +436,14 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error {
s.removeCtrFromVolDependsMap(ctr.ID(), vol.Name)
}
+ // Remove our network aliases
+ if _, ok := s.ctrNetworkAliases[ctr.ID()]; ok {
+ delete(s.ctrNetworkAliases, ctr.ID())
+ }
+ if _, ok := s.ctrNetworks[ctr.ID()]; ok {
+ delete(s.ctrNetworks, ctr.ID())
+ }
+
return nil
}
@@ -472,6 +520,173 @@ func (s *InMemoryState) AllContainers() ([]*Container, error) {
return ctrs, nil
}
+// GetNetworks returns all the networks this container is connected to.
+func (s *InMemoryState) GetNetworks(ctr *Container) ([]string, error) {
+ if !ctr.valid {
+ return nil, define.ErrCtrRemoved
+ }
+
+ ctr, ok := s.containers[ctr.ID()]
+ if !ok {
+ ctr.valid = false
+ return nil, define.ErrNoSuchCtr
+ }
+
+ ctrNetworks, ok := s.ctrNetworks[ctr.ID()]
+ if !ok {
+ return nil, define.ErrNoSuchNetwork
+ }
+
+ return ctrNetworks, nil
+}
+
+// GetNetworkAliases returns network aliases for the given container in the
+// given network.
+func (s *InMemoryState) GetNetworkAliases(ctr *Container, network string) ([]string, error) {
+ if !ctr.valid {
+ return nil, define.ErrCtrRemoved
+ }
+
+ if network == "" {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ ctr, ok := s.containers[ctr.ID()]
+ if !ok {
+ ctr.valid = false
+ return nil, define.ErrNoSuchCtr
+ }
+
+ inNet := false
+ for _, net := range ctr.config.Networks {
+ if net == network {
+ inNet = true
+ }
+ }
+ if !inNet {
+ return nil, define.ErrInvalidArg
+ }
+
+ ctrAliases, ok := s.ctrNetworkAliases[ctr.ID()]
+ if !ok {
+ return []string{}, nil
+ }
+ netAliases, ok := ctrAliases[network]
+ if !ok {
+ return []string{}, nil
+ }
+
+ return netAliases, nil
+}
+
+// GetAllNetworkAliases gets all network aliases for the given container.
+func (s *InMemoryState) GetAllNetworkAliases(ctr *Container) (map[string][]string, error) {
+ if !ctr.valid {
+ return nil, define.ErrCtrRemoved
+ }
+
+ ctr, ok := s.containers[ctr.ID()]
+ if !ok {
+ ctr.valid = false
+ return nil, define.ErrNoSuchCtr
+ }
+
+ ctrAliases, ok := s.ctrNetworkAliases[ctr.ID()]
+ if !ok {
+ return map[string][]string{}, nil
+ }
+
+ return ctrAliases, nil
+}
+
+// NetworkConnect connects the given container to the given network and stores the given aliases for it.
+func (s *InMemoryState) NetworkConnect(ctr *Container, network string, aliases []string) error {
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if network == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ ctr, ok := s.containers[ctr.ID()]
+ if !ok {
+ ctr.valid = false
+ return define.ErrNoSuchCtr
+ }
+
+ inNet := false
+ ctrNetworks, ok := s.ctrNetworks[ctr.ID()]
+ if !ok {
+ return define.ErrNoSuchNetwork
+ }
+ for _, net := range ctrNetworks {
+ if net == network {
+ inNet = true
+ }
+ }
+ if inNet {
+		return errors.Wrapf(define.ErrInvalidArg, "container %s is already connected to network %q", ctr.ID(), network)
+ }
+ s.ctrNetworks[ctr.ID()] = append(ctrNetworks, network)
+
+ ctrAliases, ok := s.ctrNetworkAliases[ctr.ID()]
+ if !ok {
+ ctrAliases = make(map[string][]string)
+ s.ctrNetworkAliases[ctr.ID()] = ctrAliases
+ }
+ ctrAliases[network] = aliases
+
+ return nil
+}
+
+// NetworkDisconnect disconnects the container from the given network and removes all of its aliases in that network.
+func (s *InMemoryState) NetworkDisconnect(ctr *Container, network string) error {
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if network == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ ctr, ok := s.containers[ctr.ID()]
+ if !ok {
+ ctr.valid = false
+ return define.ErrNoSuchCtr
+ }
+
+ ctrNetworks, ok := s.ctrNetworks[ctr.ID()]
+ if !ok {
+ return define.ErrNoSuchNetwork
+ }
+ inNet := false
+ remainingNets := make([]string, 0, len(ctrNetworks))
+ for _, net := range ctrNetworks {
+ if net == network {
+ inNet = true
+ } else {
+ remainingNets = append(remainingNets, net)
+ }
+ }
+ if !inNet {
+ return define.ErrNoSuchNetwork
+ }
+ s.ctrNetworks[ctr.ID()] = remainingNets
+
+ ctrAliases, ok := s.ctrNetworkAliases[ctr.ID()]
+ if !ok {
+ ctrAliases = make(map[string][]string)
+ s.ctrNetworkAliases[ctr.ID()] = ctrAliases
+ }
+ if _, ok := ctrAliases[network]; ok {
+ delete(ctrAliases, network)
+ }
+
+ return nil
+}
+
// GetContainerConfig returns a container config from the database by full ID
func (s *InMemoryState) GetContainerConfig(id string) (*ContainerConfig, error) {
ctr, err := s.LookupContainer(id)
@@ -1116,6 +1331,27 @@ func (s *InMemoryState) AddContainerToPod(pod *Pod, ctr *Container) error {
return err
}
+ // Check networks
+ for _, net := range ctr.config.Networks {
+ if net == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names cannot be empty")
+ }
+ }
+
+ // Check network aliases
+ for network := range ctr.config.NetworkAliases {
+ inNet := false
+ for _, net := range ctr.config.Networks {
+ if net == network {
+ inNet = true
+ break
+ }
+ }
+ if !inNet {
+ return errors.Wrapf(define.ErrInvalidArg, "container %s has network aliases for network %q but is not joined to network", ctr.ID(), network)
+ }
+ }
+
// Retrieve pod containers list
podCtrs, ok := s.podContainers[pod.ID()]
if !ok {
@@ -1188,6 +1424,24 @@ func (s *InMemoryState) AddContainerToPod(pod *Pod, ctr *Container) error {
s.addCtrToDependsMap(ctr.ID(), depCtr)
}
+ // Add container to volume dependencies
+ for _, vol := range ctr.config.NamedVolumes {
+ s.addCtrToVolDependsMap(ctr.ID(), vol.Name)
+ }
+
+ // Add networks
+ newNets := make([]string, 0, len(ctr.config.Networks))
+ for _, net := range ctr.config.Networks {
+ if net == "" {
+ return define.ErrInvalidArg
+ }
+ newNets = append(newNets, net)
+ }
+ s.ctrNetworks[ctr.ID()] = newNets
+
+ // Add network aliases
+ s.ctrNetworkAliases[ctr.ID()] = ctr.config.NetworkAliases
+
return nil
}
@@ -1268,6 +1522,14 @@ func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
s.removeCtrFromDependsMap(ctr.ID(), depCtr)
}
+ // Remove our network aliases
+ if _, ok := s.ctrNetworkAliases[ctr.ID()]; ok {
+ delete(s.ctrNetworkAliases, ctr.ID())
+ }
+ if _, ok := s.ctrNetworks[ctr.ID()]; ok {
+ delete(s.ctrNetworks, ctr.ID())
+ }
+
return nil
}
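A standalone illustration of the bookkeeping the in-memory state now performs for networks and aliases (plain maps instead of the real Container/State types; purely illustrative):

package main

import "fmt"

func main() {
	// ctrNetworks:       container ID -> networks the container is joined to
	// ctrNetworkAliases: container ID -> network name -> aliases
	ctrNetworks := map[string][]string{"ctr1": {"podman"}}
	ctrNetworkAliases := map[string]map[string][]string{}

	// NetworkConnect("ctr1", "backend", ["db"]) adds the network and its aliases.
	ctrNetworks["ctr1"] = append(ctrNetworks["ctr1"], "backend")
	ctrNetworkAliases["ctr1"] = map[string][]string{"backend": {"db"}}

	// NetworkDisconnect("ctr1", "backend") drops the network and any aliases in it.
	remaining := make([]string, 0, len(ctrNetworks["ctr1"]))
	for _, net := range ctrNetworks["ctr1"] {
		if net != "backend" {
			remaining = append(remaining, net)
		}
	}
	ctrNetworks["ctr1"] = remaining
	delete(ctrNetworkAliases["ctr1"], "backend")

	fmt.Println(ctrNetworks, ctrNetworkAliases) // map[ctr1:[podman]] map[ctr1:map[]]
}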
diff --git a/libpod/kube.go b/libpod/kube.go
index cd5064c84..067e7827d 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -327,7 +327,7 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) {
period := *c.config.Spec.Linux.Resources.CPU.Period
if quota > 0 && period > 0 {
- cpuLimitMilli := int64(1000 * float64(quota) / float64(period))
+ cpuLimitMilli := int64(1000 * util.PeriodAndQuotaToCores(period, quota))
// Kubernetes: precision finer than 1m is not allowed
if cpuLimitMilli >= 1 {
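The CPU limit conversion above now goes through a shared helper; the arithmetic is cores = quota / period, and Kubernetes expresses the limit in millicores (1000 * cores). A minimal sketch with a worked example (the helper name below is an illustrative stand-in for util.PeriodAndQuotaToCores):

package main

import "fmt"

// periodAndQuotaToCores is an illustrative stand-in for util.PeriodAndQuotaToCores.
func periodAndQuotaToCores(period uint64, quota int64) float64 {
	return float64(quota) / float64(period)
}

func main() {
	// quota=50000us per period=100000us -> 0.5 cores -> a 500m limit in the kube YAML
	cores := periodAndQuotaToCores(100000, 50000)
	fmt.Printf("%.0fm\n", 1000*cores)
}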
diff --git a/libpod/network/config.go b/libpod/network/config.go
index a08e684d8..ce351129e 100644
--- a/libpod/network/config.go
+++ b/libpod/network/config.go
@@ -129,10 +129,21 @@ func (f FirewallConfig) Bytes() ([]byte, error) {
return json.MarshalIndent(f, "", "\t")
}
+// TuningConfig describes the tuning plugin
+type TuningConfig struct {
+ PluginType string `json:"type"`
+}
+
+// Bytes outputs the configuration as []byte
+func (f TuningConfig) Bytes() ([]byte, error) {
+ return json.MarshalIndent(f, "", "\t")
+}
+
// DNSNameConfig describes the dns container name resolution plugin config
type DNSNameConfig struct {
- PluginType string `json:"type"`
- DomainName string `json:"domainName"`
+ PluginType string `json:"type"`
+ DomainName string `json:"domainName"`
+ Capabilities map[string]bool `json:"capabilities"`
}
// Bytes outputs the configuration as []byte
diff --git a/libpod/network/create.go b/libpod/network/create.go
index bf11631bf..7e4fc574a 100644
--- a/libpod/network/create.go
+++ b/libpod/network/create.go
@@ -8,32 +8,29 @@ import (
"path/filepath"
"github.com/containernetworking/cni/pkg/version"
- "github.com/containers/podman/v2/libpod"
+ "github.com/containers/common/pkg/config"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/containers/podman/v2/pkg/rootless"
"github.com/containers/podman/v2/pkg/util"
"github.com/pkg/errors"
)
-func Create(name string, options entities.NetworkCreateOptions, r *libpod.Runtime) (*entities.NetworkCreateReport, error) {
+// Create creates a new CNI network
+func Create(name string, options entities.NetworkCreateOptions, runtimeConfig *config.Config) (*entities.NetworkCreateReport, error) {
var fileName string
if err := isSupportedDriver(options.Driver); err != nil {
return nil, err
}
- config, err := r.GetConfig()
- if err != nil {
- return nil, err
- }
// Acquire a lock for CNI
- l, err := acquireCNILock(filepath.Join(config.Engine.TmpDir, LockFileName))
+ l, err := acquireCNILock(filepath.Join(runtimeConfig.Engine.TmpDir, LockFileName))
if err != nil {
return nil, err
}
defer l.releaseCNILock()
if len(options.MacVLAN) > 0 {
- fileName, err = createMacVLAN(r, name, options)
+ fileName, err = createMacVLAN(name, options, runtimeConfig)
} else {
- fileName, err = createBridge(r, name, options)
+ fileName, err = createBridge(name, options, runtimeConfig)
}
if err != nil {
return nil, err
@@ -41,60 +38,118 @@ func Create(name string, options entities.NetworkCreateOptions, r *libpod.Runtim
return &entities.NetworkCreateReport{Filename: fileName}, nil
}
+// validateBridgeOptions validates the bridge networking options
+func validateBridgeOptions(options entities.NetworkCreateOptions) error {
+ subnet := &options.Subnet
+ ipRange := &options.Range
+ gateway := options.Gateway
+ // if IPv6 is set an IPv6 subnet MUST be specified
+ if options.IPv6 && ((subnet.IP == nil) || (subnet.IP != nil && !IsIPv6(subnet.IP))) {
+ return errors.Errorf("ipv6 option requires an IPv6 --subnet to be provided")
+ }
+ // range and gateway depend on subnet
+ if subnet.IP == nil && (ipRange.IP != nil || gateway != nil) {
+ return errors.Errorf("every ip-range or gateway must have a corresponding subnet")
+ }
+
+ // if a range is given, we need to ensure it is "in" the network range.
+ if ipRange.IP != nil {
+ firstIP, err := FirstIPInSubnet(ipRange)
+ if err != nil {
+ return errors.Wrapf(err, "failed to get first IP address from ip-range")
+ }
+ lastIP, err := LastIPInSubnet(ipRange)
+ if err != nil {
+ return errors.Wrapf(err, "failed to get last IP address from ip-range")
+ }
+ if !subnet.Contains(firstIP) || !subnet.Contains(lastIP) {
+ return errors.Errorf("the ip range %s does not fall within the subnet range %s", ipRange.String(), subnet.String())
+ }
+ }
+
+ // if network is provided and if gateway is provided, make sure it is "in" network
+ if gateway != nil && !subnet.Contains(gateway) {
+ return errors.Errorf("gateway %s is not in valid for subnet %s", gateway.String(), subnet.String())
+ }
+
+ return nil
+
+}
+
// createBridge creates a CNI network
-func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreateOptions) (string, error) {
+func createBridge(name string, options entities.NetworkCreateOptions, runtimeConfig *config.Config) (string, error) {
+ var (
+ ipamRanges [][]IPAMLocalHostRangeConf
+ err error
+ routes []IPAMRoute
+ )
isGateway := true
ipMasq := true
- subnet := &options.Subnet
- ipRange := options.Range
- runtimeConfig, err := r.GetConfig()
- if err != nil {
+
+ // validate options
+ if err := validateBridgeOptions(options); err != nil {
return "", err
}
- // if range is provided, make sure it is "in" network
+
+ // For compatibility with the docker implementation:
+	// if IPv6 is enabled (it really means dual-stack), then an IPv6 subnet has to be provided, and one free IPv4 network is allocated as well
+	// if IPv6 is not specified, the subnet may still be specified and can be either IPv4 or IPv6 (podman, unlike docker, allows IPv6-only networks)
+	// if no subnet is specified, an IPv4 subnet will be allocated
+ subnet := &options.Subnet
+ ipRange := &options.Range
+ gateway := options.Gateway
if subnet.IP != nil {
// if network is provided, does it conflict with existing CNI or live networks
err = ValidateUserNetworkIsAvailable(runtimeConfig, subnet)
- } else {
- // if no network is provided, figure out network
- subnet, err = GetFreeNetwork(runtimeConfig)
- }
- if err != nil {
- return "", err
- }
- gateway := options.Gateway
- if gateway == nil {
- // if no gateway is provided, provide it as first ip of network
- gateway = CalcGatewayIP(subnet)
- }
- // if network is provided and if gateway is provided, make sure it is "in" network
- if options.Subnet.IP != nil && options.Gateway != nil {
- if !subnet.Contains(gateway) {
- return "", errors.Errorf("gateway %s is not in valid for subnet %s", gateway.String(), subnet.String())
+ if err != nil {
+ return "", err
}
- }
- if options.Internal {
- isGateway = false
- ipMasq = false
- }
-
- // if a range is given, we need to ensure it is "in" the network range.
- if options.Range.IP != nil {
- if options.Subnet.IP == nil {
- return "", errors.New("you must define a subnet range to define an ip-range")
+ // obtain CNI subnet default route
+ defaultRoute, err := NewIPAMDefaultRoute(IsIPv6(subnet.IP))
+ if err != nil {
+ return "", err
+ }
+ routes = append(routes, defaultRoute)
+ // obtain CNI range
+ ipamRange, err := NewIPAMLocalHostRange(subnet, ipRange, gateway)
+ if err != nil {
+ return "", err
}
- firstIP, err := FirstIPInSubnet(&options.Range)
+ ipamRanges = append(ipamRanges, ipamRange)
+ }
+	// if no subnet is provided, or the IPv6 flag is used, figure out an IPv4 subnet
+ if options.IPv6 || len(routes) == 0 {
+ subnetV4, err := GetFreeNetwork(runtimeConfig)
if err != nil {
return "", err
}
- lastIP, err := LastIPInSubnet(&options.Range)
+ // obtain IPv4 default route
+ defaultRoute, err := NewIPAMDefaultRoute(false)
if err != nil {
return "", err
}
- if !subnet.Contains(firstIP) || !subnet.Contains(lastIP) {
- return "", errors.Errorf("the ip range %s does not fall within the subnet range %s", options.Range.String(), subnet.String())
+ routes = append(routes, defaultRoute)
+ // the CNI bridge plugin does not need to set
+ // the range or gateway options explicitly
+ ipamRange, err := NewIPAMLocalHostRange(subnetV4, nil, nil)
+ if err != nil {
+ return "", err
}
+ ipamRanges = append(ipamRanges, ipamRange)
+ }
+
+ // create CNI config
+ ipamConfig, err := NewIPAMHostLocalConf(routes, ipamRanges)
+ if err != nil {
+ return "", err
}
+
+ if options.Internal {
+ isGateway = false
+ ipMasq = false
+ }
+
+ // obtain host bridge name
bridgeDeviceName, err := GetFreeDeviceName(runtimeConfig)
if err != nil {
return "", err
@@ -113,25 +168,15 @@ func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreate
name = bridgeDeviceName
}
+ // create CNI plugin configuration
ncList := NewNcList(name, version.Current())
var plugins []CNIPlugins
- var routes []IPAMRoute
-
- defaultRoute, err := NewIPAMDefaultRoute(IsIPv6(subnet.IP))
- if err != nil {
- return "", err
- }
- routes = append(routes, defaultRoute)
- ipamConfig, err := NewIPAMHostLocalConf(subnet, routes, ipRange, gateway)
- if err != nil {
- return "", err
- }
-
// TODO need to iron out the role of isDefaultGW and IPMasq
bridge := NewHostLocalBridge(bridgeDeviceName, isGateway, false, ipMasq, ipamConfig)
plugins = append(plugins, bridge)
plugins = append(plugins, NewPortMapPlugin())
plugins = append(plugins, NewFirewallPlugin())
+ plugins = append(plugins, NewTuningPlugin())
// if we find the dnsname plugin or are rootless, we add configuration for it
// the rootless-cni-infra container has the dnsname plugin always installed
if (HasDNSNamePlugin(runtimeConfig.Network.CNIPluginDirs) || rootless.IsRootless()) && !options.DisableDNS {
@@ -151,7 +196,7 @@ func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreate
return cniPathName, err
}
-func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreateOptions) (string, error) {
+func createMacVLAN(name string, options entities.NetworkCreateOptions, runtimeConfig *config.Config) (string, error) {
var (
plugins []CNIPlugins
)
@@ -160,17 +205,12 @@ func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreat
return "", err
}
- config, err := r.GetConfig()
- if err != nil {
- return "", err
- }
-
// Make sure the host-device exists
if !util.StringInSlice(options.MacVLAN, liveNetNames) {
return "", errors.Errorf("failed to find network interface %q", options.MacVLAN)
}
if len(name) > 0 {
- netNames, err := GetNetworkNamesFromFileSystem(config)
+ netNames, err := GetNetworkNamesFromFileSystem(runtimeConfig)
if err != nil {
return "", err
}
@@ -178,7 +218,7 @@ func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreat
return "", errors.Errorf("the network name %s is already used", name)
}
} else {
- name, err = GetFreeDeviceName(config)
+ name, err = GetFreeDeviceName(runtimeConfig)
if err != nil {
return "", err
}
@@ -191,7 +231,7 @@ func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreat
if err != nil {
return "", err
}
- cniPathName := filepath.Join(GetCNIConfDir(config), fmt.Sprintf("%s.conflist", name))
+ cniPathName := filepath.Join(GetCNIConfDir(runtimeConfig), fmt.Sprintf("%s.conflist", name))
err = ioutil.WriteFile(cniPathName, b, 0644)
return cniPathName, err
}
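A hedged sketch of the subnet-allocation decision documented in createBridge above: with the IPv6 option an IPv6 subnet must be supplied and a free IPv4 subnet is allocated alongside it (dual-stack); without it, a single user-supplied subnet of either family is used, or a free IPv4 subnet is picked. The helper below is illustrative only, not libpod code:

package main

import (
	"fmt"
	"net"
)

// planSubnets reports which subnets a new bridge network would get,
// mirroring the createBridge decision logic (illustration only).
func planSubnets(subnet *net.IPNet, ipv6 bool) ([]string, error) {
	plan := []string{}
	if subnet != nil && subnet.IP != nil {
		if ipv6 && subnet.IP.To4() != nil {
			return nil, fmt.Errorf("ipv6 option requires an IPv6 --subnet to be provided")
		}
		plan = append(plan, "user-supplied subnet "+subnet.String())
	}
	if ipv6 || len(plan) == 0 {
		// createBridge calls GetFreeNetwork here to pick a free IPv4 range.
		plan = append(plan, "auto-allocated IPv4 subnet")
	}
	return plan, nil
}

func main() {
	_, v6, _ := net.ParseCIDR("2001:db8::/64")
	fmt.Println(planSubnets(v6, true))   // dual-stack: IPv6 subnet plus a free IPv4 subnet
	fmt.Println(planSubnets(nil, false)) // single auto-allocated IPv4 subnet
}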
diff --git a/libpod/network/create_test.go b/libpod/network/create_test.go
new file mode 100644
index 000000000..16188e497
--- /dev/null
+++ b/libpod/network/create_test.go
@@ -0,0 +1,131 @@
+package network
+
+import (
+ "net"
+ "testing"
+
+ "github.com/containers/podman/v2/pkg/domain/entities"
+)
+
+func Test_validateBridgeOptions(t *testing.T) {
+
+ tests := []struct {
+ name string
+ subnet net.IPNet
+ ipRange net.IPNet
+ gateway net.IP
+ isIPv6 bool
+ wantErr bool
+ }{
+ {
+ name: "IPv4 subnet only",
+ subnet: net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.IPv4Mask(255, 255, 255, 0)},
+ },
+ {
+ name: "IPv4 subnet and range",
+ subnet: net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.IPv4Mask(255, 255, 255, 0)},
+ ipRange: net.IPNet{IP: net.IPv4(192, 168, 0, 128), Mask: net.IPv4Mask(255, 255, 255, 128)},
+ },
+ {
+ name: "IPv4 subnet and gateway",
+ subnet: net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.IPv4Mask(255, 255, 255, 0)},
+ gateway: net.ParseIP("192.168.0.10"),
+ },
+ {
+ name: "IPv4 subnet, range and gateway",
+ subnet: net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.IPv4Mask(255, 255, 255, 0)},
+ ipRange: net.IPNet{IP: net.IPv4(192, 168, 0, 128), Mask: net.IPv4Mask(255, 255, 255, 128)},
+ gateway: net.ParseIP("192.168.0.10"),
+ },
+ {
+ name: "IPv6 subnet only",
+ subnet: net.IPNet{IP: net.ParseIP("2001:DB8::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff::"))},
+ },
+ {
+ name: "IPv6 subnet and range",
+ subnet: net.IPNet{IP: net.ParseIP("2001:DB8::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff::"))},
+ ipRange: net.IPNet{IP: net.ParseIP("2001:DB8:0:0:1::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff:ffff::"))},
+ isIPv6: true,
+ },
+ {
+ name: "IPv6 subnet and gateway",
+ subnet: net.IPNet{IP: net.ParseIP("2001:DB8::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff::"))},
+ gateway: net.ParseIP("2001:DB8::2"),
+ isIPv6: true,
+ },
+ {
+ name: "IPv6 subnet, range and gateway",
+ subnet: net.IPNet{IP: net.ParseIP("2001:DB8::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff::"))},
+ ipRange: net.IPNet{IP: net.ParseIP("2001:DB8:0:0:1::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff:ffff::"))},
+ gateway: net.ParseIP("2001:DB8::2"),
+ isIPv6: true,
+ },
+ {
+			name:    "IPv6 subnet, range and gateway without IPv6 option (PODMAN SUPPORTS IT UNLIKE DOCKER)",
+ subnet: net.IPNet{IP: net.ParseIP("2001:DB8::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff::"))},
+ ipRange: net.IPNet{IP: net.ParseIP("2001:DB8:0:0:1::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff:ffff::"))},
+ gateway: net.ParseIP("2001:DB8::2"),
+ isIPv6: false,
+ },
+ {
+ name: "range provided but not subnet",
+ ipRange: net.IPNet{IP: net.IPv4(192, 168, 0, 128), Mask: net.IPv4Mask(255, 255, 255, 128)},
+ wantErr: true,
+ },
+ {
+ name: "gateway provided but not subnet",
+ gateway: net.ParseIP("192.168.0.10"),
+ wantErr: true,
+ },
+ {
+ name: "IPv4 subnet but IPv6 required",
+ subnet: net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.IPv4Mask(255, 255, 255, 0)},
+ ipRange: net.IPNet{IP: net.IPv4(192, 168, 0, 128), Mask: net.IPv4Mask(255, 255, 255, 128)},
+ gateway: net.ParseIP("192.168.0.10"),
+ isIPv6: true,
+ wantErr: true,
+ },
+ {
+ name: "IPv6 required but IPv4 options used",
+ subnet: net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.IPv4Mask(255, 255, 255, 0)},
+ ipRange: net.IPNet{IP: net.IPv4(192, 168, 0, 128), Mask: net.IPv4Mask(255, 255, 255, 128)},
+ gateway: net.ParseIP("192.168.0.10"),
+ isIPv6: true,
+ wantErr: true,
+ },
+ {
+			name:    "IPv6 required but no subnet provided",
+ isIPv6: true,
+ wantErr: true,
+ },
+ {
+ name: "range out of the subnet",
+ subnet: net.IPNet{IP: net.ParseIP("2001:DB8::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff::"))},
+ ipRange: net.IPNet{IP: net.ParseIP("2001:1:1::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff:ffff::"))},
+ gateway: net.ParseIP("2001:DB8::2"),
+ isIPv6: true,
+ wantErr: true,
+ },
+ {
+ name: "gateway out of the subnet",
+ subnet: net.IPNet{IP: net.ParseIP("2001:DB8::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff::"))},
+ gateway: net.ParseIP("2001::2"),
+ isIPv6: true,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ tt := tt
+ t.Run(tt.name, func(t *testing.T) {
+ options := entities.NetworkCreateOptions{
+ Subnet: tt.subnet,
+ Range: tt.ipRange,
+ Gateway: tt.gateway,
+ IPv6: tt.isIPv6,
+ }
+ if err := validateBridgeOptions(options); (err != nil) != tt.wantErr {
+ t.Errorf("validateBridgeOptions() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
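
The table above can be exercised on its own with the standard Go tooling; assuming the usual repository layout, something like:

	go test ./libpod/network/ -run Test_validateBridgeOptions -v
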
diff --git a/libpod/network/files.go b/libpod/network/files.go
index a2090491f..7f1e3ee18 100644
--- a/libpod/network/files.go
+++ b/libpod/network/files.go
@@ -14,6 +14,10 @@ import (
"github.com/pkg/errors"
)
+// ErrNoSuchNetworkInterface indicates that no network interface exists
+var ErrNoSuchNetworkInterface = errors.New("unable to find interface name for network")
+
+// GetCNIConfDir returns the CNI configuration directory
func GetCNIConfDir(configArg *config.Config) string {
if len(configArg.Network.NetworkConfigDir) < 1 {
dc, err := config.DefaultConfig()
@@ -141,7 +145,7 @@ func GetInterfaceNameFromConfig(path string) (string, error) {
}
}
if len(name) == 0 {
- return "", errors.New("unable to find interface name for network")
+ return "", ErrNoSuchNetworkInterface
}
return name, nil
}
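
Exporting the sentinel error lets callers distinguish a config that simply records no interface name from a real read or parse failure, which is how RemoveNetwork below uses it. A small sketch of the pattern (variable names are illustrative):

	interfaceName, err := GetInterfaceNameFromConfig(cniPath)
	switch {
	case err == nil:
		// interface name known; the caller may try to clean it up
	case err == ErrNoSuchNetworkInterface:
		// nothing recorded in the config; skip interface cleanup
	default:
		return err // genuine failure reading or parsing the config
	}
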
diff --git a/libpod/network/netconflist.go b/libpod/network/netconflist.go
index 8187fdb39..ee9adce14 100644
--- a/libpod/network/netconflist.go
+++ b/libpod/network/netconflist.go
@@ -42,8 +42,7 @@ func NewHostLocalBridge(name string, isGateWay, isDefaultGW, ipMasq bool, ipamCo
}
// NewIPAMHostLocalConf creates a new IPAMHostLocal configuration
-func NewIPAMHostLocalConf(subnet *net.IPNet, routes []IPAMRoute, ipRange net.IPNet, gw net.IP) (IPAMHostLocalConf, error) {
- var ipamRanges [][]IPAMLocalHostRangeConf
+func NewIPAMHostLocalConf(routes []IPAMRoute, ipamRanges [][]IPAMLocalHostRangeConf) (IPAMHostLocalConf, error) {
ipamConf := IPAMHostLocalConf{
PluginType: "host-local",
Routes: routes,
@@ -51,22 +50,19 @@ func NewIPAMHostLocalConf(subnet *net.IPNet, routes []IPAMRoute, ipRange net.IPN
//ResolveConf: "",
//DataDir: ""
}
- IPAMRange, err := newIPAMLocalHostRange(subnet, &ipRange, &gw)
- if err != nil {
- return ipamConf, err
- }
- ipamRanges = append(ipamRanges, IPAMRange)
+
ipamConf.Ranges = ipamRanges
return ipamConf, nil
}
-func newIPAMLocalHostRange(subnet *net.IPNet, ipRange *net.IPNet, gw *net.IP) ([]IPAMLocalHostRangeConf, error) { //nolint:interfacer
+// NewIPAMLocalHostRange creates a new IPAM range
+func NewIPAMLocalHostRange(subnet *net.IPNet, ipRange *net.IPNet, gw net.IP) ([]IPAMLocalHostRangeConf, error) { //nolint:interfacer
var ranges []IPAMLocalHostRangeConf
hostRange := IPAMLocalHostRangeConf{
Subnet: subnet.String(),
}
	// if the user provided a range, we add it here
- if ipRange.IP != nil {
+ if ipRange != nil && ipRange.IP != nil {
first, err := FirstIPInSubnet(ipRange)
if err != nil {
return nil, err
@@ -123,12 +119,22 @@ func NewFirewallPlugin() FirewallConfig {
}
}
+// NewTuningPlugin creates a generic tuning section
+func NewTuningPlugin() TuningConfig {
+ return TuningConfig{
+ PluginType: "tuning",
+ }
+}
+
// NewDNSNamePlugin creates the dnsname config with a given
// domainname
func NewDNSNamePlugin(domainName string) DNSNameConfig {
+ caps := make(map[string]bool, 1)
+ caps["aliases"] = true
return DNSNameConfig{
- PluginType: "dnsname",
- DomainName: domainName,
+ PluginType: "dnsname",
+ DomainName: domainName,
+ Capabilities: caps,
}
}
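
With the split above, callers build the IPAM ranges first and then feed them to NewIPAMHostLocalConf. A minimal sketch inside the network package, assuming an IPv4 subnet with no extra routes (the nil route list, nil range, and nil gateway are illustrative choices, not requirements of this diff):

	subnet := &net.IPNet{IP: net.IPv4(10, 88, 1, 0), Mask: net.CIDRMask(24, 32)}
	// build one host-local range; ipRange and gateway are optional and left unset here
	hostRange, err := NewIPAMLocalHostRange(subnet, nil, nil)
	if err != nil {
		return err
	}
	ipamConf, err := NewIPAMHostLocalConf(nil, [][]IPAMLocalHostRangeConf{hostRange})
	if err != nil {
		return err
	}
	_ = ipamConf // typically handed to NewHostLocalBridge in the bridge path
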
diff --git a/libpod/network/netconflist_test.go b/libpod/network/netconflist_test.go
index 5893bf985..6bf1a9777 100644
--- a/libpod/network/netconflist_test.go
+++ b/libpod/network/netconflist_test.go
@@ -1,6 +1,7 @@
package network
import (
+ "net"
"reflect"
"testing"
)
@@ -36,3 +37,72 @@ func TestNewIPAMDefaultRoute(t *testing.T) {
})
}
}
+
+func TestNewIPAMLocalHostRange(t *testing.T) {
+ tests := []struct {
+ name string
+ subnet *net.IPNet
+ ipRange *net.IPNet
+ gw net.IP
+ want []IPAMLocalHostRangeConf
+ }{
+ {
+ name: "IPv4 subnet",
+ subnet: &net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.IPv4Mask(255, 255, 255, 0)},
+ want: []IPAMLocalHostRangeConf{
+ {
+ Subnet: "192.168.0.0/24",
+ },
+ },
+ },
+ {
+ name: "IPv4 subnet, range and gateway",
+ subnet: &net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.IPv4Mask(255, 255, 255, 0)},
+ ipRange: &net.IPNet{IP: net.IPv4(192, 168, 0, 128), Mask: net.IPv4Mask(255, 255, 255, 128)},
+ gw: net.ParseIP("192.168.0.10"),
+ want: []IPAMLocalHostRangeConf{
+ {
+ Subnet: "192.168.0.0/24",
+ RangeStart: "192.168.0.129",
+ RangeEnd: "192.168.0.255",
+ Gateway: "192.168.0.10",
+ },
+ },
+ },
+ {
+ name: "IPv6 subnet",
+ subnet: &net.IPNet{IP: net.ParseIP("2001:DB8::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff::"))},
+ want: []IPAMLocalHostRangeConf{
+ {
+ Subnet: "2001:db8::/48",
+ },
+ },
+ },
+ {
+ name: "IPv6 subnet, range and gateway",
+ subnet: &net.IPNet{IP: net.ParseIP("2001:DB8::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff::"))},
+ ipRange: &net.IPNet{IP: net.ParseIP("2001:DB8:1:1::"), Mask: net.IPMask(net.ParseIP("ffff:ffff:ffff:ffff::"))},
+ gw: net.ParseIP("2001:DB8::2"),
+ want: []IPAMLocalHostRangeConf{
+ {
+ Subnet: "2001:db8::/48",
+ RangeStart: "2001:db8:1:1::1",
+ RangeEnd: "2001:db8:1:1:ffff:ffff:ffff:ffff",
+ Gateway: "2001:db8::2",
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ tt := tt
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := NewIPAMLocalHostRange(tt.subnet, tt.ipRange, tt.gw)
+ if err != nil {
+ t.Errorf("no error expected: %v", err)
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("NewIPAMLocalHostRange() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/libpod/network/network.go b/libpod/network/network.go
index 7327a1a7d..0febb52f6 100644
--- a/libpod/network/network.go
+++ b/libpod/network/network.go
@@ -10,6 +10,7 @@ import (
"github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v2/libpod/define"
+ "github.com/containers/podman/v2/pkg/rootless"
"github.com/containers/podman/v2/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -181,21 +182,26 @@ func RemoveNetwork(config *config.Config, name string) error {
// Before we delete the configuration file, we need to make sure we can read and parse
// it to get the network interface name so we can remove that too
interfaceName, err := GetInterfaceNameFromConfig(cniPath)
- if err != nil {
- return errors.Wrapf(err, "failed to find network interface name in %q", cniPath)
- }
- liveNetworkNames, err := GetLiveNetworkNames()
- if err != nil {
- return errors.Wrapf(err, "failed to get live network names")
- }
- if util.StringInSlice(interfaceName, liveNetworkNames) {
- if err := RemoveInterface(interfaceName); err != nil {
- return errors.Wrapf(err, "failed to delete the network interface %q", interfaceName)
+ if err == nil {
+ // Don't try to remove the network interface if we are not root
+ if !rootless.IsRootless() {
+ liveNetworkNames, err := GetLiveNetworkNames()
+ if err != nil {
+ return errors.Wrapf(err, "failed to get live network names")
+ }
+ if util.StringInSlice(interfaceName, liveNetworkNames) {
+ if err := RemoveInterface(interfaceName); err != nil {
+ return errors.Wrapf(err, "failed to delete the network interface %q", interfaceName)
+ }
+ }
}
+ } else if err != ErrNoSuchNetworkInterface {
+		// ErrNoSuchNetworkInterface is not fatal; any other error is returned
+ return err
}
// Remove the configuration file
if err := os.Remove(cniPath); err != nil {
- return errors.Wrapf(err, "failed to remove network configuration file %q", cniPath)
+ return errors.Wrap(err, "failed to remove network configuration")
}
return nil
}
diff --git a/libpod/network/subnet.go b/libpod/network/subnet.go
index 90f0cdfce..120038e57 100644
--- a/libpod/network/subnet.go
+++ b/libpod/network/subnet.go
@@ -54,14 +54,10 @@ func LastIPInSubnet(addr *net.IPNet) (net.IP, error) { //nolint:interfacer
ones, bits := cidr.Mask.Size()
if ones == bits {
- return FirstIPInSubnet(cidr)
+ return cidr.IP, nil
}
- hostStart := ones / 8
- // Handle the first host byte
- cidr.IP[hostStart] |= 0xff & cidr.Mask[hostStart]
- // Fill the rest with ones
- for i := hostStart; i < len(cidr.IP); i++ {
- cidr.IP[i] = 0xff
+ for i := range cidr.IP {
+ cidr.IP[i] = cidr.IP[i] | ^cidr.Mask[i]
}
return cidr.IP, nil
}
@@ -73,6 +69,10 @@ func FirstIPInSubnet(addr *net.IPNet) (net.IP, error) { //nolint:interfacer
if err != nil {
return nil, err
}
+ ones, bits := cidr.Mask.Size()
+ if ones == bits {
+ return cidr.IP, nil
+ }
cidr.IP[len(cidr.IP)-1]++
return cidr.IP, nil
}
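
The rewritten loop is plain byte-wise arithmetic: OR every address byte with the inverted mask byte so that all host bits become one. A standalone illustration of the same calculation:

	package main

	import (
		"fmt"
		"net"
	)

	func main() {
		ip := net.IPv4(192, 168, 1, 0).To4()
		mask := net.CIDRMask(24, 32)
		for i := range ip {
			// the last mask byte is 0x00, so ^mask[i] is 0xff and the host bits flip to 1
			ip[i] |= ^mask[i]
		}
		fmt.Println(ip) // 192.168.1.255
	}
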
diff --git a/libpod/network/subnet_test.go b/libpod/network/subnet_test.go
index 917c3be88..55b2443bd 100644
--- a/libpod/network/subnet_test.go
+++ b/libpod/network/subnet_test.go
@@ -33,3 +33,65 @@ func TestNextSubnet(t *testing.T) {
})
}
}
+
+func TestFirstIPInSubnet(t *testing.T) {
+ tests := []struct {
+ name string
+ args *net.IPNet
+ want net.IP
+ wantErr bool
+ }{
+ {"class b", parseCIDR("192.168.0.0/16"), net.ParseIP("192.168.0.1"), false},
+ {"class c", parseCIDR("192.168.1.0/24"), net.ParseIP("192.168.1.1"), false},
+ {"cidr /23", parseCIDR("192.168.0.0/23"), net.ParseIP("192.168.0.1"), false},
+ {"cidr /25", parseCIDR("192.168.1.0/25"), net.ParseIP("192.168.1.1"), false},
+ {"cidr /26", parseCIDR("172.16.1.128/26"), net.ParseIP("172.16.1.129"), false},
+ {"class a", parseCIDR("10.0.0.0/8"), net.ParseIP("10.0.0.1"), false},
+ {"cidr /32", parseCIDR("192.168.255.4/32"), net.ParseIP("192.168.255.4"), false},
+ {"cidr /31", parseCIDR("192.168.255.4/31"), net.ParseIP("192.168.255.5"), false},
+ }
+ for _, tt := range tests {
+ test := tt
+ t.Run(test.name, func(t *testing.T) {
+ got, err := FirstIPInSubnet(test.args)
+ if (err != nil) != test.wantErr {
+ t.Errorf("FirstIPInSubnet() error = %v, wantErr %v", err, test.wantErr)
+ return
+ }
+ if !got.Equal(test.want) {
+ t.Errorf("FirstIPInSubnet() got = %v, want %v", got, test.want)
+ }
+ })
+ }
+}
+
+func TestLastIPInSubnet(t *testing.T) {
+ tests := []struct {
+ name string
+ args *net.IPNet
+ want net.IP
+ wantErr bool
+ }{
+ {"class b", parseCIDR("192.168.0.0/16"), net.ParseIP("192.168.255.255"), false},
+ {"class c", parseCIDR("192.168.1.0/24"), net.ParseIP("192.168.1.255"), false},
+ {"cidr /23", parseCIDR("192.168.0.0/23"), net.ParseIP("192.168.1.255"), false},
+ {"cidr /25", parseCIDR("192.168.1.0/25"), net.ParseIP("192.168.1.127"), false},
+ {"cidr /26", parseCIDR("172.16.1.128/26"), net.ParseIP("172.16.1.191"), false},
+ {"class a", parseCIDR("10.0.0.0/8"), net.ParseIP("10.255.255.255"), false},
+ {"cidr /32", parseCIDR("192.168.255.4/32"), net.ParseIP("192.168.255.4"), false},
+ {"cidr /31", parseCIDR("192.168.255.4/31"), net.ParseIP("192.168.255.5"), false},
+ }
+ for _, tt := range tests {
+ test := tt
+ t.Run(test.name, func(t *testing.T) {
+ got, err := LastIPInSubnet(test.args)
+ if (err != nil) != test.wantErr {
+ t.Errorf("LastIPInSubnet() error = %v, wantErr %v", err, test.wantErr)
+ return
+ }
+ if !got.Equal(test.want) {
+ t.Errorf("LastIPInSubnet() got = %v, want %v", got, test.want)
+ }
+ })
+ }
+}
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 9ff6e40b7..4e7ffaf81 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -13,6 +13,7 @@ import (
"os"
"os/exec"
"path/filepath"
+ "sort"
"strings"
"syscall"
"time"
@@ -20,6 +21,8 @@ import (
cnitypes "github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/podman/v2/libpod/define"
+ "github.com/containers/podman/v2/libpod/events"
+ "github.com/containers/podman/v2/libpod/network"
"github.com/containers/podman/v2/pkg/errorhandling"
"github.com/containers/podman/v2/pkg/netns"
"github.com/containers/podman/v2/pkg/rootless"
@@ -32,16 +35,16 @@ import (
)
// Get an OCICNI network config
-func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP, staticMAC net.HardwareAddr) ocicni.PodNetwork {
+func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP, staticMAC net.HardwareAddr, netDescriptions ContainerNetworkDescriptions) ocicni.PodNetwork {
var networkKey string
if len(networks) > 0 {
- // This is inconsistent for >1 network, but it's probably the
+		// This is inconsistent when more than one network is requested, but it's probably the
// best we can do.
networkKey = networks[0]
} else {
networkKey = r.netPlugin.GetDefaultNetworkName()
}
- network := ocicni.PodNetwork{
+ ctrNetwork := ocicni.PodNetwork{
Name: name,
Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces
ID: id,
@@ -53,9 +56,12 @@ func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, port
// If we have extra networks, add them
if len(networks) > 0 {
- network.Networks = make([]ocicni.NetAttachment, len(networks))
+ ctrNetwork.Networks = make([]ocicni.NetAttachment, len(networks))
for i, netName := range networks {
- network.Networks[i].Name = netName
+ ctrNetwork.Networks[i].Name = netName
+ if eth, exists := netDescriptions.getInterfaceByName(netName); exists {
+ ctrNetwork.Networks[i].Ifname = eth
+ }
}
}
@@ -64,8 +70,8 @@ func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, port
// it's just the default.
if len(networks) == 0 {
// If len(networks) == 0 this is guaranteed to be the
- // default network.
- network.Networks = []ocicni.NetAttachment{{Name: networkKey}}
+ // default ctrNetwork.
+ ctrNetwork.Networks = []ocicni.NetAttachment{{Name: networkKey}}
}
var rt ocicni.RuntimeConfig = ocicni.RuntimeConfig{PortMappings: ports}
if staticIP != nil {
@@ -74,12 +80,12 @@ func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, port
if staticMAC != nil {
rt.MAC = staticMAC.String()
}
- network.RuntimeConfig = map[string]ocicni.RuntimeConfig{
+ ctrNetwork.RuntimeConfig = map[string]ocicni.RuntimeConfig{
networkKey: rt,
}
}
- return network
+ return ctrNetwork
}
// Create and configure a new network namespace for a container
@@ -102,19 +108,30 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re
requestedMAC = ctr.config.StaticMAC
}
- // If we are in a pod use the pod name for the network, otherwise the container name
- var podName string
- if ctr.PodID() != "" {
- pod, err := r.GetPod(ctr.PodID())
- if err == nil {
- podName = pod.Name()
- }
+ podName := getCNIPodName(ctr)
+
+ networks, _, err := ctr.networks()
+ if err != nil {
+ return nil, err
}
- if podName == "" {
- podName = ctr.Name()
+ // All networks have been removed from the container.
+ // This is effectively forcing net=none.
+ if len(networks) == 0 {
+ return nil, nil
}
- podNetwork := r.getPodNetwork(ctr.ID(), podName, ctrNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP, requestedMAC)
+ // Update container map of interface descriptions
+ if err := ctr.setupNetworkDescriptions(networks); err != nil {
+ return nil, err
+ }
+ podNetwork := r.getPodNetwork(ctr.ID(), podName, ctrNS.Path(), networks, ctr.config.PortMappings, requestedIP, requestedMAC, ctr.state.NetInterfaceDescriptions)
+ aliases, err := ctr.runtime.state.GetAllNetworkAliases(ctr)
+ if err != nil {
+ return nil, err
+ }
+ if len(aliases) > 0 {
+ podNetwork.Aliases = aliases
+ }
results, err := r.netPlugin.SetUpPod(podNetwork)
if err != nil {
@@ -212,7 +229,11 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) error {
if ctr.config.NetMode.IsSlirp4netns() {
return r.setupSlirp4netns(ctr)
}
- if len(ctr.config.Networks) > 0 {
+ networks, _, err := ctr.networks()
+ if err != nil {
+ return err
+ }
+ if len(networks) > 0 {
// set up port forwarder for CNI-in-slirp4netns
netnsPath := ctr.state.NetNS.Path()
// TODO: support slirp4netns port forwarder as well
@@ -559,7 +580,7 @@ func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath strin
if stdoutStr != "" {
// err contains full debug log and too verbose, so return stdoutStr
logrus.Debug(err)
- return errors.Errorf("failed to expose ports via rootlessport: %q", stdoutStr)
+			return errors.Errorf("rootlessport %s", strings.TrimSuffix(stdoutStr, "\n"))
}
return err
}
@@ -728,8 +749,13 @@ func (r *Runtime) teardownNetNS(ctr *Container) error {
logrus.Debugf("Tearing down network namespace at %s for container %s", ctr.state.NetNS.Path(), ctr.ID())
+ networks, _, err := ctr.networks()
+ if err != nil {
+ return err
+ }
+
// rootless containers do not use the CNI plugin directly
- if !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() {
+ if !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() && len(networks) > 0 {
var requestedIP net.IP
if ctr.requestedIP != nil {
requestedIP = ctr.requestedIP
@@ -748,7 +774,7 @@ func (r *Runtime) teardownNetNS(ctr *Container) error {
requestedMAC = ctr.config.StaticMAC
}
- podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP, requestedMAC)
+ podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), networks, ctr.config.PortMappings, requestedIP, requestedMAC, ContainerNetworkDescriptions{})
if err := r.netPlugin.TearDownPod(podNetwork); err != nil {
return errors.Wrapf(err, "error tearing down CNI namespace configuration for container %s", ctr.ID())
@@ -756,7 +782,7 @@ func (r *Runtime) teardownNetNS(ctr *Container) error {
}
// CNI-in-slirp4netns
- if rootless.IsRootless() && len(ctr.config.Networks) != 0 {
+ if rootless.IsRootless() && len(networks) != 0 {
if err := DeallocRootlessCNI(context.Background(), ctr); err != nil {
return errors.Wrapf(err, "error tearing down CNI-in-slirp4netns for container %s", ctr.ID())
}
@@ -842,13 +868,18 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
settings := new(define.InspectNetworkSettings)
settings.Ports = makeInspectPortBindings(c.config.PortMappings)
+ networks, isDefault, err := c.networks()
+ if err != nil {
+ return nil, err
+ }
+
// We can't do more if the network is down.
if c.state.NetNS == nil {
// We still want to make dummy configurations for each CNI net
// the container joined.
- if len(c.config.Networks) > 0 {
- settings.Networks = make(map[string]*define.InspectAdditionalNetwork, len(c.config.Networks))
- for _, net := range c.config.Networks {
+ if len(networks) > 0 && !isDefault {
+ settings.Networks = make(map[string]*define.InspectAdditionalNetwork, len(networks))
+ for _, net := range networks {
cniNet := new(define.InspectAdditionalNetwork)
cniNet.NetworkID = net
settings.Networks[net] = cniNet
@@ -867,16 +898,16 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
}
// If we have CNI networks - handle that here
- if len(c.config.Networks) > 0 {
- if len(c.config.Networks) != len(c.state.NetworkStatus) {
- return nil, errors.Wrapf(define.ErrInternal, "network inspection mismatch: asked to join %d CNI networks but have information on %d networks", len(c.config.Networks), len(c.state.NetworkStatus))
+ if len(networks) > 0 && !isDefault {
+ if len(networks) != len(c.state.NetworkStatus) {
+ return nil, errors.Wrapf(define.ErrInternal, "network inspection mismatch: asked to join %d CNI network(s) %v, but have information on %d network(s)", len(networks), networks, len(c.state.NetworkStatus))
}
settings.Networks = make(map[string]*define.InspectAdditionalNetwork)
// CNI results should be in the same order as the list of
// networks we pass into CNI.
- for index, name := range c.config.Networks {
+ for index, name := range networks {
cniResult := c.state.NetworkStatus[index]
addedNet := new(define.InspectAdditionalNetwork)
addedNet.NetworkID = name
@@ -885,6 +916,13 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
if err != nil {
return nil, err
}
+
+ aliases, err := c.runtime.state.GetNetworkAliases(c, name)
+ if err != nil {
+ return nil, err
+ }
+ addedNet.Aliases = aliases
+
addedNet.InspectBasicNetworkConfig = basicConfig
settings.Networks[name] = addedNet
@@ -910,6 +948,29 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
return settings, nil
}
+// setupNetworkDescriptions adds networks and eth values to the container's
+// network descriptions
+func (c *Container) setupNetworkDescriptions(networks []string) error {
+ // if the map is nil and we have networks
+ if c.state.NetInterfaceDescriptions == nil && len(networks) > 0 {
+ c.state.NetInterfaceDescriptions = make(ContainerNetworkDescriptions)
+ }
+ origLen := len(c.state.NetInterfaceDescriptions)
+ for _, n := range networks {
+ // if the network is not in the map, add it
+ if _, exists := c.state.NetInterfaceDescriptions[n]; !exists {
+ c.state.NetInterfaceDescriptions.add(n)
+ }
+ }
+ // if the map changed, we need to save the container state
+ if origLen != len(c.state.NetInterfaceDescriptions) {
+ if err := c.save(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// resultToBasicNetworkConfig produces an InspectBasicNetworkConfig from a CNI
// result
func resultToBasicNetworkConfig(result *cnitypes.Result) (define.InspectBasicNetworkConfig, error) {
@@ -959,3 +1020,169 @@ func (w *logrusDebugWriter) Write(p []byte) (int, error) {
logrus.Debugf("%s%s", w.prefix, string(p))
return len(p), nil
}
+
+// NetworkDisconnect removes a container from the network
+func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) error {
+ networks, err := c.networksByNameIndex()
+ if err != nil {
+ return err
+ }
+
+ exists, err := network.Exists(c.runtime.config, netName)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return errors.Wrap(define.ErrNoSuchNetwork, netName)
+ }
+
+ index, nameExists := networks[netName]
+ if !nameExists && len(networks) > 0 {
+ return errors.Errorf("container %s is not connected to network %s", nameOrID, netName)
+ }
+
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if err := c.syncContainer(); err != nil {
+ return err
+ }
+
+ if c.state.State != define.ContainerStateRunning {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot disconnect container %s from networks as it is not running", nameOrID)
+ }
+ if c.state.NetNS == nil {
+ return errors.Wrapf(define.ErrNoNetwork, "unable to disconnect %s from %s", nameOrID, netName)
+ }
+ podConfig := c.runtime.getPodNetwork(c.ID(), c.Name(), c.state.NetNS.Path(), []string{netName}, c.config.PortMappings, nil, nil, c.state.NetInterfaceDescriptions)
+ if err := c.runtime.netPlugin.TearDownPod(podConfig); err != nil {
+ return err
+ }
+ if err := c.runtime.state.NetworkDisconnect(c, netName); err != nil {
+ return err
+ }
+
+ // update network status
+ networkStatus := c.state.NetworkStatus
+ // clip out the index of the network
+	tmpNetworkStatus := make([]*cnitypes.Result, 0, len(networkStatus)-1)
+ for k, v := range networkStatus {
+ if index != k {
+ tmpNetworkStatus = append(tmpNetworkStatus, v)
+ }
+ }
+ c.state.NetworkStatus = tmpNetworkStatus
+ c.newNetworkEvent(events.NetworkDisconnect, netName)
+ return c.save()
+}
+
+// NetworkConnect connects a container to a given network
+func (c *Container) NetworkConnect(nameOrID, netName string, aliases []string) error {
+ networks, err := c.networksByNameIndex()
+ if err != nil {
+ return err
+ }
+
+ exists, err := network.Exists(c.runtime.config, netName)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return errors.Wrap(define.ErrNoSuchNetwork, netName)
+ }
+
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if err := c.syncContainer(); err != nil {
+ return err
+ }
+
+ if c.state.State != define.ContainerStateRunning {
+ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot connect container %s to networks as it is not running", nameOrID)
+ }
+ if c.state.NetNS == nil {
+ return errors.Wrapf(define.ErrNoNetwork, "unable to connect %s to %s", nameOrID, netName)
+ }
+ if err := c.runtime.state.NetworkConnect(c, netName, aliases); err != nil {
+ return err
+ }
+
+ ctrNetworks, _, err := c.networks()
+ if err != nil {
+ return err
+ }
+ // Update network descriptions
+ if err := c.setupNetworkDescriptions(ctrNetworks); err != nil {
+ return err
+ }
+ podConfig := c.runtime.getPodNetwork(c.ID(), c.Name(), c.state.NetNS.Path(), []string{netName}, c.config.PortMappings, nil, nil, c.state.NetInterfaceDescriptions)
+ podConfig.Aliases = make(map[string][]string, 1)
+ podConfig.Aliases[netName] = aliases
+ results, err := c.runtime.netPlugin.SetUpPod(podConfig)
+ if err != nil {
+ return err
+ }
+ if len(results) != 1 {
+ return errors.New("when adding aliases, results must be of length 1")
+ }
+
+ networkResults := make([]*cnitypes.Result, 0)
+ for _, r := range results {
+ resultCurrent, err := cnitypes.GetResult(r.Result)
+ if err != nil {
+ return errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.Result, err)
+ }
+ networkResults = append(networkResults, resultCurrent)
+ }
+
+ // update network status
+ networkStatus := c.state.NetworkStatus
+	// if the container has no network status yet, simply store the new
+	// results as its full network status
+ if len(networkStatus) == 0 {
+ c.state.NetworkStatus = append(c.state.NetworkStatus, networkResults...)
+ } else {
+ // build a list of network names so we can sort and
+ // get the new name's index
+ var networkNames []string
+ for name := range networks {
+ networkNames = append(networkNames, name)
+ }
+ networkNames = append(networkNames, netName)
+ // sort
+ sort.Strings(networkNames)
+ // get index of new network name
+ index := sort.SearchStrings(networkNames, netName)
+		// Append a zero value to the slice
+ networkStatus = append(networkStatus, &cnitypes.Result{})
+ // populate network status
+ copy(networkStatus[index+1:], networkStatus[index:])
+ networkStatus[index] = networkResults[0]
+ c.state.NetworkStatus = networkStatus
+ }
+ c.newNetworkEvent(events.NetworkConnect, netName)
+ return c.save()
+}
+
+// DisconnectContainerFromNetwork removes a container from its CNI network
+func (r *Runtime) DisconnectContainerFromNetwork(nameOrID, netName string, force bool) error {
+ if rootless.IsRootless() {
+		return errors.New("network disconnect is not enabled for rootless containers")
+ }
+ ctr, err := r.LookupContainer(nameOrID)
+ if err != nil {
+ return err
+ }
+ return ctr.NetworkDisconnect(nameOrID, netName, force)
+}
+
+// ConnectContainerToNetwork connects a container to a CNI network
+func (r *Runtime) ConnectContainerToNetwork(nameOrID, netName string, aliases []string) error {
+ if rootless.IsRootless() {
+		return errors.New("network connect is not enabled for rootless containers")
+ }
+ ctr, err := r.LookupContainer(nameOrID)
+ if err != nil {
+ return err
+ }
+ return ctr.NetworkConnect(nameOrID, netName, aliases)
+}
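
The two Runtime wrappers are the entry points higher layers would call for live (dis)connection. A usage sketch with illustrative container and network names, assuming a *libpod.Runtime r:

	// attach a running container to an additional CNI network, with DNS aliases
	if err := r.ConnectContainerToNetwork("web", "backend", []string{"web", "www"}); err != nil {
		return err
	}
	// detach it again later
	if err := r.DisconnectContainerFromNetwork("web", "backend", false); err != nil {
		return err
	}
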
diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go
index 8651c1dc5..7068bf87a 100644
--- a/libpod/oci_conmon_exec_linux.go
+++ b/libpod/oci_conmon_exec_linux.go
@@ -444,10 +444,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
// }
// }
- conmonEnv, extraFiles, err := r.configureConmonEnv(c, runtimeDir)
- if err != nil {
- return nil, nil, err
- }
+ conmonEnv, extraFiles := r.configureConmonEnv(c, runtimeDir)
var filesToClose []*os.File
if options.PreserveFDs > 0 {
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 94630e57b..bd58610a2 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -32,6 +32,7 @@ import (
"github.com/containers/podman/v2/pkg/rootless"
"github.com/containers/podman/v2/pkg/util"
"github.com/containers/podman/v2/utils"
+ "github.com/containers/storage/pkg/homedir"
pmount "github.com/containers/storage/pkg/mount"
"github.com/coreos/go-systemd/v22/activation"
"github.com/coreos/go-systemd/v22/daemon"
@@ -120,7 +121,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
if os.IsNotExist(err) {
continue
}
- return nil, errors.Wrapf(err, "cannot stat OCI runtime %s path %q", name, path)
+ return nil, errors.Wrapf(err, "cannot stat OCI runtime %s path", name)
}
if !stat.Mode().IsRegular() {
continue
@@ -1065,10 +1066,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
}
// 0, 1 and 2 are stdin, stdout and stderr
- conmonEnv, envFiles, err := r.configureConmonEnv(ctr, runtimeDir)
- if err != nil {
- return err
- }
+ conmonEnv, envFiles := r.configureConmonEnv(ctr, runtimeDir)
var filesToClose []*os.File
if ctr.config.PreserveFDs > 0 {
@@ -1268,16 +1266,15 @@ func prepareProcessExec(c *Container, cmd, env []string, tty bool, cwd, user, se
// configureConmonEnv gets the environment values to add to conmon's exec struct
// TODO this may want to be less hardcoded/more configurable in the future
-func (r *ConmonOCIRuntime) configureConmonEnv(ctr *Container, runtimeDir string) ([]string, []*os.File, error) {
+func (r *ConmonOCIRuntime) configureConmonEnv(ctr *Container, runtimeDir string) ([]string, []*os.File) {
env := make([]string, 0, 6)
env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
env = append(env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED")))
env = append(env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID")))
- home, err := util.HomeDir()
- if err != nil {
- return nil, nil, err
+ home := homedir.Get()
+ if home != "" {
+ env = append(env, fmt.Sprintf("HOME=%s", home))
}
- env = append(env, fmt.Sprintf("HOME=%s", home))
extraFiles := make([]*os.File, 0)
if ctr.config.SdNotifyMode == define.SdNotifyModeContainer {
@@ -1294,7 +1291,7 @@ func (r *ConmonOCIRuntime) configureConmonEnv(ctr *Container, runtimeDir string)
} else {
logrus.Debug("disabling SD notify")
}
- return env, extraFiles, nil
+ return env, extraFiles
}
// sharedConmonArgs takes common arguments for exec and create/restore and formats them for the conmon CLI
diff --git a/libpod/options.go b/libpod/options.go
index 1ffb78da9..0f55f34a3 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1296,7 +1296,7 @@ func WithRootFS(rootfs string) CtrCreateOption {
return define.ErrCtrFinalized
}
if _, err := os.Stat(rootfs); err != nil {
- return errors.Wrapf(err, "error checking path %q", rootfs)
+ return err
}
ctr.config.Rootfs = rootfs
return nil
@@ -1439,6 +1439,25 @@ func WithOverlayVolumes(volumes []*ContainerOverlayVolume) CtrCreateOption {
}
}
+// WithImageVolumes adds the given image volumes to the container.
+func WithImageVolumes(volumes []*ContainerImageVolume) CtrCreateOption {
+ return func(ctr *Container) error {
+ if ctr.valid {
+ return define.ErrCtrFinalized
+ }
+
+ for _, vol := range volumes {
+ ctr.config.ImageVolumes = append(ctr.config.ImageVolumes, &ContainerImageVolume{
+ Dest: vol.Dest,
+ Source: vol.Source,
+ ReadWrite: vol.ReadWrite,
+ })
+ }
+
+ return nil
+ }
+}
+
// WithHealthCheck adds the healthcheck to the container config
func WithHealthCheck(healthCheck *manifest.Schema2HealthConfig) CtrCreateOption {
return func(ctr *Container) error {
@@ -1487,6 +1506,20 @@ func WithCreateWorkingDir() CtrCreateOption {
}
}
+// WithNetworkAliases sets network aliases for the container.
+// Accepts a map of network name to aliases.
+func WithNetworkAliases(aliases map[string][]string) CtrCreateOption {
+ return func(ctr *Container) error {
+ if ctr.valid {
+ return define.ErrCtrFinalized
+ }
+
+ ctr.config.NetworkAliases = aliases
+
+ return nil
+ }
+}
+
// Volume Creation Options
// WithVolumeName sets the name of the volume.
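
WithNetworkAliases is keyed by network name, and the state layer (see the validation tests later in this diff) rejects aliases for a network the container does not join. A sketch of creating a container with aliases, assuming the usual Runtime.NewContainer(ctx, spec, options...) entry point; the names are illustrative:

	aliases := map[string][]string{
		"backend": {"db", "postgres"}, // network name -> extra DNS names
	}
	ctr, err := r.NewContainer(ctx, spec,
		WithNetworkAliases(aliases),
	)
	if err != nil {
		return err
	}
	_ = ctr
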
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index f2ddba9c9..845948dd3 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -506,7 +506,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
})
ctrStatuses[c.ID()] = c.state.State
}
- podState, err := CreatePodStatusResults(ctrStatuses)
+ podState, err := createPodStatusResults(ctrStatuses)
if err != nil {
return nil, err
}
@@ -535,7 +535,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
infraConfig = new(define.InspectPodInfraConfig)
infraConfig.HostNetwork = p.config.InfraContainer.HostNetwork
infraConfig.StaticIP = p.config.InfraContainer.StaticIP
- infraConfig.StaticMAC = p.config.InfraContainer.StaticMAC
+ infraConfig.StaticMAC = p.config.InfraContainer.StaticMAC.String()
infraConfig.NoManageResolvConf = p.config.InfraContainer.UseImageResolvConf
infraConfig.NoManageHosts = p.config.InfraContainer.UseImageHosts
diff --git a/libpod/pod_status.go b/libpod/pod_status.go
index f4ccf308a..668d45ec7 100644
--- a/libpod/pod_status.go
+++ b/libpod/pod_status.go
@@ -10,10 +10,10 @@ func (p *Pod) GetPodStatus() (string, error) {
if err != nil {
return define.PodStateErrored, err
}
- return CreatePodStatusResults(ctrStatuses)
+ return createPodStatusResults(ctrStatuses)
}
-func CreatePodStatusResults(ctrStatuses map[string]define.ContainerStatus) (string, error) {
+func createPodStatusResults(ctrStatuses map[string]define.ContainerStatus) (string, error) {
ctrNum := len(ctrStatuses)
if ctrNum == 0 {
return define.PodStateCreated, nil
@@ -43,8 +43,10 @@ func CreatePodStatusResults(ctrStatuses map[string]define.ContainerStatus) (stri
}
switch {
- case statuses[define.PodStateRunning] > 0:
+ case statuses[define.PodStateRunning] == ctrNum:
return define.PodStateRunning, nil
+ case statuses[define.PodStateRunning] > 0:
+ return define.PodStateDegraded, nil
case statuses[define.PodStatePaused] == ctrNum:
return define.PodStatePaused, nil
case statuses[define.PodStateStopped] == ctrNum:
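
The reordered cases change the semantics: a pod is Running only when every container is running, and any mix of running and non-running containers now reports the new Degraded state. A test-style sketch inside the libpod package (the container IDs are illustrative):

	ctrStatuses := map[string]define.ContainerStatus{
		"ctr1": define.ContainerStateRunning,
		"ctr2": define.ContainerStateExited,
	}
	status, err := createPodStatusResults(ctrStatuses)
	// err == nil; with the logic above, status == define.PodStateDegraded
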
diff --git a/libpod/rootless_cni_linux.go b/libpod/rootless_cni_linux.go
index 21e43ebd0..2c2977f9f 100644
--- a/libpod/rootless_cni_linux.go
+++ b/libpod/rootless_cni_linux.go
@@ -40,8 +40,12 @@ const (
//
// AllocRootlessCNI does not lock c. c should be already locked.
func AllocRootlessCNI(ctx context.Context, c *Container) (ns.NetNS, []*cnitypes.Result, error) {
- if len(c.config.Networks) == 0 {
- return nil, nil, errors.New("allocRootlessCNI shall not be called when len(c.config.Networks) == 0")
+ networks, _, err := c.networks()
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(networks) == 0 {
+ return nil, nil, errors.New("rootless CNI networking requires that the container has joined at least one CNI network")
}
l, err := getRootlessCNIInfraLock(c.runtime)
if err != nil {
@@ -53,9 +57,9 @@ func AllocRootlessCNI(ctx context.Context, c *Container) (ns.NetNS, []*cnitypes.
if err != nil {
return nil, nil, err
}
- k8sPodName := getPodOrContainerName(c) // passed to CNI as K8S_POD_NAME
- cniResults := make([]*cnitypes.Result, len(c.config.Networks))
- for i, nw := range c.config.Networks {
+ k8sPodName := getCNIPodName(c) // passed to CNI as K8S_POD_NAME
+ cniResults := make([]*cnitypes.Result, len(networks))
+ for i, nw := range networks {
cniRes, err := rootlessCNIInfraCallAlloc(infra, c.ID(), nw, k8sPodName)
if err != nil {
return nil, nil, err
@@ -77,8 +81,12 @@ func AllocRootlessCNI(ctx context.Context, c *Container) (ns.NetNS, []*cnitypes.
//
// DeallocRootlessCNI does not lock c. c should be already locked.
func DeallocRootlessCNI(ctx context.Context, c *Container) error {
- if len(c.config.Networks) == 0 {
- return errors.New("deallocRootlessCNI shall not be called when len(c.config.Networks) == 0")
+ networks, _, err := c.networks()
+ if err != nil {
+ return err
+ }
+ if len(networks) == 0 {
+ return errors.New("rootless CNI networking requires that the container has joined at least one CNI network")
}
l, err := getRootlessCNIInfraLock(c.runtime)
if err != nil {
@@ -91,7 +99,7 @@ func DeallocRootlessCNI(ctx context.Context, c *Container) error {
return nil
}
var errs *multierror.Error
- for _, nw := range c.config.Networks {
+ for _, nw := range networks {
err := rootlessCNIInfraCallDelloc(infra, c.ID(), nw)
if err != nil {
errs = multierror.Append(errs, err)
@@ -115,12 +123,16 @@ func getRootlessCNIInfraLock(r *Runtime) (lockfile.Locker, error) {
return lockfile.GetLockfile(fname)
}
-func getPodOrContainerName(c *Container) string {
- pod, err := c.runtime.GetPod(c.PodID())
- if err != nil || pod.config.Name == "" {
- return c.Name()
+// getCNIPodName returns the pod name (hostname) used by CNI and the dnsname plugin.
+// If the container shares a pod network namespace, the pod name is used; otherwise the container name.
+func getCNIPodName(c *Container) string {
+ if c.config.NetMode.IsPod() || c.IsInfra() {
+ pod, err := c.runtime.GetPod(c.PodID())
+ if err == nil {
+ return pod.Name()
+ }
}
- return pod.config.Name
+ return c.Name()
}
func rootlessCNIInfraCallAlloc(infra *Container, id, nw, k8sPodName string) (*cnitypes.Result, error) {
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 792492db6..df3dfae2b 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -162,6 +162,10 @@ func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...R
runtime.config = conf
+ if err := SetXdgDirs(); err != nil {
+ return nil, err
+ }
+
storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
if err != nil {
return nil, err
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index de73a9ff3..14b537ca2 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -345,8 +345,15 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Lock all named volumes we are adding ourself to, to ensure we can't
// use a volume being removed.
+ volsLocked := make(map[string]bool)
for _, namedVol := range ctrNamedVolumes {
toLock := namedVol
+ // Ensure that we don't double-lock a named volume that is used
+ // more than once.
+ if volsLocked[namedVol.Name()] {
+ continue
+ }
+ volsLocked[namedVol.Name()] = true
toLock.lock.Lock()
defer toLock.lock.Unlock()
}
@@ -918,6 +925,56 @@ func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) (map[string]int
return prunedContainers, pruneErrors, nil
}
+// MountStorageContainer mounts the storage container's root filesystem
+func (r *Runtime) MountStorageContainer(id string) (string, error) {
+ if _, err := r.GetContainer(id); err == nil {
+ return "", errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ }
+ container, err := r.store.Container(id)
+ if err != nil {
+ return "", err
+ }
+ mountPoint, err := r.store.Mount(container.ID, "")
+ if err != nil {
+ return "", errors.Wrapf(err, "error mounting storage for container %s", id)
+ }
+ return mountPoint, nil
+}
+
+// UnmountStorageContainer unmounts the storage container's root filesystem
+func (r *Runtime) UnmountStorageContainer(id string, force bool) (bool, error) {
+ if _, err := r.GetContainer(id); err == nil {
+ return false, errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ }
+ container, err := r.store.Container(id)
+ if err != nil {
+ return false, err
+ }
+ return r.store.Unmount(container.ID, force)
+}
+
+// IsStorageContainerMounted returns whether a storage container is mounted
+// along with the mount path
+func (r *Runtime) IsStorageContainerMounted(id string) (bool, string, error) {
+ var path string
+ if _, err := r.GetContainer(id); err == nil {
+ return false, "", errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ }
+
+ mountCnt, err := r.storageService.MountedContainerImage(id)
+ if err != nil {
+ return false, "", err
+ }
+ mounted := mountCnt > 0
+ if mounted {
+ path, err = r.storageService.GetMountpoint(id)
+ if err != nil {
+ return false, "", err
+ }
+ }
+ return mounted, path, nil
+}
+
// StorageContainers returns a list of containers from containers/storage that
// are not currently known to Podman.
func (r *Runtime) StorageContainers() ([]storage.Container, error) {
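
The three storage-container helpers are meant to be used together; a caller would typically probe the mount state first and only mount when needed. A sketch, assuming a *libpod.Runtime r and a storage container ID in id:

	mounted, mountPoint, err := r.IsStorageContainerMounted(id)
	if err != nil {
		return err
	}
	if !mounted {
		if mountPoint, err = r.MountStorageContainer(id); err != nil {
			return err
		}
	}
	// ... work with the root filesystem at mountPoint ...
	if _, err := r.UnmountStorageContainer(id, false); err != nil {
		return err
	}
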
diff --git a/libpod/runtime_migrate.go b/libpod/runtime_migrate.go
index 3dc38f442..1ad32fe9c 100644
--- a/libpod/runtime_migrate.go
+++ b/libpod/runtime_migrate.go
@@ -29,7 +29,7 @@ func stopPauseProcess() error {
if os.IsNotExist(err) {
return nil
}
- return errors.Wrapf(err, "cannot read pause process pid file %s", pausePidPath)
+ return errors.Wrap(err, "cannot read pause process pid file")
}
pausePid, err := strconv.Atoi(string(data))
if err != nil {
diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go
index 7f58e86d8..76419587a 100644
--- a/libpod/runtime_pod_infra_linux.go
+++ b/libpod/runtime_pod_infra_linux.go
@@ -131,6 +131,7 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, rawIm
logrus.Debugf("Using %q as infra container entrypoint", entryCmd)
+ g.RemoveMount("/dev/shm")
if isRootless {
g.RemoveMount("/dev/pts")
devPts := spec.Mount{
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index e4e6d87e6..055a243c0 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -86,8 +86,8 @@ func (r *Runtime) HasVolume(name string) (bool, error) {
// Volumes retrieves all volumes
// Filters can be provided which will determine which volumes are included in the
-// output. Multiple filters are handled by ANDing their output, so only volumes
-// matching all filters are returned
+// output. If multiple filters are given, a volume is returned if it matches
+// any of the filters
func (r *Runtime) Volumes(filters ...VolumeFilter) ([]*Volume, error) {
r.lock.RLock()
defer r.lock.RUnlock()
@@ -101,11 +101,15 @@ func (r *Runtime) Volumes(filters ...VolumeFilter) ([]*Volume, error) {
return nil, err
}
+ if len(filters) == 0 {
+ return vols, nil
+ }
+
volsFiltered := make([]*Volume, 0, len(vols))
for _, vol := range vols {
- include := true
+ include := false
for _, filter := range filters {
- include = include && filter(vol)
+ include = include || filter(vol)
}
if include {
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 32fb1ef44..e1877b17d 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -75,7 +75,7 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID)
}
fullVolPath := filepath.Join(volPathRoot, "_data")
- if err := os.Mkdir(fullVolPath, 0755); err != nil {
+ if err := os.MkdirAll(fullVolPath, 0755); err != nil {
return nil, errors.Wrapf(err, "error creating volume directory %q", fullVolPath)
}
if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
diff --git a/libpod/state.go b/libpod/state.go
index 44632b02f..074d21740 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -98,6 +98,19 @@ type State interface {
// returned.
AllContainers() ([]*Container, error)
+ // Get networks the container is currently connected to.
+ GetNetworks(ctr *Container) ([]string, error)
+ // Get network aliases for the given container in the given network.
+ GetNetworkAliases(ctr *Container, network string) ([]string, error)
+ // Get all network aliases for the given container.
+ GetAllNetworkAliases(ctr *Container) (map[string][]string, error)
+ // Add the container to the given network, adding the given aliases
+ // (if present).
+ NetworkConnect(ctr *Container, network string, aliases []string) error
+ // Remove the container from the given network, removing all aliases for
+ // the container in that network in the process.
+ NetworkDisconnect(ctr *Container, network string) error
+
// Return a container config from the database by full ID
GetContainerConfig(id string) (*ContainerConfig, error)
diff --git a/libpod/state_test.go b/libpod/state_test.go
index 373feb6e0..da28f3d3f 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -1319,6 +1319,32 @@ func TestCannotUsePodAsDependency(t *testing.T) {
})
}
+func TestAddContainerEmptyNetworkNameErrors(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ testCtr.config.Networks = []string{""}
+
+ err = state.AddContainer(testCtr)
+ assert.Error(t, err)
+ })
+}
+
+func TestAddContainerNetworkAliasesButNoMatchingNetwork(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ testCtr.config.Networks = []string{"test1"}
+ testCtr.config.NetworkAliases = make(map[string][]string)
+ testCtr.config.NetworkAliases["test2"] = []string{"alias1"}
+
+ err = state.AddContainer(testCtr)
+ assert.Error(t, err)
+ })
+}
+
func TestCannotUseBadIDAsDependency(t *testing.T) {
runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
testCtr, err := getTestCtr1(manager)
diff --git a/libpod/util.go b/libpod/util.go
index 585b07aca..c26039c50 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -74,7 +74,7 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e
return false, nil
}
if !os.IsNotExist(err) {
- return false, errors.Wrapf(err, "checking file %s", path)
+ return false, err
}
case <-time.After(25 * time.Millisecond):
// Check periodically for the file existence. It is needed
@@ -86,7 +86,7 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e
return false, nil
}
if !os.IsNotExist(err) {
- return false, errors.Wrapf(err, "checking file %s", path)
+ return false, err
}
case <-timeoutChan:
return false, errors.Wrapf(define.ErrInternal, "timed out waiting for file %s", path)
@@ -184,11 +184,11 @@ func DefaultSeccompPath() (string, error) {
return config.SeccompOverridePath, nil
}
if !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "can't check if %q exists", config.SeccompOverridePath)
+ return "", err
}
if _, err := os.Stat(config.SeccompDefaultPath); err != nil {
if !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "can't check if %q exists", config.SeccompDefaultPath)
+ return "", err
}
return "", nil
}