-rw-r--r--  .cirrus.yml                                                         |  49
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md                                    |   7
-rw-r--r--  CONTRIBUTING.md                                                     |   4
-rw-r--r--  contrib/cirrus/lib.sh                                               |  11
-rwxr-xr-x  contrib/cirrus/setup_environment.sh                                 |   7
-rw-r--r--  go.mod                                                              |   2
-rw-r--r--  go.sum                                                              |   4
-rw-r--r--  libpod/boltdb_state.go                                              | 468
-rw-r--r--  libpod/boltdb_state_internal.go                                     | 220
-rw-r--r--  libpod/container_config.go                                          |   9
-rw-r--r--  libpod/container_validate.go                                        |  11
-rw-r--r--  libpod/define/errors.go                                             |  10
-rw-r--r--  libpod/in_memory_state.go                                           | 393
-rw-r--r--  libpod/options.go                                                   |  14
-rw-r--r--  libpod/state.go                                                     |  15
-rw-r--r--  libpod/state_test.go                                                | 244
-rw-r--r--  pkg/api/server/idle/tracker.go                                      |   5
-rw-r--r--  test/e2e/exec_test.go                                               |  28
-rw-r--r--  test/e2e/pod_infra_container_test.go                                |   6
-rw-r--r--  troubleshooting.md                                                  |   6
-rw-r--r--  vendor/github.com/containers/storage/VERSION                        |   2
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go   |  96
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_others.go  |   2
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go    | 100
-rw-r--r--  vendor/github.com/containers/storage/userns.go                      | 125
-rw-r--r--  vendor/github.com/containers/storage/utils.go                       |   6
-rw-r--r--  vendor/modules.txt                                                  |   2
27 files changed, 1582 insertions(+), 264 deletions(-)
diff --git a/.cirrus.yml b/.cirrus.yml
index b75e99184..1a109f5ba 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -21,13 +21,13 @@ env:
####
#### Cache-image names to test with (double-quotes around names are critical)
####
- FEDORA_NAME: "fedora-32"
- PRIOR_FEDORA_NAME: "fedora-31"
+ FEDORA_NAME: "fedora-33"
+ PRIOR_FEDORA_NAME: "fedora-32"
UBUNTU_NAME: "ubuntu-20"
PRIOR_UBUNTU_NAME: "ubuntu-19"
# Google-cloud VM Images
- IMAGE_SUFFIX: "c4948709391728640"
+ IMAGE_SUFFIX: "c6323493627232256"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
@@ -74,12 +74,8 @@ ext_svc_check_task:
env:
TEST_FLAVOR: ext_svc
CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
- setup_script: &setup
- - 'cd $GOSRC/$SCRIPT_BASE || exit 1'
- - './setup_environment.sh'
- main_script: &main
- - 'cd $GOSRC/$SCRIPT_BASE || exit 1'
- - './runner.sh'
+ setup_script: &setup '$GOSRC/$SCRIPT_BASE/setup_environment.sh'
+ main_script: &main '$GOSRC/$SCRIPT_BASE/runner.sh'
# Execute some quick checks to confirm this YAML file and all
@@ -189,17 +185,13 @@ build_task:
clone_script: &noop mkdir -p $CIRRUS_WORKING_DIR
setup_script: *setup
main_script: *main
- always: &artifacts
+ always: &binary_artifacts
gosrc_artifacts:
path: ./* # Grab everything in top-level $GOSRC
type: application/octet-stream
binary_artifacts:
path: ./bin/*
type: application/octet-stream
- # Required for `contrib/cirrus/logformatter` to work properly
- html_artifacts:
- path: ./*.html
- type: text/html
# Confirm the result of building on at least one platform appears sane.
@@ -228,7 +220,6 @@ validate_task:
clone_script: *noop
setup_script: *setup
main_script: *main
- always: *artifacts
# Exercise the "libpod" API with a small set of common
@@ -248,7 +239,6 @@ bindings_task:
clone_script: *noop # Comes from cache
setup_script: *setup
main_script: *main
- always: *artifacts
# Build the "libpod" API documentation `swagger.yaml` for eventual
@@ -267,7 +257,7 @@ swagger_task:
clone_script: *full_clone # build-cache not available to container tasks
setup_script: *setup
main_script: *main
- always: *artifacts
+ always: *binary_artifacts
endpoint_task:
@@ -285,7 +275,6 @@ endpoint_task:
clone_script: *full_clone # build-cache not available to container tasks
setup_script: *setup
main_script: *main
- always: *artifacts
# Check that all included go modules from other sources match
@@ -304,7 +293,6 @@ vendor_task:
clone_script: *full_clone # build-cache not available to container tasks
setup_script: *setup
main_script: *main
- always: *artifacts
# There are several other important variations of podman which
@@ -335,7 +323,8 @@ alt_build_task:
ALT_NAME: 'Build varlink-binaries'
setup_script: *setup
main_script: *main
- always: *artifacts
+ always: *binary_artifacts
+
# Confirm building a statically-linked binary is successful
static_alt_build_task:
@@ -346,7 +335,7 @@ static_alt_build_task:
- build
# Community-maintained task, may fail on occasion. If so, uncomment
# the next line and file an issue with details about the failure.
- # allow_failures: $CI == $CI
+ allow_failures: $CI == $CI
gce_instance: *bigvm
env:
<<: *stdenvars
@@ -364,7 +353,7 @@ static_alt_build_task:
fingerprint_script: cat nix/*
setup_script: *setup
main_script: *main
- always: *artifacts
+ always: *binary_artifacts
# Confirm building the remote client, natively on a Mac OS-X VM.
@@ -385,7 +374,7 @@ osx_alt_build_task:
- brew install go-md2man
- make podman-remote-darwin
- make install-podman-remote-darwin-docs
- always: *artifacts
+ always: *binary_artifacts
# This task is a stub: In the future it will be used to verify
@@ -405,7 +394,6 @@ docker-py_test_task:
clone_script: *noop # Comes from cache
setup_script: *setup
main_script: *main
- always: *artifacts
# Does exactly what it says, execute the podman unit-tests on all primary
@@ -424,7 +412,6 @@ unit_test_task:
gopath_cache: *ro_gopath_cache
setup_script: *setup
main_script: *main
- always: *artifacts
apiv2_test_task:
@@ -441,7 +428,10 @@ apiv2_test_task:
setup_script: *setup
main_script: *main
always: &logs_artifacts
- <<: *artifacts
+ # Required for `contrib/cirrus/logformatter` to work properly
+ html_artifacts:
+ path: ./*.html
+ type: text/html
package_versions_script: '$SCRIPT_BASE/logcollector.sh packages'
ginkgo_node_logs_script: '$SCRIPT_BASE/logcollector.sh ginkgo'
df_script: '$SCRIPT_BASE/logcollector.sh df'
@@ -515,6 +505,7 @@ container_integration_test_task:
main_script: *main
always: *logs_artifacts
+
# Execute most integration tests as a regular (non-root) user.
rootless_integration_test_task:
name: *std_name_fmt
@@ -584,6 +575,7 @@ rootless_system_test_task:
main_script: *main
always: *logs_artifacts
+
# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
@@ -665,7 +657,8 @@ release_task:
clone_script: *noop # Comes from cache
setup_script: *setup
main_script: *main
- always: *artifacts
+ always: *binary_artifacts
+
# When preparing to release a new version, this task may be manually
# activated at the PR stage to verify the code is in a proper state.
@@ -686,4 +679,4 @@ release_test_task:
clone_script: *noop # Comes from cache
setup_script: *setup
main_script: *main
- always: *artifacts
+ always: *binary_artifacts
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..568cf7240
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,7 @@
+<!--
+Thanks for sending a pull request!
+
+Please make sure you've read our contributing guidelines and how to submit a pull request (https://github.com/containers/podman/blob/master/CONTRIBUTING.md#submitting-pull-requests).
+
+In case you're only changing docs, make sure to prefix the pull-request title with "[CI:DOCS]". That will prevent functional tests from running and save time and energy.
+-->
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 308c7b197..1d2c26750 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -157,6 +157,10 @@ when the PR is merged.
PRs will be approved by an [approver][owners] listed in [`OWNERS`](OWNERS).
+In case you're only changing docs, make sure to prefix the PR title with
+"[CI:DOCS]". That will prevent functional tests from running and save time and
+energy.
+
### Describe your Changes in Commit Messages
Describe your problem. Whether your patch is a one-line bug fix or 5000 lines
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index 050fb16f3..04e8a3c1c 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -10,6 +10,9 @@ set -a
# handling of the (otherwise) default shell setup is non-uniform. Rather
# than attempt to workaround differences, simply force-load/set required
# items every time this library is utilized.
+_waserrexit=0
+if [[ "$SHELLOPTS" =~ errexit ]]; then _waserrexit=1; fi
+set +e # Assumed in F33 for setting global vars
source /etc/profile
source /etc/environment
if [[ -r "/etc/ci_environment" ]]; then source /etc/ci_environment; fi
@@ -18,6 +21,7 @@ HOME="$(getent passwd $USER | cut -d : -f 6)"
# Some platforms set and make this read-only
[[ -n "$UID" ]] || \
UID=$(getent passwd $USER | cut -d : -f 3)
+if ((_waserrexit)); then set -e; fi
# During VM Image build, the 'containers/automation' installation
# was performed. The final step of installation sets the library
@@ -25,11 +29,8 @@ HOME="$(getent passwd $USER | cut -d : -f 6)"
# default shell profile depending on distribution.
# shellcheck disable=SC2154
if [[ -n "$AUTOMATION_LIB_PATH" ]]; then
- for libname in defaults anchors console_output utils; do
- # There's no way shellcheck can process this location
- # shellcheck disable=SC1090
- source $AUTOMATION_LIB_PATH/${libname}.sh
- done
+ # shellcheck source=/usr/share/automation/lib/common_lib.sh
+ source $AUTOMATION_LIB_PATH/common_lib.sh
else
(
echo "WARNING: It does not appear that containers/automation was installed."
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index 8ccbd95d9..da175cc05 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -99,11 +99,12 @@ fi
case "$OS_RELEASE_ID" in
ubuntu*) ;;
fedora*)
- if ((CONTAINER==0)); then # Not yet running inside a container
+ if ((CONTAINER==0)); then
msg "Configuring / Expanding host storage."
# VM is setup to allow flexibility in testing alternate storage.
# For general use, simply make use of all available space.
- ooe.sh bash "$SCRIPT_BASE/add_second_partition.sh"
+ bash "$SCRIPT_BASE/add_second_partition.sh"
+ $SCRIPT_BASE/logcollector.sh df
# All SELinux distros need this for systemd-in-a-container
msg "Enabling container_manage_cgroup"
@@ -215,4 +216,4 @@ echo -e "\n# End of global variable definitions" \
>> /etc/ci_environment
msg "Global CI Environment vars.:"
-cat /etc/ci_environment | sort | indent
+grep -Ev '^#' /etc/ci_environment | sort | indent
diff --git a/go.mod b/go.mod
index bd9effa19..f7e211744 100644
--- a/go.mod
+++ b/go.mod
@@ -15,7 +15,7 @@ require (
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.7.0
github.com/containers/psgo v1.5.1
- github.com/containers/storage v1.23.8
+ github.com/containers/storage v1.23.9
github.com/coreos/go-systemd/v22 v22.1.0
github.com/cri-o/ocicni v0.2.0
github.com/cyphar/filepath-securejoin v0.2.2
diff --git a/go.sum b/go.sum
index ad40d8f83..603b394c6 100644
--- a/go.sum
+++ b/go.sum
@@ -101,8 +101,8 @@ github.com/containers/psgo v1.5.1/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzP
github.com/containers/storage v1.23.6/go.mod h1:haFs0HRowKwyzvWEx9EgI3WsL8XCSnBDb5f8P5CAxJY=
github.com/containers/storage v1.23.7 h1:43ImvG/npvQSZXRjaudVvKISIuZSfI6qvtSNQQSGO/A=
github.com/containers/storage v1.23.7/go.mod h1:cUT2zHjtx+WlVri30obWmM2gpqpi8jfPsmIzP1TVpEI=
-github.com/containers/storage v1.23.8 h1:Z3KKE9BkbW6CGOjIeTtvX+Dl9pFX8QgvSD2j/tS+r5E=
-github.com/containers/storage v1.23.8/go.mod h1:3b2ktpB6pw53SEeIoFfO0sQfP9+IoJJKPq5iJk74gxE=
+github.com/containers/storage v1.23.9 h1:qbgnTp76pLSyW3vYwY5GH4vk5cHYVXFJ+CsUEBp9TMw=
+github.com/containers/storage v1.23.9/go.mod h1:3b2ktpB6pw53SEeIoFfO0sQfP9+IoJJKPq5iJk74gxE=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38=
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 9dd5ca465..0b9b353c7 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -50,10 +50,12 @@ type BoltState struct {
// containers in the pod.
// - allPodsBkt: Map of ID to name containing only pods. Used for pod lookup
// operations.
-// - execBkt: Map of exec session ID to exec session - contains a sub-bucket for
-// each exec session in the DB.
-// - execRegistryBkt: Map of exec session ID to nothing. Contains one entry for
-// each exec session. Used for iterating through all exec sessions.
+// - execBkt: Map of exec session ID to container ID - used for resolving
+// exec session IDs to the containers that hold the exec session.
+// - aliasesBkt: Contains a bucket for each CNI network, each of which contains a map of
+// network alias (an extra name for containers in DNS) to the ID of the
+// container holding the alias. Aliases must be unique per-network, and cannot
+// conflict with names registered in nameRegistryBkt.
// - runtimeConfigBkt: Contains configuration of the libpod instance that
// initially created the database. This must match for any further instances
// that access the database, to ensure that state mismatches with
@@ -92,6 +94,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
volBkt,
allVolsBkt,
execBkt,
+ aliasesBkt,
runtimeConfigBkt,
}
@@ -969,6 +972,463 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
return ctrs, nil
}
+// GetNetworkAliases retrieves the network aliases for the given container in
+// the given CNI network.
+func (s *BoltState) GetNetworkAliases(ctr *Container, network string) ([]string, error) {
+ if !s.valid {
+ return nil, define.ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return nil, define.ErrCtrRemoved
+ }
+
+ if network == "" {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ if s.namespace != "" && s.namespace != ctr.config.Namespace {
+ return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ }
+
+ ctrID := []byte(ctr.ID())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ aliases := []string{}
+
+ err = db.View(func(tx *bolt.Tx) error {
+ ctrBucket, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ dbCtr := ctrBucket.Bucket(ctrID)
+ if dbCtr == nil {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ }
+
+ ctrNetworkBkt := dbCtr.Bucket(networksBkt)
+ if ctrNetworkBkt == nil {
+ // No networks joined, so no aliases
+ return nil
+ }
+
+ inNetwork := ctrNetworkBkt.Get([]byte(network))
+ if inNetwork == nil {
+ return errors.Wrapf(define.ErrNoAliases, "container %s is not part of network %s, no aliases found", ctr.ID(), network)
+ }
+
+ ctrAliasesBkt := dbCtr.Bucket(aliasesBkt)
+ if ctrAliasesBkt == nil {
+ // No aliases
+ return nil
+ }
+
+ netAliasesBkt := ctrAliasesBkt.Bucket([]byte(network))
+ if netAliasesBkt == nil {
+ return errors.Wrapf(define.ErrNoAliasesForNetwork, "container %s has no aliases for network %q", ctr.ID(), network)
+ }
+
+ return netAliasesBkt.ForEach(func(alias, v []byte) error {
+ aliases = append(aliases, string(alias))
+ return nil
+ })
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return aliases, nil
+}
+
+// GetAllNetworkAliases retrieves the network aliases for the given container in
+// all CNI networks.
+func (s *BoltState) GetAllNetworkAliases(ctr *Container) (map[string][]string, error) {
+ if !s.valid {
+ return nil, define.ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return nil, define.ErrCtrRemoved
+ }
+
+ if s.namespace != "" && s.namespace != ctr.config.Namespace {
+ return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ }
+
+ ctrID := []byte(ctr.ID())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ aliases := make(map[string][]string)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ ctrBucket, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ dbCtr := ctrBucket.Bucket(ctrID)
+ if dbCtr == nil {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ }
+
+ ctrAliasesBkt := dbCtr.Bucket(aliasesBkt)
+ if ctrAliasesBkt == nil {
+ // No aliases present
+ return nil
+ }
+
+ ctrNetworkBkt := dbCtr.Bucket(networksBkt)
+ if ctrNetworkBkt == nil {
+ // No networks joined, so no aliases
+ return nil
+ }
+
+ return ctrNetworkBkt.ForEach(func(network, v []byte) error {
+ netAliasesBkt := ctrAliasesBkt.Bucket(network)
+ if netAliasesBkt == nil {
+ return nil
+ }
+
+ netAliases := []string{}
+
+ _ = netAliasesBkt.ForEach(func(alias, v []byte) error {
+ netAliases = append(netAliases, string(alias))
+ return nil
+ })
+
+ aliases[string(network)] = netAliases
+ return nil
+ })
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return aliases, nil
+}
+
+// SetNetworkAliases sets network aliases for the given container in the given
+// network. All existing aliases for that network (if any exist) will be removed,
+// to be replaced by the new aliases given.
+func (s *BoltState) SetNetworkAliases(ctr *Container, network string, aliases []string) error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if network == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ if s.namespace != "" && s.namespace != ctr.config.Namespace {
+ return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ }
+
+ ctrID := []byte(ctr.ID())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ return db.Update(func(tx *bolt.Tx) error {
+ ctrBucket, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ allAliasesBucket, err := getAliasesBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ netAllAliasesBucket, err := allAliasesBucket.CreateBucketIfNotExists([]byte(network))
+ if err != nil {
+ return errors.Wrapf(err, "error creating network aliases bucket for network %s", network)
+ }
+
+ dbCtr := ctrBucket.Bucket(ctrID)
+ if dbCtr == nil {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ }
+
+ ctrAliasesBkt := dbCtr.Bucket(aliasesBkt)
+ if ctrAliasesBkt == nil {
+ return errors.Wrapf(define.ErrNoAliases, "container %s has no network aliases", ctr.ID())
+ }
+
+ ctrNetworksBkt := dbCtr.Bucket(networksBkt)
+ if ctrNetworksBkt == nil {
+ return errors.Wrapf(define.ErrInvalidArg, "container %s is not connected to any CNI networks, so cannot add aliases", ctr.ID())
+ }
+ netConnected := ctrNetworksBkt.Get([]byte(network))
+ if netConnected == nil {
+ return errors.Wrapf(define.ErrInvalidArg, "container %s is not connected to CNI network %q, so cannot add aliases for this network", ctr.ID(), network)
+ }
+
+ namesBucket, err := getNamesBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ // Check if the container already has network aliases for this network.
+ netAliasesBkt := ctrAliasesBkt.Bucket([]byte(network))
+ if netAliasesBkt != nil {
+ // We have aliases. Have to remove them.
+ forEachErr := netAliasesBkt.ForEach(func(alias, v []byte) error {
+ // Relies on errors.Wrapf(nil, ...) returning
+ // nil.
+ return errors.Wrapf(netAllAliasesBucket.Delete(alias), "error removing alias %q from network %q when changing aliases for container %s", string(alias), network, ctr.ID())
+ })
+ if forEachErr != nil {
+ return forEachErr
+ }
+ }
+
+ if netAliasesBkt == nil {
+ newBkt, err := ctrAliasesBkt.CreateBucket([]byte(network))
+ if err != nil {
+ return errors.Wrapf(err, "could not create bucket for network aliases for network %q", network)
+ }
+ netAliasesBkt = newBkt
+ }
+
+ for _, alias := range aliases {
+ // Check if safe to use
+ aliasExists := netAllAliasesBucket.Get([]byte(alias))
+ if aliasExists != nil {
+ return errors.Wrapf(define.ErrAliasExists, "network alias %q already exists in network %q (used by container %s)", alias, network, string(aliasExists))
+ }
+ nameExists := namesBucket.Get([]byte(alias))
+ if nameExists != nil {
+ return errors.Wrapf(define.ErrCtrExists, "a container or pod already uses the name %q, cannot add network alias for container %s", alias, ctr.ID())
+ }
+
+ // Add alias
+ if err := netAliasesBkt.Put([]byte(alias), ctrID); err != nil {
+ return errors.Wrapf(err, "error adding container %s network %q alias %q to DB", ctr.ID(), network, alias)
+ }
+ if err := netAllAliasesBucket.Put([]byte(alias), ctrID); err != nil {
+ return errors.Wrapf(err, "error adding container %s network %q alias %q to all aliases in DB", ctr.ID(), network, alias)
+ }
+ }
+
+ return nil
+ })
+}
+
+// RemoveNetworkAliases removes network aliases of the given container in the
+// given network.
+func (s *BoltState) RemoveNetworkAliases(ctr *Container, network string) error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if network == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ if s.namespace != "" && s.namespace != ctr.config.Namespace {
+ return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+ }
+
+ ctrID := []byte(ctr.ID())
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ return db.Update(func(tx *bolt.Tx) error {
+ ctrBucket, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ allAliasesBucket, err := getAliasesBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ netAllAliasesBucket, err := allAliasesBucket.CreateBucketIfNotExists([]byte(network))
+ if err != nil {
+ return errors.Wrapf(err, "error creating network aliases bucket for network %s", network)
+ }
+
+ dbCtr := ctrBucket.Bucket(ctrID)
+ if dbCtr == nil {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+ }
+
+ ctrAliasesBkt := dbCtr.Bucket(aliasesBkt)
+ if ctrAliasesBkt == nil {
+ return errors.Wrapf(define.ErrNoAliases, "container %s has no network aliases", ctr.ID())
+ }
+
+ ctrNetworksBkt := dbCtr.Bucket(networksBkt)
+ if ctrNetworksBkt == nil {
+ return errors.Wrapf(define.ErrInvalidArg, "container %s is not connected to any CNI networks, so cannot remove aliases", ctr.ID())
+ }
+ netConnected := ctrNetworksBkt.Get([]byte(network))
+ if netConnected == nil {
+ return errors.Wrapf(define.ErrInvalidArg, "container %s is not connected to CNI network %q, so cannot remove aliases for this network", ctr.ID(), network)
+ }
+
+ // Check if the container already has network aliases for this network.
+ netAliasesBkt := ctrAliasesBkt.Bucket([]byte(network))
+ if netAliasesBkt != nil {
+ // We have aliases. Remove them.
+ forEachErr := netAliasesBkt.ForEach(func(alias, v []byte) error {
+ // Relies on errors.Wrapf(nil, ...) returning
+ // nil.
+ return errors.Wrapf(netAllAliasesBucket.Delete(alias), "error removing alias %q from network %q when changing aliases for container %s", string(alias), network, ctr.ID())
+ })
+ if forEachErr != nil {
+ return forEachErr
+ }
+ }
+
+ return nil
+ })
+}
+
+// Get all network aliases for a single CNI network. Returns a map of alias to
+// container ID.
+func (s *BoltState) GetAllAliasesForNetwork(network string) (map[string]string, error) {
+ if !s.valid {
+ return nil, define.ErrDBClosed
+ }
+
+ if network == "" {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "network name must not be empty")
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return nil, err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ aliases := make(map[string]string)
+
+ err = db.View(func(tx *bolt.Tx) error {
+ aliasBucket, err := getAliasesBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ dbAlias := aliasBucket.Bucket([]byte(network))
+ if dbAlias == nil {
+ // We can't tell if the network exists, or doesn't exist
+ // So... Assume it exists, but has no aliases.
+ return nil
+ }
+
+ return dbAlias.ForEach(func(alias, ctrId []byte) error {
+ aliases[string(alias)] = string(ctrId)
+ return nil
+ })
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return aliases, nil
+}
+
+// RemoveAllAliasesForNetwork removes all the aliases in a given CNI network, as
+// part of that network being removed.
+func (s *BoltState) RemoveAllAliasesForNetwork(network string) error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ if network == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ return db.Update(func(tx *bolt.Tx) error {
+ allCtrsBucket, err := getAllCtrsBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ ctrBucket, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ allAliasesBucket, err := getAliasesBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ checkAliasesBucketExists := allAliasesBucket.Bucket([]byte(network))
+ if checkAliasesBucketExists != nil {
+ if err := allAliasesBucket.DeleteBucket([]byte(network)); err != nil {
+ return errors.Wrapf(err, "error removing network %s aliases bucket from DB", network)
+ }
+ }
+
+ // Iterate through all containers and remove their aliases
+ // bucket for the network.
+ return allCtrsBucket.ForEach(func(ctrID, ctrName []byte) error {
+ dbCtr := ctrBucket.Bucket(ctrID)
+ if dbCtr == nil {
+ // DB State is inconsistent... but we can't do
+ // anything about it.
+ // Log and move on.
+ logrus.Errorf("Container %s listed in all containers, but has no bucket!", string(ctrID))
+ return nil
+ }
+
+ dbCtrAliases := dbCtr.Bucket(aliasesBkt)
+ if dbCtrAliases == nil {
+ // Container has no aliases, this is OK.
+ return nil
+ }
+
+ ctrNetAliases := dbCtrAliases.Bucket([]byte(network))
+ if ctrNetAliases != nil {
+ if err := dbCtrAliases.DeleteBucket([]byte(network)); err != nil {
+ return errors.Wrapf(err, "error removing bucket for network aliases for network %s from container %s", network, string(ctrID))
+ }
+ }
+ return nil
+ })
+ })
+}
+
// GetContainerConfig returns a container config from the database by full ID
func (s *BoltState) GetContainerConfig(id string) (*ContainerConfig, error) {
if len(id) == 0 {
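To summarize the layout the new alias code above relies on: the top-level aliases bucket maps network name to alias to container ID, and each container's own bucket gains a networks sub-bucket plus an aliases sub-bucket keyed by network name. The following is a minimal, hypothetical read-only sketch (not part of the patch) that walks both views with the same bbolt calls the patch uses; readAliases is an invented helper, the fmt and bolt imports are assumed, and aliasesBkt/ctrBkt are the package-level bucket names defined in boltdb_state_internal.go:

func readAliases(db *bolt.DB, ctrID []byte, network string) error {
	return db.View(func(tx *bolt.Tx) error {
		// Global view: aliasesBkt -> <network> -> <alias> = <container ID>
		if allAliases := tx.Bucket(aliasesBkt); allAliases != nil {
			if netAliases := allAliases.Bucket([]byte(network)); netAliases != nil {
				_ = netAliases.ForEach(func(alias, owner []byte) error {
					fmt.Printf("%s -> %s\n", alias, owner)
					return nil
				})
			}
		}
		// Per-container view: ctrBkt -> <ctr ID> -> aliasesBkt -> <network> -> <alias> = <ctr ID>
		if ctrs := tx.Bucket(ctrBkt); ctrs != nil {
			if dbCtr := ctrs.Bucket(ctrID); dbCtr != nil {
				_ = dbCtr.Bucket(aliasesBkt) // same aliases, mirrored per container
			}
		}
		return nil
	})
}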
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 2f485318c..a48de3092 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -26,6 +26,7 @@ const (
volName = "vol"
allVolsName = "allVolumes"
execName = "exec"
+ aliasesName = "aliases"
runtimeConfigName = "runtime-config"
configName = "config"
@@ -36,6 +37,7 @@ const (
containersName = "containers"
podIDName = "pod-id"
namespaceName = "namespace"
+ networksName = "networks"
staticDirName = "static-dir"
tmpDirName = "tmp-dir"
@@ -47,26 +49,28 @@ const (
)
var (
- idRegistryBkt = []byte(idRegistryName)
- nameRegistryBkt = []byte(nameRegistryName)
- nsRegistryBkt = []byte(nsRegistryName)
- ctrBkt = []byte(ctrName)
- allCtrsBkt = []byte(allCtrsName)
- podBkt = []byte(podName)
- allPodsBkt = []byte(allPodsName)
- volBkt = []byte(volName)
- allVolsBkt = []byte(allVolsName)
- execBkt = []byte(execName)
- runtimeConfigBkt = []byte(runtimeConfigName)
-
- configKey = []byte(configName)
- stateKey = []byte(stateName)
+ idRegistryBkt = []byte(idRegistryName)
+ nameRegistryBkt = []byte(nameRegistryName)
+ nsRegistryBkt = []byte(nsRegistryName)
+ ctrBkt = []byte(ctrName)
+ allCtrsBkt = []byte(allCtrsName)
+ podBkt = []byte(podName)
+ allPodsBkt = []byte(allPodsName)
+ volBkt = []byte(volName)
+ allVolsBkt = []byte(allVolsName)
+ execBkt = []byte(execName)
+ aliasesBkt = []byte(aliasesName)
+ runtimeConfigBkt = []byte(runtimeConfigName)
dependenciesBkt = []byte(dependenciesName)
volDependenciesBkt = []byte(volCtrDependencies)
- netNSKey = []byte(netNSName)
- containersBkt = []byte(containersName)
- podIDKey = []byte(podIDName)
- namespaceKey = []byte(namespaceName)
+ networksBkt = []byte(networksName)
+
+ configKey = []byte(configName)
+ stateKey = []byte(stateName)
+ netNSKey = []byte(netNSName)
+ containersBkt = []byte(containersName)
+ podIDKey = []byte(podIDName)
+ namespaceKey = []byte(namespaceName)
staticDirKey = []byte(staticDirName)
tmpDirKey = []byte(tmpDirName)
@@ -350,6 +354,14 @@ func getExecBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
return bkt, nil
}
+func getAliasesBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
+ bkt := tx.Bucket(aliasesBkt)
+ if bkt == nil {
+ return nil, errors.Wrapf(define.ErrDBBadConfig, "aliases bucket not found in DB")
+ }
+ return bkt, nil
+}
+
func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(runtimeConfigBkt)
if bkt == nil {
@@ -572,6 +584,11 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
return err
}
+ allAliasesBkt, err := getAliasesBucket(tx)
+ if err != nil {
+ return err
+ }
+
// If a pod was given, check if it exists
var podDB *bolt.Bucket
var podCtrs *bolt.Bucket
@@ -618,6 +635,44 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
return errors.Wrapf(err, "name \"%s\" is in use", ctr.Name())
}
+ // Check that we don't have any empty network names
+ for _, net := range ctr.config.Networks {
+ if net == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names cannot be an empty string")
+ }
+ }
+
+ // If we have network aliases, check if they are already in use.
+ for net, aliases := range ctr.config.NetworkAliases {
+ // Aliases cannot conflict with container names.
+ for _, alias := range aliases {
+ aliasExist := namesBucket.Get([]byte(alias))
+ if aliasExist != nil {
+ return errors.Wrapf(define.ErrCtrExists, "alias %q conflicts with existing container/pod name", alias)
+ }
+ }
+
+ netAliasesBkt := allAliasesBkt.Bucket([]byte(net))
+ if netAliasesBkt != nil {
+ for _, alias := range aliases {
+ aliasExist := netAliasesBkt.Get([]byte(alias))
+ if aliasExist != nil {
+ return errors.Wrapf(define.ErrAliasExists, "network alias %q already exists for network %q", alias, net)
+ }
+ }
+ }
+ hasNet := false
+ for _, testNet := range ctr.config.Networks {
+ if testNet == net {
+ hasNet = true
+ break
+ }
+ }
+ if !hasNet {
+ return errors.Wrapf(define.ErrInvalidArg, "container %s has network aliases for network %q but is not part of that network", ctr.ID(), net)
+ }
+ }
+
// No overlapping containers
// Add the new container to the DB
if err := idsBucket.Put(ctrID, ctrName); err != nil {
@@ -635,6 +690,63 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
return errors.Wrapf(err, "error adding container %s to all containers bucket in DB", ctr.ID())
}
+ // Check aliases for all networks, remove conflicts with the
+ // container name.
+ for _, net := range ctr.config.Networks {
+ netAliasesBkt := allAliasesBkt.Bucket([]byte(net))
+ if netAliasesBkt == nil {
+ continue
+ }
+
+ otherCtrID := netAliasesBkt.Get(ctrName)
+ if otherCtrID == nil {
+ continue
+ }
+
+ if err := netAliasesBkt.Delete(ctrName); err != nil {
+ return errors.Wrapf(err, "error removing container %s name from network aliases for network %s", ctr.ID(), net)
+ }
+
+ // We now need to remove from the other container.
+ // To do this, we work through the container bucket,
+ // then its aliases bucket, then its aliases for this
+ // specific network, then we remove the alias.
+ // Just slightly ridiculous. Just slightly.
+ otherCtr := ctrBucket.Bucket(otherCtrID)
+ if otherCtr == nil {
+ // The state is inconsistent, but we can't do
+ // much...
+ logrus.Errorf("Container %s referred to by network alias but not present in state", string(otherCtrID))
+ continue
+ }
+ otherCtrAliases := otherCtr.Bucket(aliasesBkt)
+ if otherCtrAliases == nil {
+ logrus.Errorf("Container %s is missing aliases but but has an alias", string(otherCtrID))
+ continue
+ }
+ otherCtrNetworkAliases := otherCtrAliases.Bucket([]byte(net))
+ if otherCtrNetworkAliases == nil {
+ logrus.Errorf("Container %s is missing network aliases bucket for network %s but has alias in that network", string(otherCtrID), net)
+ }
+ if otherCtrNetworkAliases.Get(ctrName) != nil {
+ if err := otherCtrNetworkAliases.Delete(ctrName); err != nil {
+ return errors.Wrapf(err, "error removing container %s name from network %s aliases of container %s", ctr.Name(), net, string(otherCtrID))
+ }
+ }
+ }
+
+ for net, aliases := range ctr.config.NetworkAliases {
+ netAliasesBkt, err := allAliasesBkt.CreateBucketIfNotExists([]byte(net))
+ if err != nil {
+ return errors.Wrapf(err, "error creating network aliases bucket for network %q", net)
+ }
+ for _, alias := range aliases {
+ if err := netAliasesBkt.Put([]byte(alias), ctrID); err != nil {
+ return errors.Wrapf(err, "error adding container %s network alias %q to network %q", ctr.ID(), alias, net)
+ }
+ }
+ }
+
newCtrBkt, err := ctrBucket.CreateBucket(ctrID)
if err != nil {
return errors.Wrapf(err, "error adding container %s bucket to DB", ctr.ID())
@@ -661,6 +773,35 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
return errors.Wrapf(err, "error adding container %s netns path to DB", ctr.ID())
}
}
+ if ctr.config.Networks != nil {
+ ctrNetworksBkt, err := newCtrBkt.CreateBucket(networksBkt)
+ if err != nil {
+ return errors.Wrapf(err, "error creating networks bucket for container %s", ctr.ID())
+ }
+ for _, network := range ctr.config.Networks {
+ if err := ctrNetworksBkt.Put([]byte(network), ctrID); err != nil {
+ return errors.Wrapf(err, "error adding network %q to networks bucket for container %s", network, ctr.ID())
+ }
+ }
+ }
+ if ctr.config.NetworkAliases != nil {
+ ctrAliasesBkt, err := newCtrBkt.CreateBucket(aliasesBkt)
+ if err != nil {
+ return errors.Wrapf(err, "error creating network aliases bucket for container %s", ctr.ID())
+ }
+ for net, aliases := range ctr.config.NetworkAliases {
+ netAliasesBkt, err := ctrAliasesBkt.CreateBucket([]byte(net))
+ if err != nil {
+ return errors.Wrapf(err, "error creating network aliases bucket for network %q in container %s", net, ctr.ID())
+ }
+ for _, alias := range aliases {
+ if err := netAliasesBkt.Put([]byte(alias), ctrID); err != nil {
+ return errors.Wrapf(err, "error creating network alias %q in network %q for container %s", alias, net, ctr.ID())
+ }
+ }
+ }
+ }
+
if _, err := newCtrBkt.CreateBucket(dependenciesBkt); err != nil {
return errors.Wrapf(err, "error creating dependencies bucket for container %s", ctr.ID())
}
@@ -857,6 +998,49 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
return errors.Wrapf(define.ErrCtrExists, "container %s is a dependency of the following containers: %s", ctr.ID(), strings.Join(deps, ", "))
}
+ // Does the container have any network aliases?
+ ctrNetAliasesBkt := ctrExists.Bucket(aliasesBkt)
+ if ctrNetAliasesBkt != nil {
+ allAliasesBkt, err := getAliasesBucket(tx)
+ if err != nil {
+ return err
+ }
+ ctrNetworksBkt := ctrExists.Bucket(networksBkt)
+ // Internal state mismatch if this doesn't exist - we'll just
+ // assume there are no aliases in that case.
+ if ctrNetworksBkt != nil {
+ // This is a little gross. Iterate through all networks
+ // the container is joined to. Check if we have aliases
+ // for them. If we do have such aliases, remove all of
+ // them from the global aliases table for that network.
+ err = ctrNetworksBkt.ForEach(func(network, v []byte) error {
+ netAliasesBkt := ctrNetAliasesBkt.Bucket(network)
+ if netAliasesBkt == nil {
+ return nil
+ }
+ netAllAliasesBkt := allAliasesBkt.Bucket(network)
+ if netAllAliasesBkt == nil {
+ // Again the state is inconsistent here,
+ // but the best we can do is try and
+ // recover by ignoring it.
+ return nil
+ }
+ return netAliasesBkt.ForEach(func(alias, v []byte) error {
+ // We don't want to hard-fail on a
+ // missing alias, so continue if we hit
+ // errors.
+ if err := netAllAliasesBkt.Delete(alias); err != nil {
+ logrus.Errorf("Error removing alias %q from network %q when removing container %s", string(alias), string(network), ctr.ID())
+ }
+ return nil
+ })
+ })
+ if err != nil {
+ return err
+ }
+ }
+ }
+
if err := ctrBucket.DeleteBucket(ctrID); err != nil {
return errors.Wrapf(define.ErrInternal, "error deleting container %s from DB", ctr.ID())
}
diff --git a/libpod/container_config.go b/libpod/container_config.go
index 0006613bc..d73fbb42f 100644
--- a/libpod/container_config.go
+++ b/libpod/container_config.go
@@ -241,6 +241,15 @@ type ContainerNetworkConfig struct {
NetMode namespaces.NetworkMode `json:"networkMode,omitempty"`
// NetworkOptions are additional options for each network
NetworkOptions map[string][]string `json:"network_options,omitempty"`
+ // NetworkAliases are aliases that will be added to each network.
+ // These are additional names that this container can be accessed as via
+ // DNS when the CNI dnsname plugin is in use.
+ // Please note that these can be altered at runtime. As such, the actual
+ // list is stored in the database and should be retrieved from there;
+ // this is only the set of aliases the container was *created with*.
+ // Formatted as map of network name to aliases. All network names must
+ // be present in the Networks list above.
+ NetworkAliases map[string][]string `json:"network_aliases,omitempty"`
}
// ContainerImageConfig is an embedded sub-config providing image configuration
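For illustration, the two network fields are expected to pair up as in the sketch below. The network and alias names are invented, and Networks is assumed to be the existing []string field on ContainerNetworkConfig that ctr.config.Networks refers to throughout this patch; every key of NetworkAliases must also appear in Networks, which the validation added in libpod/container_validate.go (next file) enforces:

cfg := ContainerNetworkConfig{
	Networks: []string{"frontend", "backend"},
	NetworkAliases: map[string][]string{
		"frontend": {"web", "www"}, // each key must also be listed in Networks
		"backend":  {"api"},
	},
}
_ = cfg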
diff --git a/libpod/container_validate.go b/libpod/container_validate.go
index 68cc095b7..fa809436e 100644
--- a/libpod/container_validate.go
+++ b/libpod/container_validate.go
@@ -115,5 +115,16 @@ func (c *Container) validate() error {
destinations[vol.Dest] = true
}
+ // Check that networks and network aliases match up.
+ ctrNets := make(map[string]bool)
+ for _, net := range c.config.Networks {
+ ctrNets[net] = true
+ }
+ for net := range c.config.NetworkAliases {
+ if _, ok := ctrNets[net]; !ok {
+ return errors.Wrapf(define.ErrNoSuchNetwork, "container tried to set network aliases for network %s but is not connected to the network", net)
+ }
+ }
+
return nil
}
diff --git a/libpod/define/errors.go b/libpod/define/errors.go
index 1b21cd1ce..27c5febf4 100644
--- a/libpod/define/errors.go
+++ b/libpod/define/errors.go
@@ -30,6 +30,13 @@ var (
// not exist.
ErrNoSuchExecSession = errors.New("no such exec session")
+ // ErrNoAliases indicates that the container does not have any network
+ // aliases.
+ ErrNoAliases = errors.New("no aliases for container")
+ // ErrNoAliasesForNetwork indicates that the container has no aliases
+ // for a specific network.
+ ErrNoAliasesForNetwork = errors.New("no aliases for network")
+
// ErrCtrExists indicates a container with the same name or ID already
// exists
ErrCtrExists = errors.New("container already exists")
@@ -42,6 +49,9 @@ var (
// ErrExecSessionExists indicates an exec session with the same ID
// already exists.
ErrExecSessionExists = errors.New("exec session already exists")
+ // ErrAliasExists indicates that a network alias with the same name
+ // already exists in the network.
+ ErrAliasExists = errors.New("alias already exists")
// ErrCtrStateInvalid indicates a container is in an improper state for
// the requested operation
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 0de25a6ef..ba4c70c6b 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -31,6 +31,10 @@ type InMemoryState struct {
ctrExecSessions map[string][]string
// Maps pod ID to a map of container ID to container struct.
podContainers map[string]map[string]*Container
+ // Maps network name to alias to container ID
+ networkAliases map[string]map[string]string
+ // Maps container ID to network name to list of aliases.
+ ctrNetworkAliases map[string]map[string][]string
// Global name registry - ensures name uniqueness and performs lookups.
nameIndex *registrar.Registrar
// Global ID registry - ensures ID uniqueness and performs lookups.
@@ -65,6 +69,9 @@ func NewInMemoryState() (State, error) {
state.podContainers = make(map[string]map[string]*Container)
+ state.networkAliases = make(map[string]map[string]string)
+ state.ctrNetworkAliases = make(map[string]map[string][]string)
+
state.nameIndex = registrar.NewRegistrar()
state.idIndex = truncindex.NewTruncIndex([]string{})
@@ -278,6 +285,40 @@ func (s *InMemoryState) AddContainer(ctr *Container) error {
return err
}
+ // Check networks
+ for _, net := range ctr.config.Networks {
+ if net == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names cannot be empty")
+ }
+ }
+
+ // Check network aliases
+ for network, aliases := range ctr.config.NetworkAliases {
+ inNet := false
+ for _, net := range ctr.config.Networks {
+ if net == network {
+ inNet = true
+ break
+ }
+ }
+ if !inNet {
+ return errors.Wrapf(define.ErrInvalidArg, "container %s has network aliases for network %q but is not joined to network", ctr.ID(), network)
+ }
+
+ allNetAliases, ok := s.networkAliases[network]
+ if ok {
+ for _, alias := range aliases {
+ // Check if alias is a name
+ if _, err := s.nameIndex.Get(alias); err == nil {
+ return define.ErrInvalidArg
+ }
+ if _, ok := allNetAliases[alias]; ok {
+ return define.ErrAliasExists
+ }
+ }
+ }
+ }
+
// There are potential race conditions with this
// But in-memory state is intended purely for testing and not production
// use, so this should be fine.
@@ -334,6 +375,48 @@ func (s *InMemoryState) AddContainer(ctr *Container) error {
s.addCtrToVolDependsMap(ctr.ID(), vol.Name)
}
+ for _, network := range ctr.config.Networks {
+ allNetAliases, ok := s.networkAliases[network]
+ if !ok {
+ continue
+ }
+ otherCtrID, ok := allNetAliases[ctr.Name()]
+ if !ok {
+ continue
+ }
+ delete(allNetAliases, ctr.Name())
+
+ otherCtrAliases, ok := s.ctrNetworkAliases[otherCtrID]
+ if !ok {
+ continue
+ }
+ otherCtrNetAliases, ok := otherCtrAliases[network]
+ if !ok {
+ continue
+ }
+ newAliases := []string{}
+ for _, alias := range otherCtrNetAliases {
+ if alias != ctr.Name() {
+ newAliases = append(newAliases, alias)
+ }
+ }
+ otherCtrAliases[network] = newAliases
+ }
+
+ // Add network aliases
+ for network, aliases := range ctr.config.NetworkAliases {
+ allNetAliases, ok := s.networkAliases[network]
+ if !ok {
+ allNetAliases = make(map[string]string)
+ s.networkAliases[network] = allNetAliases
+ }
+
+ for _, alias := range aliases {
+ allNetAliases[alias] = ctr.ID()
+ }
+ }
+ s.ctrNetworkAliases[ctr.ID()] = ctr.config.NetworkAliases
+
return nil
}
@@ -396,6 +479,20 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error {
s.removeCtrFromVolDependsMap(ctr.ID(), vol.Name)
}
+ // Remove our network aliases
+ ctrAliases, ok := s.ctrNetworkAliases[ctr.ID()]
+ if ok {
+ for network, aliases := range ctrAliases {
+ netAliases, ok := s.networkAliases[network]
+ if ok {
+ for _, alias := range aliases {
+ delete(netAliases, alias)
+ }
+ }
+ }
+ delete(s.ctrNetworkAliases, ctr.ID())
+ }
+
return nil
}
@@ -472,6 +569,207 @@ func (s *InMemoryState) AllContainers() ([]*Container, error) {
return ctrs, nil
}
+// GetNetworkAliases returns network aliases for the given container in the
+// given network.
+func (s *InMemoryState) GetNetworkAliases(ctr *Container, network string) ([]string, error) {
+ if !ctr.valid {
+ return nil, define.ErrCtrRemoved
+ }
+
+ if network == "" {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ ctr, ok := s.containers[ctr.ID()]
+ if !ok {
+ return nil, define.ErrNoSuchCtr
+ }
+
+ inNet := false
+ for _, net := range ctr.config.Networks {
+ if net == network {
+ inNet = true
+ }
+ }
+ if !inNet {
+ return nil, define.ErrInvalidArg
+ }
+
+ ctrAliases, ok := s.ctrNetworkAliases[ctr.ID()]
+ if !ok {
+ return []string{}, nil
+ }
+ netAliases, ok := ctrAliases[network]
+ if !ok {
+ return []string{}, nil
+ }
+
+ return netAliases, nil
+}
+
+// GetAllNetworkAliases gets all network aliases for the given container.
+func (s *InMemoryState) GetAllNetworkAliases(ctr *Container) (map[string][]string, error) {
+ if !ctr.valid {
+ return nil, define.ErrCtrRemoved
+ }
+
+ ctr, ok := s.containers[ctr.ID()]
+ if !ok {
+ return nil, define.ErrNoSuchCtr
+ }
+
+ ctrAliases, ok := s.ctrNetworkAliases[ctr.ID()]
+ if !ok {
+ return map[string][]string{}, nil
+ }
+
+ return ctrAliases, nil
+}
+
+// SetNetworkAliases sets network aliases for the given container in the given
+// network.
+func (s *InMemoryState) SetNetworkAliases(ctr *Container, network string, aliases []string) error {
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if network == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ ctr, ok := s.containers[ctr.ID()]
+ if !ok {
+ return define.ErrNoSuchCtr
+ }
+
+ inNet := false
+ for _, net := range ctr.config.Networks {
+ if net == network {
+ inNet = true
+ }
+ }
+ if !inNet {
+ return define.ErrInvalidArg
+ }
+
+ ctrAliases, ok := s.ctrNetworkAliases[ctr.ID()]
+ if !ok {
+ ctrAliases = make(map[string][]string)
+ s.ctrNetworkAliases[ctr.ID()] = ctrAliases
+ }
+ netAliases, ok := ctrAliases[network]
+ if !ok {
+ netAliases = []string{}
+ ctrAliases[network] = netAliases
+ }
+
+ allAliases, ok := s.networkAliases[network]
+ if !ok {
+ allAliases = make(map[string]string)
+ s.networkAliases[network] = allAliases
+ }
+
+ for _, alias := range netAliases {
+ delete(allAliases, alias)
+ }
+
+ for _, newAlias := range aliases {
+ if _, ok := allAliases[newAlias]; ok {
+ return define.ErrAliasExists
+ }
+ allAliases[newAlias] = ctr.ID()
+ }
+
+ ctrAliases[network] = aliases
+
+ return nil
+}
+
+// RemoveNetworkAliases removes network aliases from the given container in the
+// given network.
+func (s *InMemoryState) RemoveNetworkAliases(ctr *Container, network string) error {
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if network == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ ctr, ok := s.containers[ctr.ID()]
+ if !ok {
+ return define.ErrNoSuchCtr
+ }
+
+ inNet := false
+ for _, net := range ctr.config.Networks {
+ if net == network {
+ inNet = true
+ }
+ }
+ if !inNet {
+ return define.ErrInvalidArg
+ }
+
+ ctrAliases, ok := s.ctrNetworkAliases[ctr.ID()]
+ if !ok {
+ ctrAliases = make(map[string][]string)
+ s.ctrNetworkAliases[ctr.ID()] = ctrAliases
+ }
+ netAliases, ok := ctrAliases[network]
+ if !ok {
+ netAliases = []string{}
+ ctrAliases[network] = netAliases
+ }
+
+ allAliases, ok := s.networkAliases[network]
+ if !ok {
+ allAliases = make(map[string]string)
+ s.networkAliases[network] = allAliases
+ }
+
+ for _, alias := range netAliases {
+ delete(allAliases, alias)
+ }
+
+ return nil
+}
+
+// GetAllAliasesForNetwork gets all the aliases for a single network.
+func (s *InMemoryState) GetAllAliasesForNetwork(network string) (map[string]string, error) {
+ if network == "" {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ allAliases, ok := s.networkAliases[network]
+ if !ok {
+ // Can't tell if the network exists.
+ // Assume it does.
+ return map[string]string{}, nil
+ }
+
+ return allAliases, nil
+}
+
+// RemoveAllAliasesForNetwork removes all the aliases for a given network.
+func (s *InMemoryState) RemoveAllAliasesForNetwork(network string) error {
+ if network == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names must not be empty")
+ }
+
+ if _, ok := s.networkAliases[network]; ok {
+ delete(s.networkAliases, network)
+ }
+
+ for _, ctrAliases := range s.ctrNetworkAliases {
+ if _, ok := ctrAliases[network]; ok {
+ delete(ctrAliases, network)
+ }
+ }
+
+ return nil
+}
+
// GetContainerConfig returns a container config from the database by full ID
func (s *InMemoryState) GetContainerConfig(id string) (*ContainerConfig, error) {
ctr, err := s.LookupContainer(id)
@@ -1116,6 +1414,40 @@ func (s *InMemoryState) AddContainerToPod(pod *Pod, ctr *Container) error {
return err
}
+ // Check networks
+ for _, net := range ctr.config.Networks {
+ if net == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "network names cannot be empty")
+ }
+ }
+
+ // Check network aliases
+ for network, aliases := range ctr.config.NetworkAliases {
+ inNet := false
+ for _, net := range ctr.config.Networks {
+ if net == network {
+ inNet = true
+ break
+ }
+ }
+ if !inNet {
+ return errors.Wrapf(define.ErrInvalidArg, "container %s has network aliases for network %q but is not joined to network", ctr.ID(), network)
+ }
+
+ allNetAliases, ok := s.networkAliases[network]
+ if ok {
+ for _, alias := range aliases {
+ // Check if alias is a name
+ if _, err := s.nameIndex.Get(alias); err == nil {
+ return define.ErrInvalidArg
+ }
+ if _, ok := allNetAliases[alias]; ok {
+ return define.ErrAliasExists
+ }
+ }
+ }
+ }
+
// Retrieve pod containers list
podCtrs, ok := s.podContainers[pod.ID()]
if !ok {
@@ -1188,6 +1520,53 @@ func (s *InMemoryState) AddContainerToPod(pod *Pod, ctr *Container) error {
s.addCtrToDependsMap(ctr.ID(), depCtr)
}
+ // Add container to volume dependencies
+ for _, vol := range ctr.config.NamedVolumes {
+ s.addCtrToVolDependsMap(ctr.ID(), vol.Name)
+ }
+
+ for _, network := range ctr.config.Networks {
+ allNetAliases, ok := s.networkAliases[network]
+ if !ok {
+ continue
+ }
+ otherCtrID, ok := allNetAliases[ctr.Name()]
+ if !ok {
+ continue
+ }
+ delete(allNetAliases, ctr.Name())
+
+ otherCtrAliases, ok := s.ctrNetworkAliases[otherCtrID]
+ if !ok {
+ continue
+ }
+ otherCtrNetAliases, ok := otherCtrAliases[network]
+ if !ok {
+ continue
+ }
+ newAliases := []string{}
+ for _, alias := range otherCtrNetAliases {
+ if alias != ctr.Name() {
+ newAliases = append(newAliases, alias)
+ }
+ }
+ otherCtrAliases[network] = newAliases
+ }
+
+ // Add network aliases
+ for network, aliases := range ctr.config.NetworkAliases {
+ allNetAliases, ok := s.networkAliases[network]
+ if !ok {
+ allNetAliases = make(map[string]string)
+ s.networkAliases[network] = allNetAliases
+ }
+
+ for _, alias := range aliases {
+ allNetAliases[alias] = ctr.ID()
+ }
+ }
+ s.ctrNetworkAliases[ctr.ID()] = ctr.config.NetworkAliases
+
return nil
}
@@ -1268,6 +1647,20 @@ func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
s.removeCtrFromDependsMap(ctr.ID(), depCtr)
}
+ // Remove our network aliases
+ ctrAliases, ok := s.ctrNetworkAliases[ctr.ID()]
+ if ok {
+ for network, aliases := range ctrAliases {
+ netAliases, ok := s.networkAliases[network]
+ if ok {
+ for _, alias := range aliases {
+ delete(netAliases, alias)
+ }
+ }
+ }
+ delete(s.ctrNetworkAliases, ctr.ID())
+ }
+
return nil
}
diff --git a/libpod/options.go b/libpod/options.go
index 060887b7e..0f55f34a3 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1506,6 +1506,20 @@ func WithCreateWorkingDir() CtrCreateOption {
}
}
+// WithNetworkAliases sets network aliases for the container.
+// Accepts a map of network name to aliases.
+func WithNetworkAliases(aliases map[string][]string) CtrCreateOption {
+ return func(ctr *Container) error {
+ if ctr.valid {
+ return define.ErrCtrFinalized
+ }
+
+ ctr.config.NetworkAliases = aliases
+
+ return nil
+ }
+}
+
// Volume Creation Options
// WithVolumeName sets the name of the volume.
diff --git a/libpod/state.go b/libpod/state.go
index 44632b02f..183f773b5 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -98,6 +98,21 @@ type State interface {
// returned.
AllContainers() ([]*Container, error)
+ // Get network aliases for the given container in the given network.
+ GetNetworkAliases(ctr *Container, network string) ([]string, error)
+ // Get all network aliases for the given container.
+ GetAllNetworkAliases(ctr *Container) (map[string][]string, error)
+ // Set network aliases for the given container in the given network.
+ SetNetworkAliases(ctr *Container, network string, aliases []string) error
+ // Remove network aliases for the given container in the given network.
+ RemoveNetworkAliases(ctr *Container, network string) error
+ // GetAllAliasesForNetwork returns all the aliases for a given
+ // network. Returns a map of alias to container ID.
+ GetAllAliasesForNetwork(network string) (map[string]string, error)
+ // RemoveAllAliasesForNetwork removes all the aliases for a given
+ // network.
+ RemoveAllAliasesForNetwork(network string) error
+
// Return a container config from the database by full ID
GetContainerConfig(id string) (*ContainerConfig, error)
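A typical caller-side flow against these new State methods mirrors the tests added in libpod/state_test.go below. This is a hypothetical sketch only: cycleAliases is not part of the patch, the fmt import is assumed, and ctr is assumed to be a valid container that joined "testnet" and was created with aliases for that network (the Bolt backend requires the container's aliases bucket to already exist):

func cycleAliases(s State, ctr *Container) error {
	if err := s.SetNetworkAliases(ctr, "testnet", []string{"db", "primary"}); err != nil {
		return err
	}
	aliases, err := s.GetNetworkAliases(ctr, "testnet") // -> ["db", "primary"]
	if err != nil {
		return err
	}
	fmt.Println(aliases)
	return s.RemoveNetworkAliases(ctr, "testnet")
}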
diff --git a/libpod/state_test.go b/libpod/state_test.go
index 373feb6e0..cf41270bf 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -1319,6 +1319,250 @@ func TestCannotUsePodAsDependency(t *testing.T) {
})
}
+func TestAddContainerEmptyNetworkNameErrors(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ testCtr.config.Networks = []string{""}
+
+ err = state.AddContainer(testCtr)
+ assert.Error(t, err)
+ })
+}
+
+func TestAddContainerNetworkAliasesButNoMatchingNetwork(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ testCtr.config.Networks = []string{"test1"}
+ testCtr.config.NetworkAliases = make(map[string][]string)
+ testCtr.config.NetworkAliases["test2"] = []string{"alias1"}
+
+ err = state.AddContainer(testCtr)
+ assert.Error(t, err)
+ })
+}
+
+func TestAddContainerNetworkAliasConflictWithName(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ netName := "testnet"
+ testCtr1.config.Networks = []string{netName}
+ testCtr1.config.NetworkAliases = make(map[string][]string)
+ testCtr1.config.NetworkAliases[netName] = []string{"alias1"}
+
+ testCtr2, err := getTestCtr2(manager)
+ assert.NoError(t, err)
+
+ testCtr2.config.Networks = []string{netName}
+ testCtr2.config.NetworkAliases = make(map[string][]string)
+ testCtr2.config.NetworkAliases[netName] = []string{testCtr1.Name()}
+
+ err = state.AddContainer(testCtr1)
+ assert.NoError(t, err)
+
+ err = state.AddContainer(testCtr2)
+ assert.Error(t, err)
+ })
+}
+
+func TestAddContainerNetworkAliasConflictWithAlias(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ netName := "testnet"
+ aliasName := "alias1"
+ testCtr1.config.Networks = []string{netName}
+ testCtr1.config.NetworkAliases = make(map[string][]string)
+ testCtr1.config.NetworkAliases[netName] = []string{aliasName}
+
+ testCtr2, err := getTestCtr2(manager)
+ assert.NoError(t, err)
+
+ testCtr2.config.Networks = []string{netName}
+ testCtr2.config.NetworkAliases = make(map[string][]string)
+ testCtr2.config.NetworkAliases[netName] = []string{aliasName}
+
+ err = state.AddContainer(testCtr1)
+ assert.NoError(t, err)
+
+ err = state.AddContainer(testCtr2)
+ assert.Error(t, err)
+ })
+}
+
+func TestAddContainerNetworkAliasConflictWithAliasButDifferentNets(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ netName := "testnet"
+ aliasName := "alias1"
+ testCtr1.config.Networks = []string{netName}
+ testCtr1.config.NetworkAliases = make(map[string][]string)
+ testCtr1.config.NetworkAliases[netName] = []string{aliasName}
+
+ testCtr2, err := getTestCtr2(manager)
+ assert.NoError(t, err)
+
+ netName2 := "testnet2"
+ testCtr2.config.Networks = []string{netName2}
+ testCtr2.config.NetworkAliases = make(map[string][]string)
+ testCtr2.config.NetworkAliases[netName2] = []string{aliasName}
+
+ err = state.AddContainer(testCtr1)
+ assert.NoError(t, err)
+
+ err = state.AddContainer(testCtr2)
+ assert.NoError(t, err)
+ })
+}
+
+func TestAddContainerNameConflictsWithAliasRemovesAlias(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ testCtr2, err := getTestCtr2(manager)
+ assert.NoError(t, err)
+
+ netName := "testnet"
+ aliasName := testCtr2.Name()
+ testCtr1.config.Networks = []string{netName}
+ testCtr1.config.NetworkAliases = make(map[string][]string)
+ testCtr1.config.NetworkAliases[netName] = []string{aliasName}
+
+ testCtr2.config.Networks = []string{netName}
+
+ err = state.AddContainer(testCtr1)
+ assert.NoError(t, err)
+
+ err = state.AddContainer(testCtr2)
+ assert.NoError(t, err)
+
+ aliases, err := state.GetNetworkAliases(testCtr1, netName)
+ assert.NoError(t, err)
+ assert.Equal(t, 0, len(aliases))
+ })
+}
+
+func TestNetworkAliasAddAndRemoveSingleContainer(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ netName := "testnet"
+ testCtr.config.Networks = []string{netName}
+ testCtr.config.NetworkAliases = make(map[string][]string)
+ testCtr.config.NetworkAliases[netName] = []string{"alias1"}
+
+ startAliases, err := state.GetAllAliasesForNetwork(netName)
+ assert.NoError(t, err)
+ assert.Equal(t, 0, len(startAliases))
+
+ err = state.AddContainer(testCtr)
+ assert.NoError(t, err)
+
+ oneAlias, err := state.GetAllAliasesForNetwork(netName)
+ assert.NoError(t, err)
+ assert.Equal(t, 1, len(oneAlias))
+ assert.Equal(t, testCtr.ID(), oneAlias["alias1"])
+
+ allAliases, err := state.GetAllNetworkAliases(testCtr)
+ assert.NoError(t, err)
+ assert.Equal(t, 1, len(allAliases))
+ netAliases, ok := allAliases[netName]
+ assert.True(t, ok)
+ assert.Equal(t, 1, len(netAliases))
+ assert.Equal(t, "alias1", netAliases[0])
+
+ ctrNetAliases, err := state.GetNetworkAliases(testCtr, netName)
+ assert.NoError(t, err)
+ assert.Equal(t, 1, len(ctrNetAliases))
+ assert.Equal(t, "alias1", ctrNetAliases[0])
+
+ err = state.RemoveContainer(testCtr)
+ assert.NoError(t, err)
+
+ noAliases, err := state.GetAllAliasesForNetwork(netName)
+ assert.NoError(t, err)
+ assert.Equal(t, 0, len(noAliases))
+ })
+}
+
+func TestNetworkAliasAddAndRemoveTwoContainers(t *testing.T) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
+ assert.NoError(t, err)
+
+ netName := "testnet"
+ testCtr1.config.Networks = []string{netName}
+ testCtr1.config.NetworkAliases = make(map[string][]string)
+ testCtr1.config.NetworkAliases[netName] = []string{"alias1"}
+
+ testCtr2, err := getTestCtr2(manager)
+ assert.NoError(t, err)
+
+ testCtr2.config.Networks = []string{netName}
+ testCtr2.config.NetworkAliases = make(map[string][]string)
+ testCtr2.config.NetworkAliases[netName] = []string{"alias2"}
+
+ startAliases, err := state.GetAllAliasesForNetwork(netName)
+ assert.NoError(t, err)
+ assert.Equal(t, 0, len(startAliases))
+
+ err = state.AddContainer(testCtr1)
+ assert.NoError(t, err)
+
+ oneAlias, err := state.GetAllAliasesForNetwork(netName)
+ assert.NoError(t, err)
+ assert.Equal(t, 1, len(oneAlias))
+ assert.Equal(t, testCtr1.ID(), oneAlias["alias1"])
+
+ err = state.AddContainer(testCtr2)
+ assert.NoError(t, err)
+
+ twoAliases, err := state.GetAllAliasesForNetwork(netName)
+ assert.NoError(t, err)
+ assert.Equal(t, 2, len(twoAliases))
+ assert.Equal(t, testCtr1.ID(), twoAliases["alias1"])
+ assert.Equal(t, testCtr2.ID(), twoAliases["alias2"])
+
+ allAliases, err := state.GetAllNetworkAliases(testCtr1)
+ assert.NoError(t, err)
+ assert.Equal(t, 1, len(allAliases))
+ netAliases, ok := allAliases[netName]
+ assert.True(t, ok)
+ assert.Equal(t, 1, len(netAliases))
+ assert.Equal(t, "alias1", netAliases[0])
+
+ ctrNetAliases, err := state.GetNetworkAliases(testCtr1, netName)
+ assert.NoError(t, err)
+ assert.Equal(t, 1, len(ctrNetAliases))
+ assert.Equal(t, "alias1", ctrNetAliases[0])
+
+ err = state.RemoveContainer(testCtr2)
+ assert.NoError(t, err)
+
+ oneAlias, err = state.GetAllAliasesForNetwork(netName)
+ assert.NoError(t, err)
+ assert.Equal(t, 1, len(oneAlias))
+ assert.Equal(t, testCtr1.ID(), oneAlias["alias1"])
+
+ err = state.RemoveContainer(testCtr1)
+ assert.NoError(t, err)
+
+ noAliases, err := state.GetAllAliasesForNetwork(netName)
+ assert.NoError(t, err)
+ assert.Equal(t, 0, len(noAliases))
+ })
+}
+
func TestCannotUseBadIDAsDependency(t *testing.T) {
runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
testCtr, err := getTestCtr1(manager)
diff --git a/pkg/api/server/idle/tracker.go b/pkg/api/server/idle/tracker.go
index 50e41b7bf..687ebd7d4 100644
--- a/pkg/api/server/idle/tracker.go
+++ b/pkg/api/server/idle/tracker.go
@@ -41,11 +41,12 @@ func (t *Tracker) ConnState(conn net.Conn, state http.ConnState) {
logrus.Debugf("IdleTracker %p:%v %dm+%dh/%dt connection(s)", conn, state, len(t.managed), t.hijacked, t.TotalConnections())
switch state {
- case http.StateNew, http.StateActive:
+ case http.StateNew:
+ t.total++
+ case http.StateActive:
// stop the API timer when the server transitions any connection to an "active" state
t.managed[conn] = struct{}{}
t.timer.Stop()
- t.total++
case http.StateHijacked:
// hijacked connections should call Close() when finished.
		// Note: If a handler hijacks a connection and then doesn't Close() it,
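
With this change the tracker bumps its total connection count exactly once, when a connection first reaches http.StateNew, while http.StateActive only marks the connection as managed and stops the idle-shutdown timer; previously the counter was incremented on every New/Active transition. A minimal, hypothetical sketch of how such a tracker plugs into net/http's ConnState hook (idleTracker and its fields are illustrative, not Podman's actual wiring):

package main

import (
	"net"
	"net/http"
	"sync"
	"time"
)

type idleTracker struct {
	mu      sync.Mutex
	managed map[net.Conn]struct{}
	total   int
	timer   *time.Timer
}

func (t *idleTracker) connState(conn net.Conn, state http.ConnState) {
	t.mu.Lock()
	defer t.mu.Unlock()
	switch state {
	case http.StateNew:
		t.total++ // each accepted connection is counted exactly once
	case http.StateActive:
		t.managed[conn] = struct{}{} // a busy connection holds off the idle timer
		t.timer.Stop()
	case http.StateClosed:
		delete(t.managed, conn) // hijack handling omitted in this sketch
	}
}

func main() {
	tracker := &idleTracker{
		managed: make(map[net.Conn]struct{}),
		timer:   time.NewTimer(5 * time.Minute),
	}
	srv := &http.Server{Addr: "127.0.0.1:8080", ConnState: tracker.connState}
	_ = srv.ListenAndServe()
}
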
diff --git a/test/e2e/exec_test.go b/test/e2e/exec_test.go
index a698cd4b3..f61f52589 100644
--- a/test/e2e/exec_test.go
+++ b/test/e2e/exec_test.go
@@ -87,14 +87,12 @@ var _ = Describe("Podman exec", func() {
session := podmanTest.Podman([]string{"exec", "--env", "FOO=BAR", "test1", "printenv", "FOO"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- match, _ := session.GrepString("BAR")
- Expect(match).Should(BeTrue())
+ Expect(session.OutputToString()).To(Equal("BAR"))
session = podmanTest.Podman([]string{"exec", "--env", "PATH=/bin", "test1", "printenv", "PATH"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- match, _ = session.GrepString("/bin")
- Expect(match).Should(BeTrue())
+ Expect(session.OutputToString()).To(Equal("/bin"))
})
It("podman exec os.Setenv env", func() {
@@ -107,8 +105,7 @@ var _ = Describe("Podman exec", func() {
session := podmanTest.Podman([]string{"exec", "--env", "FOO", "test1", "printenv", "FOO"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- match, _ := session.GrepString("BAR")
- Expect(match).Should(BeTrue())
+ Expect(session.OutputToString()).To(Equal("BAR"))
os.Unsetenv("FOO")
})
@@ -142,8 +139,7 @@ var _ = Describe("Podman exec", func() {
session := podmanTest.Podman([]string{"exec", "--interactive", "--tty", "test1", "/usr/bin/stty", "--all"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- match, _ := session.GrepString(" onlcr")
- Expect(match).Should(BeTrue())
+ Expect(session.OutputToString()).To(ContainSubstring(" onlcr"))
})
It("podman exec simple command with user", func() {
@@ -199,14 +195,12 @@ var _ = Describe("Podman exec", func() {
session := podmanTest.Podman([]string{"exec", "--workdir", "/tmp", "test1", "pwd"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- match, _ := session.GrepString("/tmp")
- Expect(match).Should(BeTrue())
+ Expect(session.OutputToString()).To(Equal("/tmp"))
session = podmanTest.Podman([]string{"exec", "-w", "/tmp", "test1", "pwd"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- match, _ = session.GrepString("/tmp")
- Expect(match).Should(BeTrue())
+ Expect(session.OutputToString()).To(Equal("/tmp"))
})
It("podman exec missing working directory test", func() {
@@ -280,7 +274,7 @@ var _ = Describe("Podman exec", func() {
exec := podmanTest.Podman([]string{"exec", "-ti", ctrName2, "id"})
exec.WaitWithDefaultTimeout()
Expect(exec.ExitCode()).To(Equal(0))
- Expect(strings.Contains(exec.OutputToString(), fmt.Sprintf("%s(%s)", gid, groupName))).To(BeTrue())
+ Expect(exec.OutputToString()).To(ContainSubstring(fmt.Sprintf("%s(%s)", gid, groupName)))
})
It("podman exec preserves container groups with --user and --group-add", func() {
@@ -300,9 +294,9 @@ RUN useradd -u 1000 auser`
exec.WaitWithDefaultTimeout()
Expect(exec.ExitCode()).To(Equal(0))
output := exec.OutputToString()
- Expect(strings.Contains(output, "4000(first)")).To(BeTrue())
- Expect(strings.Contains(output, "4001(second)")).To(BeTrue())
- Expect(strings.Contains(output, "1000(auser)")).To(BeTrue())
+ Expect(output).To(ContainSubstring("4000(first)"))
+ Expect(output).To(ContainSubstring("4001(second)"))
+ Expect(output).To(ContainSubstring("1000(auser)"))
// Kill the container just so the test does not take 15 seconds to stop.
kill := podmanTest.Podman([]string{"kill", ctrName})
@@ -323,7 +317,7 @@ RUN useradd -u 1000 auser`
data := podmanTest.InspectContainer(ctrName)
Expect(len(data)).To(Equal(1))
Expect(len(data[0].ExecIDs)).To(Equal(1))
- Expect(strings.Contains(exec1.OutputToString(), data[0].ExecIDs[0])).To(BeTrue())
+ Expect(exec1.OutputToString()).To(ContainSubstring(data[0].ExecIDs[0]))
exec2 := podmanTest.Podman([]string{"exec", "-t", "-i", ctrName, "ps", "-a"})
exec2.WaitWithDefaultTimeout()
diff --git a/test/e2e/pod_infra_container_test.go b/test/e2e/pod_infra_container_test.go
index 797d51c33..7ec36b2f8 100644
--- a/test/e2e/pod_infra_container_test.go
+++ b/test/e2e/pod_infra_container_test.go
@@ -383,12 +383,14 @@ var _ = Describe("Podman pod create", func() {
podID := session.OutputToString()
// verify we can add a host to the infra's /etc/hosts
- session = podmanTest.Podman([]string{"run", "--pod", podID, "--add-host", "foobar:127.0.0.1", BB, "ping", "-c", "1", "foobar"})
+			// N.B.: Using alpine for ping, since BB ping throws a
+			// permission-denied error as of Fedora 33.
+ session = podmanTest.Podman([]string{"run", "--pod", podID, "--add-host", "foobar:127.0.0.1", ALPINE, "ping", "-c", "1", "foobar"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
// verify we can see the other hosts of infra's /etc/hosts
- session = podmanTest.Podman([]string{"run", "--pod", podID, BB, "ping", "-c", "1", "foobar"})
+ session = podmanTest.Podman([]string{"run", "--pod", podID, ALPINE, "ping", "-c", "1", "foobar"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
})
diff --git a/troubleshooting.md b/troubleshooting.md
index c42afb642..2e0abae21 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -680,3 +680,9 @@ file `/etc/systemd/system/user@.service.d/delegate.conf` with the contents:
After logging out and logging back in, you should have permission to set CPU
limits.
+
+### 26) `exec container process '/bin/sh': Exec format error` (or a binary other than `/bin/sh`)
+
+This can happen when running a container from an image built for a different architecture than the one you are running on.
+
+For example, if a remote repository only has, and thus sends you, a `linux/arm64` _OS/ARCH_ image but you run on `linux/amd64` (as happened in https://github.com/openMF/community-app/issues/3323 due to https://github.com/timbru31/docker-ruby-node/issues/564).
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 82bfa5ce3..63f23d2af 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.23.8
+1.23.9
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
deleted file mode 100644
index d28ba9d69..000000000
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package homedir
-
-// Copyright 2013-2018 Docker, Inc.
-// NOTE: this package has originally been copied from github.com/docker/docker.
-
-import (
- "errors"
- "os"
- "path/filepath"
- "strings"
-)
-
-// GetRuntimeDir returns XDG_RUNTIME_DIR.
-// XDG_RUNTIME_DIR is typically configured via pam_systemd.
-// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetRuntimeDir() (string, error) {
- if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
- return xdgRuntimeDir, nil
- }
- return "", errors.New("could not get XDG_RUNTIME_DIR")
-}
-
-// StickRuntimeDirContents sets the sticky bit on files that are under
-// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
-//
-// StickyRuntimeDir returns slice of sticked files.
-// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func StickRuntimeDirContents(files []string) ([]string, error) {
- runtimeDir, err := GetRuntimeDir()
- if err != nil {
- // ignore error if runtimeDir is empty
- return nil, nil
- }
- runtimeDir, err = filepath.Abs(runtimeDir)
- if err != nil {
- return nil, err
- }
- var sticked []string
- for _, f := range files {
- f, err = filepath.Abs(f)
- if err != nil {
- return sticked, err
- }
- if strings.HasPrefix(f, runtimeDir+"/") {
- if err = stick(f); err != nil {
- return sticked, err
- }
- sticked = append(sticked, f)
- }
- }
- return sticked, nil
-}
-
-func stick(f string) error {
- st, err := os.Stat(f)
- if err != nil {
- return err
- }
- m := st.Mode()
- m |= os.ModeSticky
- return os.Chmod(f, m)
-}
-
-// GetDataHome returns XDG_DATA_HOME.
-// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetDataHome() (string, error) {
- if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
- return xdgDataHome, nil
- }
- home := os.Getenv("HOME")
- if home == "" {
- return "", errors.New("could not get either XDG_DATA_HOME or HOME")
- }
- return filepath.Join(home, ".local", "share"), nil
-}
-
-// GetConfigHome returns XDG_CONFIG_HOME.
-// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetConfigHome() (string, error) {
- if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
- return xdgConfigHome, nil
- }
- home := os.Getenv("HOME")
- if home == "" {
- return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
- }
- return filepath.Join(home, ".config"), nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
index f7bcfb878..4f778c858 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
@@ -1,4 +1,4 @@
-// +build !linux
+// +build !linux,!darwin
package homedir
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
index dcadb7e8d..0274d037f 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
@@ -6,8 +6,12 @@ package homedir
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
+ "errors"
"os"
- "os/user"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage/pkg/unshare"
)
// Key returns the env var name for the user's home dir based on
@@ -25,13 +29,8 @@ func Key() string {
//
// If needing to do nss lookups, do not disable cgo or set osusergo.
func Get() string {
- home := os.Getenv(Key())
- if home == "" {
- if u, err := user.Current(); err == nil {
- return u.HomeDir
- }
- }
- return home
+ homedir, _ := unshare.HomeDir()
+ return homedir
}
// GetShortcutString returns the string that is shortcut to user's home directory
@@ -39,3 +38,88 @@ func Get() string {
func GetShortcutString() string {
return "~"
}
+
+// GetRuntimeDir returns XDG_RUNTIME_DIR.
+// XDG_RUNTIME_DIR is typically configured via pam_systemd.
+// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetRuntimeDir() (string, error) {
+ if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
+ return xdgRuntimeDir, nil
+ }
+ return "", errors.New("could not get XDG_RUNTIME_DIR")
+}
+
+// StickRuntimeDirContents sets the sticky bit on files that are under
+// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
+//
+// StickyRuntimeDir returns slice of sticked files.
+// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func StickRuntimeDirContents(files []string) ([]string, error) {
+ runtimeDir, err := GetRuntimeDir()
+ if err != nil {
+ // ignore error if runtimeDir is empty
+ return nil, nil
+ }
+ runtimeDir, err = filepath.Abs(runtimeDir)
+ if err != nil {
+ return nil, err
+ }
+ var sticked []string
+ for _, f := range files {
+ f, err = filepath.Abs(f)
+ if err != nil {
+ return sticked, err
+ }
+ if strings.HasPrefix(f, runtimeDir+"/") {
+ if err = stick(f); err != nil {
+ return sticked, err
+ }
+ sticked = append(sticked, f)
+ }
+ }
+ return sticked, nil
+}
+
+func stick(f string) error {
+ st, err := os.Stat(f)
+ if err != nil {
+ return err
+ }
+ m := st.Mode()
+ m |= os.ModeSticky
+ return os.Chmod(f, m)
+}
+
+// GetDataHome returns XDG_DATA_HOME.
+// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetDataHome() (string, error) {
+ if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
+ return xdgDataHome, nil
+ }
+ home := Get()
+ if home == "" {
+ return "", errors.New("could not get either XDG_DATA_HOME or HOME")
+ }
+ return filepath.Join(home, ".local", "share"), nil
+}
+
+// GetConfigHome returns XDG_CONFIG_HOME.
+// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetConfigHome() (string, error) {
+ if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
+ return xdgConfigHome, nil
+ }
+ home := Get()
+ if home == "" {
+ return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
+ }
+ return filepath.Join(home, ".config"), nil
+}
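
GetDataHome and GetConfigHome now derive their fallback from Get(), which resolves the home directory through unshare.HomeDir() instead of reading $HOME directly, so rootless callers inside a user namespace still get a usable answer. A small usage sketch of the precedence, assuming only the vendored import path and function signatures shown above:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/homedir"
)

func main() {
	// XDG_DATA_HOME wins when set; otherwise <home>/.local/share is built
	// from the home directory resolved by Get().
	if dataHome, err := homedir.GetDataHome(); err == nil {
		fmt.Println("data home:", dataHome)
	}

	// Same precedence for configuration: XDG_CONFIG_HOME, then <home>/.config.
	if configHome, err := homedir.GetConfigHome(); err == nil {
		fmt.Println("config home:", configHome)
	}

	// XDG_RUNTIME_DIR has no fallback; an error simply means it is not set.
	if runtimeDir, err := homedir.GetRuntimeDir(); err == nil {
		fmt.Println("runtime dir:", runtimeDir)
	}
}
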
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
index 5ba8cc418..49ec544a3 100644
--- a/vendor/github.com/containers/storage/userns.go
+++ b/vendor/github.com/containers/storage/userns.go
@@ -221,94 +221,71 @@ outer:
return size, nil
}
+func minInt(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func maxInt(a, b int) int {
+ if a < b {
+ return b
+ }
+ return a
+}
+
// subtractHostIDs return the subtraction of the range USED from AVAIL. The range is specified
// by [HostID, HostID+Size).
// ContainerID is ignored.
func subtractHostIDs(avail idtools.IDMap, used idtools.IDMap) []idtools.IDMap {
- switch {
- case used.HostID <= avail.HostID && used.HostID+used.Size >= avail.HostID+avail.Size:
- return nil
- case used.HostID <= avail.HostID && used.HostID+used.Size > avail.HostID && used.HostID+used.Size < avail.HostID+avail.Size:
- newContainerID := avail.ContainerID + used.Size
- newHostID := used.HostID + used.Size
- r := idtools.IDMap{
- ContainerID: newContainerID,
- HostID: newHostID,
- Size: avail.Size + avail.HostID - newHostID,
- }
- return []idtools.IDMap{r}
- case used.HostID > avail.HostID && used.HostID < avail.HostID+avail.Size && used.HostID+used.Size >= avail.HostID+avail.Size:
- r := idtools.IDMap{
+ var out []idtools.IDMap
+ availEnd := avail.HostID + avail.Size
+ usedEnd := used.HostID + used.Size
+ // Intersection of [avail.HostID, availEnd) and (-inf, used.HostID) is [avail.HostID, newEnd).
+ if newEnd := minInt(availEnd, used.HostID); newEnd > avail.HostID {
+ out = append(out, idtools.IDMap{
ContainerID: avail.ContainerID,
HostID: avail.HostID,
- Size: used.HostID - avail.HostID,
- }
- return []idtools.IDMap{r}
- case used.HostID > avail.HostID && used.HostID < avail.HostID+avail.Size && used.HostID+used.Size < avail.HostID+avail.Size:
- r1 := idtools.IDMap{
- ContainerID: avail.ContainerID,
- HostID: avail.HostID,
- Size: used.HostID - avail.HostID,
- }
- r2 := idtools.IDMap{
- ContainerID: used.ContainerID + used.Size,
- HostID: avail.HostID + (used.HostID - avail.HostID),
- Size: avail.HostID + avail.Size - used.HostID - used.Size,
- }
- return []idtools.IDMap{r1, r2}
- default:
- r := idtools.IDMap{
- ContainerID: 0,
- HostID: avail.HostID,
- Size: avail.Size,
- }
- return []idtools.IDMap{r}
- }
+ Size: newEnd - avail.HostID,
+ })
+ }
+ // Intersection of [avail.HostID, availEnd) and [usedEnd, +inf) is [newStart, availEnd).
+ if newStart := maxInt(avail.HostID, usedEnd); newStart < availEnd {
+ out = append(out, idtools.IDMap{
+ ContainerID: newStart + avail.ContainerID - avail.HostID,
+ HostID: newStart,
+ Size: availEnd - newStart,
+ })
+ }
+ return out
}
// subtractContainerIDs return the subtraction of the range USED from AVAIL. The range is specified
// by [ContainerID, ContainerID+Size).
// HostID is ignored.
func subtractContainerIDs(avail idtools.IDMap, used idtools.IDMap) []idtools.IDMap {
- switch {
- case used.ContainerID <= avail.ContainerID && used.ContainerID+used.Size >= avail.ContainerID+avail.Size:
- return nil
- case used.ContainerID <= avail.ContainerID && used.ContainerID+used.Size > avail.ContainerID && used.ContainerID+used.Size < avail.ContainerID+avail.Size:
- newContainerID := used.ContainerID + used.Size
- newHostID := avail.HostID + used.Size
- r := idtools.IDMap{
- ContainerID: newContainerID,
- HostID: newHostID,
- Size: avail.Size + avail.ContainerID - newContainerID,
- }
- return []idtools.IDMap{r}
- case used.ContainerID > avail.ContainerID && used.ContainerID < avail.ContainerID+avail.Size && used.ContainerID+used.Size >= avail.ContainerID+avail.Size:
- r := idtools.IDMap{
- ContainerID: avail.ContainerID,
- HostID: avail.HostID,
- Size: used.ContainerID - avail.ContainerID,
- }
- return []idtools.IDMap{r}
- case used.ContainerID > avail.ContainerID && used.ContainerID < avail.ContainerID+avail.Size && used.ContainerID+used.Size < avail.ContainerID+avail.Size:
- r1 := idtools.IDMap{
+ var out []idtools.IDMap
+ availEnd := avail.ContainerID + avail.Size
+ usedEnd := used.ContainerID + used.Size
+ // Intersection of [avail.ContainerID, availEnd) and (-inf, used.ContainerID) is
+ // [avail.ContainerID, newEnd).
+ if newEnd := minInt(availEnd, used.ContainerID); newEnd > avail.ContainerID {
+ out = append(out, idtools.IDMap{
ContainerID: avail.ContainerID,
HostID: avail.HostID,
- Size: used.ContainerID - avail.ContainerID,
- }
- r2 := idtools.IDMap{
- ContainerID: used.ContainerID + used.Size,
- HostID: avail.HostID + (used.ContainerID - avail.ContainerID),
- Size: avail.ContainerID + avail.Size - used.ContainerID - used.Size,
- }
- return []idtools.IDMap{r1, r2}
- default:
- r := idtools.IDMap{
- ContainerID: avail.ContainerID,
- HostID: avail.HostID,
- Size: avail.Size,
- }
- return []idtools.IDMap{r}
- }
+ Size: newEnd - avail.ContainerID,
+ })
+ }
+ // Intersection of [avail.ContainerID, availEnd) and [usedEnd, +inf) is [newStart, availEnd).
+ if newStart := maxInt(avail.ContainerID, usedEnd); newStart < availEnd {
+ out = append(out, idtools.IDMap{
+ ContainerID: newStart,
+ HostID: newStart + avail.HostID - avail.ContainerID,
+ Size: availEnd - newStart,
+ })
+ }
+ return out
}
// subtractAll subtracts all usedIDs from the available IDs.
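
The rewritten helpers replace the old four-branch case analysis with two interval intersections: the part of the available range that lies below the used range, and the part at or above its end. A self-contained sketch of the same subtraction over plain integer ranges, with hypothetical names (not the vendored functions), including a worked example:

package main

import "fmt"

// span is a half-open range [start, start+size).
type span struct{ start, size int }

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func maxInt(a, b int) int {
	if a < b {
		return b
	}
	return a
}

// subtract returns the parts of avail not covered by used, mirroring the
// two-intersection logic of subtractHostIDs/subtractContainerIDs above.
func subtract(avail, used span) []span {
	var out []span
	availEnd := avail.start + avail.size
	usedEnd := used.start + used.size
	// Piece of avail strictly below used.
	if newEnd := minInt(availEnd, used.start); newEnd > avail.start {
		out = append(out, span{avail.start, newEnd - avail.start})
	}
	// Piece of avail at or above the end of used.
	if newStart := maxInt(avail.start, usedEnd); newStart < availEnd {
		out = append(out, span{newStart, availEnd - newStart})
	}
	return out
}

func main() {
	// Host IDs [1000, 2000) minus [1200, 1400) leaves [1000, 1200) and [1400, 2000).
	fmt.Println(subtract(span{1000, 1000}, span{1200, 200}))
	// A used range that fully covers avail leaves nothing.
	fmt.Println(subtract(span{1000, 1000}, span{0, 5000}))
}
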
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index 762c3a00d..bd6c4feb1 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -273,7 +273,11 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
storageOpts.RunRoot = defaultRootlessRunRoot
}
if storageOpts.GraphRoot == "" {
- storageOpts.GraphRoot = defaultRootlessGraphRoot
+ if storageOpts.RootlessStoragePath != "" {
+ storageOpts.GraphRoot = storageOpts.RootlessStoragePath
+ } else {
+ storageOpts.GraphRoot = defaultRootlessGraphRoot
+ }
}
}
}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index a7b35a318..da35fe19a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -166,7 +166,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.23.8
+# github.com/containers/storage v1.23.9
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs