-rw-r--r--  .cirrus.yml                                   |    2
-rw-r--r--  CONTRIBUTING.md                               |   13
-rw-r--r--  cmd/podman/containers/rm.go                   |   13
-rw-r--r--  cmd/podman/images/diff.go                     |    9
-rw-r--r--  cmd/podman/inspect/inspect.go                 |   49
-rw-r--r--  cmd/podman/play/kube.go                       |    1
-rw-r--r--  cmd/podman/pods/inspect.go                    |   24
-rw-r--r--  contrib/cirrus/lib.sh                         |   29
-rwxr-xr-x  contrib/cirrus/runner.sh                      |    3
-rwxr-xr-x  contrib/cirrus/setup_environment.sh           |    7
-rwxr-xr-x  contrib/cirrus/shellcheck.sh                  |    2
-rw-r--r--  docs/source/markdown/podman-play-kube.1.md    |   15
-rw-r--r--  docs/source/markdown/podman-rm.1.md           |    9
-rwxr-xr-x  hack/get_ci_vm.sh                             |  330
-rw-r--r--  libpod/container.go                           |   19
-rw-r--r--  libpod/container_config.go                    |    5
-rw-r--r--  libpod/container_inspect.go                   |    3
-rw-r--r--  libpod/container_internal_linux.go            |    7
-rw-r--r--  libpod/define/container_inspect.go            |    3
-rw-r--r--  libpod/image/docker_registry_options.go       |    1
-rw-r--r--  libpod/kube.go                                |   40
-rw-r--r--  libpod/oci_conmon_linux.go                    |   12
-rw-r--r--  libpod/runtime_ctr.go                         |    1
-rw-r--r--  nix/nixpkgs.json                              |    6
-rw-r--r--  pkg/api/handlers/compat/containers_create.go  |    9
-rw-r--r--  pkg/api/handlers/compat/images.go             |   42
-rw-r--r--  pkg/api/server/register_images.go             |   25
-rw-r--r--  pkg/domain/entities/containers.go             |    1
-rw-r--r--  pkg/domain/entities/play.go                   |    2
-rw-r--r--  pkg/domain/infra/abi/containers.go            |   28
-rw-r--r--  pkg/domain/infra/abi/play.go                  |   89
-rw-r--r--  pkg/domain/infra/abi/play_test.go             |  254
-rw-r--r--  test/apiv2/10-images.at                       |    3
-rw-r--r--  test/apiv2/20-containers.at                   |    2
-rw-r--r--  test/e2e/generate_kube_test.go                |   32
-rw-r--r--  test/e2e/network_test.go                      |    2
-rw-r--r--  test/e2e/play_kube_test.go                    |  235
-rw-r--r--  test/e2e/rm_test.go                           |    1
-rw-r--r--  test/e2e/run_test.go                          |    2
-rw-r--r--  test/system/055-rm.bats                       |   15
40 files changed, 1007 insertions, 338 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index b23ec1a90..da33c81e2 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -47,7 +47,7 @@ env:
TEST_ENVIRON: host # 'host' or 'container'
PODBIN_NAME: podman # 'podman' or 'remote'
PRIV_NAME: root # 'root' or 'rootless'
- DISTRO_NV: $FEDORA_NAME # any {PRIOR_,}{FEDORA,UBUNTU}_NAME value
+ DISTRO_NV: # any {PRIOR_,}{FEDORA,UBUNTU}_NAME value
VM_IMAGE_NAME: # One of the "Google-cloud VM Images" (above)
CTR_FQIN: # One of the "Container FQIN's" (above)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ba321921c..a813fcc35 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -46,7 +46,7 @@ This section describes how to start a contribution to Podman.
### Prepare your environment
-Read the [install documentation to see how to install dependencies](install.md) .
+Read the [install documentation to see how to install dependencies](https://podman.io/getting-started/installation#build-and-run-dependencies).
The install documentation will illustrate the following steps:
- install libs and tools
@@ -86,6 +86,17 @@ Makefile allow you to install needed tools:
$ make install.tools
```
+### Prerequisites before building
+
+You need to install some dependencies before building a binary.
+
+#### Fedora
+
+ ```shell
+ $ sudo dnf install gpgme-devel libseccomp-devel.x86_64 systemd-devel
+ $ export PKG_CONFIG_PATH="/usr/lib/pkgconfig"
+ ```
+
### Building binaries and test your changes
To test your changes do `make binaries` to generate your binaries.
diff --git a/cmd/podman/containers/rm.go b/cmd/podman/containers/rm.go
index f8f12234d..a7739b3ba 100644
--- a/cmd/podman/containers/rm.go
+++ b/cmd/podman/containers/rm.go
@@ -57,13 +57,12 @@ func rmFlags(flags *pflag.FlagSet) {
flags.BoolVarP(&rmOptions.All, "all", "a", false, "Remove all containers")
flags.BoolVarP(&rmOptions.Ignore, "ignore", "i", false, "Ignore errors when a specified container is missing")
flags.BoolVarP(&rmOptions.Force, "force", "f", false, "Force removal of a running or unusable container. The default is false")
- flags.BoolVar(&rmOptions.Storage, "storage", false, "Remove container from storage library")
flags.BoolVarP(&rmOptions.Volumes, "volumes", "v", false, "Remove anonymous volumes associated with the container")
flags.StringArrayVarP(&rmOptions.CIDFiles, "cidfile", "", nil, "Read the container ID from the file")
- if registry.IsRemote() {
- _ = flags.MarkHidden("ignore")
- _ = flags.MarkHidden("cidfile")
+ if !registry.IsRemote() {
+ // This option is deprecated, but needs to still exist for backwards compatibility
+ flags.Bool("storage", false, "Remove container from storage library")
_ = flags.MarkHidden("storage")
}
}
@@ -97,12 +96,6 @@ func removeContainers(namesOrIDs []string, rmOptions entities.RmOptions, setExit
var (
errs utils.OutputErrors
)
- // Storage conflicts with --all/--latest/--volumes/--cidfile/--ignore
- if rmOptions.Storage {
- if rmOptions.All || rmOptions.Ignore || rmOptions.Latest || rmOptions.Volumes || rmOptions.CIDFiles != nil {
- return errors.Errorf("--storage conflicts with --volumes, --all, --latest, --ignore and --cidfile")
- }
- }
responses, err := registry.ContainerEngine().ContainerRm(context.Background(), namesOrIDs, rmOptions)
if err != nil {
if setExit {
diff --git a/cmd/podman/images/diff.go b/cmd/podman/images/diff.go
index 26147345e..05a05fa04 100644
--- a/cmd/podman/images/diff.go
+++ b/cmd/podman/images/diff.go
@@ -1,6 +1,7 @@
package images
import (
+ "github.com/containers/podman/v2/cmd/podman/parse"
"github.com/containers/podman/v2/cmd/podman/registry"
"github.com/containers/podman/v2/cmd/podman/report"
"github.com/containers/podman/v2/pkg/domain/entities"
@@ -49,11 +50,11 @@ func diff(cmd *cobra.Command, args []string) error {
return err
}
- switch diffOpts.Format {
- case "":
- return report.ChangesToTable(results)
- case "json":
+ switch {
+ case parse.MatchesJSONFormat(diffOpts.Format):
return report.ChangesToJSON(results)
+ case diffOpts.Format == "":
+ return report.ChangesToTable(results)
default:
return errors.New("only supported value for '--format' is 'json'")
}
diff --git a/cmd/podman/inspect/inspect.go b/cmd/podman/inspect/inspect.go
index f29527412..658463650 100644
--- a/cmd/podman/inspect/inspect.go
+++ b/cmd/podman/inspect/inspect.go
@@ -4,10 +4,14 @@ import (
"context"
"fmt"
"os"
+ "regexp"
"strings"
+ "text/tabwriter"
+ "text/template"
- "github.com/containers/buildah/pkg/formats"
+ "github.com/containers/podman/v2/cmd/podman/parse"
"github.com/containers/podman/v2/cmd/podman/registry"
+ "github.com/containers/podman/v2/cmd/podman/report"
"github.com/containers/podman/v2/cmd/podman/validate"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/pkg/errors"
@@ -24,6 +28,9 @@ const (
AllType = "all"
)
+// Pull in configured json library
+var json = registry.JSONLibrary()
+
// AddInspectFlagSet takes a command and adds the inspect flags and returns an
// InspectOptions object.
func AddInspectFlagSet(cmd *cobra.Command) *entities.InspectOptions {
@@ -80,7 +87,7 @@ func newInspector(options entities.InspectOptions) (*inspector, error) {
// inspect inspects the specified container/image names or IDs.
func (i *inspector) inspect(namesOrIDs []string) error {
// data - dumping place for inspection results.
- var data []interface{} //nolint
+ var data []interface{} // nolint
var errs []error
ctx := context.Background()
@@ -134,15 +141,19 @@ func (i *inspector) inspect(namesOrIDs []string) error {
data = []interface{}{}
}
- var out formats.Writer
- if i.options.Format == "json" || i.options.Format == "" { // "" for backwards compat
- out = formats.JSONStructArray{Output: data}
- } else {
- out = formats.StdoutTemplateArray{Output: data, Template: inspectFormat(i.options.Format)}
+ var err error
+ switch {
+ case parse.MatchesJSONFormat(i.options.Format) || i.options.Format == "":
+ err = printJSON(data)
+ default:
+ row := inspectNormalize(i.options.Format)
+ row = "{{range . }}" + report.NormalizeFormat(row) + "{{end}}"
+ err = printTmpl(tmpType, row, data)
}
- if err := out.Out(); err != nil {
+ if err != nil {
logrus.Errorf("Error printing inspect output: %v", err)
}
+
if len(errs) > 0 {
if len(errs) > 1 {
for _, err := range errs[1:] {
@@ -154,8 +165,22 @@ func (i *inspector) inspect(namesOrIDs []string) error {
return nil
}
+func printJSON(data []interface{}) error {
+ enc := json.NewEncoder(os.Stdout)
+ return enc.Encode(data)
+}
+
+func printTmpl(typ, row string, data []interface{}) error {
+ t, err := template.New(typ + " inspect").Parse(row)
+ if err != nil {
+ return err
+ }
+ w := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', 0)
+ return t.Execute(w, data)
+}
+
func (i *inspector) inspectAll(ctx context.Context, namesOrIDs []string) ([]interface{}, []error, error) {
- var data []interface{} //nolint
+ var data []interface{} // nolint
allErrs := []error{}
for _, name := range namesOrIDs {
ctrData, errs, err := i.containerEngine.ContainerInspect(ctx, []string{name}, i.options)
@@ -179,9 +204,11 @@ func (i *inspector) inspectAll(ctx context.Context, namesOrIDs []string) ([]inte
return data, allErrs, nil
}
-func inspectFormat(row string) string {
+func inspectNormalize(row string) string {
+ m := regexp.MustCompile(`{{\s*\.Id\s*}}`)
+ row = m.ReplaceAllString(row, "{{.ID}}")
+
r := strings.NewReplacer(
- "{{.Id}}", formats.IDString,
".Src", ".Source",
".Dst", ".Destination",
".ImageID", ".Image",
diff --git a/cmd/podman/play/kube.go b/cmd/podman/play/kube.go
index 54a6d0677..976d720ee 100644
--- a/cmd/podman/play/kube.go
+++ b/cmd/podman/play/kube.go
@@ -60,6 +60,7 @@ func init() {
flags.BoolVar(&kubeOptions.TLSVerifyCLI, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
flags.StringVar(&kubeOptions.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
flags.StringVar(&kubeOptions.SeccompProfileRoot, "seccomp-profile-root", defaultSeccompRoot, "Directory path for seccomp profiles")
+ flags.StringSliceVar(&kubeOptions.ConfigMaps, "configmap", []string{}, "`Pathname` of a YAML file containing a kubernetes configmap")
}
_ = flags.MarkHidden("signature-policy")
}
diff --git a/cmd/podman/pods/inspect.go b/cmd/podman/pods/inspect.go
index bc20352b0..cad15d10f 100644
--- a/cmd/podman/pods/inspect.go
+++ b/cmd/podman/pods/inspect.go
@@ -3,9 +3,13 @@ package pods
import (
"context"
"fmt"
+ "os"
+ "text/tabwriter"
+ "text/template"
- "github.com/containers/buildah/pkg/formats"
+ "github.com/containers/podman/v2/cmd/podman/parse"
"github.com/containers/podman/v2/cmd/podman/registry"
+ "github.com/containers/podman/v2/cmd/podman/report"
"github.com/containers/podman/v2/cmd/podman/validate"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/pkg/errors"
@@ -57,11 +61,19 @@ func inspect(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
- var data interface{} = responses
- var out formats.Writer = formats.JSONStruct{Output: data}
- if inspectOptions.Format != "json" {
- out = formats.StdoutTemplate{Output: data, Template: inspectOptions.Format}
+
+ if parse.MatchesJSONFormat(inspectOptions.Format) {
+ enc := json.NewEncoder(os.Stdout)
+ return enc.Encode(responses)
+ }
+
+ row := report.NormalizeFormat(inspectOptions.Format)
+
+ t, err := template.New("pod inspect").Parse(row)
+ if err != nil {
+ return err
}
- return out.Out()
+ w := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', 0)
+ return t.Execute(w, *responses)
}
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index 23987938b..e5124d8e4 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -6,18 +6,20 @@
# BEGIN Global export of all variables
set -a
-# Due to differences across platforms and runtime execution environments,
-# handling of the (otherwise) default shell setup is non-uniform. Rather
-# than attempt to workaround differences, simply force-load/set required
-# items every time this library is utilized.
-source /etc/profile
-source /etc/environment
-USER="$(whoami)"
-HOME="$(getent passwd $USER | cut -d : -f 6)"
-# Some platforms set and make this read-only
-[[ -n "$UID" ]] || \
- UID=$(getent passwd $USER | cut -d : -f 3)
-GID=$(getent passwd $USER | cut -d : -f 4)
+if [[ "$CI" == "true" ]]; then
+ # Due to differences across platforms and runtime execution environments,
+ # handling of the (otherwise) default shell setup is non-uniform. Rather
+ # than attempt to workaround differences, simply force-load/set required
+ # items every time this library is utilized.
+ source /etc/profile
+ source /etc/environment
+ USER="$(whoami)"
+ HOME="$(getent passwd $USER | cut -d : -f 6)"
+ # Some platforms set and make this read-only
+ [[ -n "$UID" ]] || \
+ UID=$(getent passwd $USER | cut -d : -f 3)
+ GID=$(getent passwd $USER | cut -d : -f 4)
+fi
# During VM Image build, the 'containers/automation' installation
# was performed. The final step of that installation sets the
@@ -43,6 +45,9 @@ OS_RELEASE_ID="$(source /etc/os-release; echo $ID)"
OS_RELEASE_VER="$(source /etc/os-release; echo $VERSION_ID | cut -d '.' -f 1)"
# Combined to ease some usage
OS_REL_VER="${OS_RELEASE_ID}-${OS_RELEASE_VER}"
+# This is normally set from .cirrus.yml but a default is necessary when
+# running under hack/get_ci_vm.sh since it cannot infer the value.
+DISTRO_NV="${DISTRO_NV:-$OS_REL_VER}"
# Essential default paths, many are overridden when executing under Cirrus-CI
GOPATH="${GOPATH:-/var/tmp/go}"
diff --git a/contrib/cirrus/runner.sh b/contrib/cirrus/runner.sh
index 8a85acbd1..bfac8e7cb 100755
--- a/contrib/cirrus/runner.sh
+++ b/contrib/cirrus/runner.sh
@@ -139,6 +139,9 @@ function _run_vendor() {
}
function _run_build() {
+ # Ensure we always start from a clean slate with all vendor modules downloaded
+ make clean
+ make vendor
make podman-release
make podman-remote-linux-release
}
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index c064b6840..156c9b7b2 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -67,9 +67,8 @@ case "$CG_FS_TYPE" in
*) die_unknown CG_FS_TYPE
esac
-# Required to be defined by caller: Which distribution are we testing on
-# shellcheck disable=SC2154
-case "$DISTRO_NV" in
+# Which distribution are we testing on.
+case "$OS_RELEASE_ID" in
ubuntu*) ;;
fedora*)
if ((CONTAINER==0)); then # Not yet running inside a container
@@ -83,7 +82,7 @@ case "$DISTRO_NV" in
setsebool container_manage_cgroup true
fi
;;
- *) die_unknown DISTRO_NV
+ *) die_unknown OS_RELEASE_ID
esac
# Required to be defined by caller: The environment where primary testing happens
diff --git a/contrib/cirrus/shellcheck.sh b/contrib/cirrus/shellcheck.sh
index edf8248d3..667d30c91 100755
--- a/contrib/cirrus/shellcheck.sh
+++ b/contrib/cirrus/shellcheck.sh
@@ -11,6 +11,6 @@ shellcheck --color=always --format=tty \
--enable add-default-case,avoid-nullary-conditions,check-unassigned-uppercase \
--exclude SC2046,SC2034,SC2090,SC2064 \
--wiki-link-count=0 --severity=warning \
- $SCRIPT_BASE/*.sh
+ $SCRIPT_BASE/*.sh hack/get_ci_vm.sh
echo "Shellcheck: PASS"
diff --git a/docs/source/markdown/podman-play-kube.1.md b/docs/source/markdown/podman-play-kube.1.md
index dd9441800..519b153f4 100644
--- a/docs/source/markdown/podman-play-kube.1.md
+++ b/docs/source/markdown/podman-play-kube.1.md
@@ -30,6 +30,12 @@ environment variable. `export REGISTRY_AUTH_FILE=path`
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands)
+**--configmap**=*path*
+
+Use Kubernetes configmap YAML at path to provide a source for environment variable values within the containers of the pod.
+
+Note: The *--configmap* option can be used multiple times or a comma-separated list of paths can be used to pass multiple Kubernetes configmap YAMLs.
+
**--creds**
The [username[:password]] to use to authenticate with the registry if required.
@@ -66,6 +72,15 @@ $ podman play kube demo.yml
52182811df2b1e73f36476003a66ec872101ea59034ac0d4d3a7b40903b955a6
```
+Provide `configmap-foo.yml` and `configmap-bar.yml` as sources for environment variables within the containers.
+```
+$ podman play kube demo.yml --configmap configmap-foo.yml,configmap-bar.yml
+52182811df2b1e73f36476003a66ec872101ea59034ac0d4d3a7b40903b955a6
+
+$ podman play kube demo.yml --configmap configmap-foo.yml --configmap configmap-bar.yml
+52182811df2b1e73f36476003a66ec872101ea59034ac0d4d3a7b40903b955a6
+```
+
CNI network(s) can be specified as comma-separated list using ``--network``
```
$ podman play kube demo.yml --network cni1,cni2
diff --git a/docs/source/markdown/podman-rm.1.md b/docs/source/markdown/podman-rm.1.md
index e3e6740df..36904a128 100644
--- a/docs/source/markdown/podman-rm.1.md
+++ b/docs/source/markdown/podman-rm.1.md
@@ -43,13 +43,6 @@ to run containers such as CRI-O, the last started container could be from either
The latest option is not supported on the remote client.
-**--storage**
-
-Remove external containers from the storage library.
-This is only possible with containers that are not present in libpod can be seen by **podman ps --all --storage**).
-It is used to remove external containers from **podman build** and **buildah**, and orphan containers which were only partially removed by **podman rm**.
-The storage option conflicts with the **--all**, **--latest**, and **--volumes** options.
-
**--volumes**, **-v**
Remove anonymous volumes associated with the container. This does not include named volumes
@@ -96,7 +89,7 @@ $ podman rm -f --latest
**125** The command fails for any other reason
## SEE ALSO
-podman(1), podman-image-rm(1), podman-ps(1), podman-build(1)
+podman(1), podman-image-rm(1), podman-ps(1), podman-build(1), buildah(1), cri-o(1)
## HISTORY
August 2017, Originally compiled by Ryan Cole <rycole@redhat.com>
diff --git a/hack/get_ci_vm.sh b/hack/get_ci_vm.sh
index adf3b1bf2..f8c7e792e 100755
--- a/hack/get_ci_vm.sh
+++ b/hack/get_ci_vm.sh
@@ -1,49 +1,82 @@
#!/usr/bin/env bash
+#
+# For help and usage information, simply execute the script w/o any arguments.
+#
+# This script is intended to be run by podman developers who need to debug
+# problems specifically related to Cirrus-CI automated testing. However,
+# because it's only loosely coupled to the `.cirrus.yml` configuration, it must
+# orchestrate VMs in GCP directly. This means users need to have
+# pre-authorization (access) to manipulate google-cloud resources. Additionally,
+# there are no guarantees it will remain in-sync with other automation-related
+# scripts. Therefore it may not always function for everybody in every
+# future scenario without updates/modifications/tweaks.
+
set -e
-RED="\e[1;36;41m"
-YEL="\e[1;33;44m"
+RED="\e[1;31m"
+YEL="\e[1;32m"
NOR="\e[0m"
USAGE_WARNING="
-${YEL}WARNING: This will not work without local sudo access to run podman,${NOR}
- ${YEL}and prior authorization to use the libpod GCP project. Also,${NOR}
- ${YEL}possession of the proper ssh private key is required.${NOR}
+${YEL}WARNING: This will not work without podman,${NOR}
+ ${YEL}and prior authorization to use the libpod GCP project.${NOR}
"
-# TODO: Many/most of these values should come from .cirrus.yml
+# These values come from .cirrus.yml gce_instance clause
ZONE="${ZONE:-us-central1-a}"
CPUS="2"
MEMORY="4Gb"
DISK="200"
PROJECT="libpod-218412"
GOSRC="/var/tmp/go/src/github.com/containers/podman"
-GCLOUD_IMAGE=${GCLOUD_IMAGE:-quay.io/cevich/gcloud_centos:latest}
-GCLOUD_SUDO=${GCLOUD_SUDO-sudo}
+GIT_REPO="https://github.com/containers/podman.git"
+
+# Container image with necessary runtime elements
+GCLOUD_IMAGE="${GCLOUD_IMAGE:-docker.io/google/cloud-sdk:alpine}"
+GCLOUD_CFGDIR=".config/gcloud"
+
+SCRIPT_FILENAME=$(basename ${BASH_SOURCE[0]})
+HOOK_FILENAME="hook_${SCRIPT_FILENAME}"
# Shared tmp directory between container and us
-TMPDIR=$(mktemp -d --tmpdir $(basename $0)_tmpdir_XXXXXX)
+TMPDIR=$(mktemp -d --tmpdir ${SCRIPT_FILENAME}_tmpdir_XXXXXX)
-LIBPODROOT=$(realpath "$(dirname $0)/../")
+show_usage() {
+ echo -e "\n${RED}ERROR: $1${NOR}"
+ echo -e "${YEL}Usage: $SCRIPT_FILENAME <image_name>${NOR}"
+ echo ""
+ if [[ -r ".cirrus.yml" ]]
+ then
+ echo -e "${YEL}Some possible image_name values (from .cirrus.yml):${NOR}"
+ image_hints
+ echo ""
+ echo -e "${YEL}Optional:${NOR} If a $HOME/$GCLOUD_CFGDIR/$HOOK_FILENAME executable exists during"
+ echo "VM creation, it will be executed remotely after cloning"
+ echo "$GIT_REPO. The"
+ echo "current local working branch name and commit ID, will be provided as"
+ echo "it's arguments."
+ fi
+ exit 1
+}
+
+LIBPODROOT=$(realpath "$(dirname ${BASH_SOURCE[0]})/../")
# else: Assume $PWD is the root of the libpod repository
-[[ "$LIBPODROOT" != "/" ]] || LIBPODROOT=$PWD
+[[ "$LIBPODROOT" != "/" ]] || \
+ show_usage "Must execute script from within clone of containers/podman repo."
-# Command shortcuts save some typing (assumes $LIBPODROOT is subdir of $HOME)
-PGCLOUD="$GCLOUD_SUDO podman run -it --rm -e AS_ID=$UID -e AS_USER=$USER --security-opt label=disable -v $TMPDIR:$HOME -v $HOME/.config/gcloud:$HOME/.config/gcloud -v $HOME/.config/gcloud/ssh:$HOME/.ssh -v $LIBPODROOT:$LIBPODROOT $GCLOUD_IMAGE --configuration=libpod --project=$PROJECT"
-SCP_CMD="$PGCLOUD compute scp"
+[[ "$UID" -ne 0 ]] || \
+ show_usage "Must execute script as a regular (non-root) user."
+
+[[ "${LIBPODROOT#$HOME}" != "$LIBPODROOT" ]] || \
+ show_usage "Clone of containers/podman must be a subdirectory of \$HOME ($HOME)"
+# Disable SELinux labeling to allow read-only mounting of repository files
+PGCLOUD="podman run -it --rm --security-opt label=disable -v $TMPDIR:$TMPDIR -v $HOME/.config/gcloud:/root/.config/gcloud -v $HOME/.config/gcloud/ssh:/root/.ssh -v $LIBPODROOT:$LIBPODROOT:ro $GCLOUD_IMAGE gcloud --configuration=libpod --project=$PROJECT"
+SCP_CMD="$PGCLOUD compute scp"
showrun() {
- if [[ "$1" == "--background" ]]
- then
- shift
- # Properly escape any nested spaces, so command can be copy-pasted
- echo '+ '$(printf " %q" "$@")' &' > /dev/stderr
- "$@" &
- echo -e "${RED}<backgrounded>${NOR}"
- else
- echo '+ '$(printf " %q" "$@") > /dev/stderr
- "$@"
- fi
+ echo '+ '$(printf " %q" "$@") > /dev/stderr
+ echo ""
+ "$@"
}
cleanup() {
@@ -52,6 +85,7 @@ cleanup() {
wait
# set GCLOUD_DEBUG to leave tmpdir behind for postmortem
+ # shellcheck disable=SC2154
test -z "$GCLOUD_DEBUG" && rm -rf $TMPDIR
# Not always called from an exit handler, but should always exit when called
@@ -61,32 +95,18 @@ trap cleanup EXIT
delvm() {
echo -e "\n"
- echo -e "\n${YEL}Offering to Delete $VMNAME ${RED}(Might take a minute or two)${NOR}"
- echo -e "\n${YEL}Note: It's safe to answer N, then re-run script again later.${NOR}"
+ echo -e "\n${YEL}Offering to Delete $VMNAME${NOR}"
+ echo -e "${RED}(Deletion might take a minute or two)${NOR}"
+ echo -e "${YEL}Note: It's safe to answer N, then re-run script again later.${NOR}"
showrun $CLEANUP_CMD # prompts for Yes/No
cleanup
}
-show_usage() {
- echo -e "\n${RED}ERROR: $1${NOR}"
- echo -e "${YEL}Usage: $(basename $0) [-m <SPECIALMODE>] [-u <ROOTLESS_USER> ] <image_name>${NOR}"
- echo "Use -m <SPECIALMODE> with a supported value documented in contrib/cirrus/README.md."
- echo "With '-m rootless' must also specify -u <ROOTLESS_USER> with name of user to create & use"
- echo ""
- if [[ -r ".cirrus.yml" ]]
- then
- echo -e "${YEL}Some possible image_name values (from .cirrus.yml):${NOR}"
- image_hints
- echo ""
- fi
- exit 1
-}
-
get_env_vars() {
# Deal with both YAML and embedded shell-like substitutions in values
# if substitution fails, fall back to printing naked env. var as-is.
python3 -c '
-import yaml,re
+import sys,yaml,re
env=yaml.load(open(".cirrus.yml"), Loader=yaml.SafeLoader)["env"]
dollar_env_var=re.compile(r"\$(\w+)")
dollarcurly_env_var=re.compile(r"\$\{(\w+)\}")
@@ -98,11 +118,10 @@ class ReIterKey(dict):
rep=r"{\1}" # Convert env vars markup to -> str.format_map(re_iter_key) markup
out=ReIterKey()
for k,v in env.items():
- v=str(v)
- if "ENCRYPTED" not in v:
- out[k]=dollar_env_var.sub(rep, dollarcurly_env_var.sub(rep, v))
+ if "ENCRYPTED" not in str(v) and bool(v):
+ out[k]=dollar_env_var.sub(rep, dollarcurly_env_var.sub(rep, str(v)))
for k,v in out.items():
- print("{0}=\"{1}\"".format(k, v.format_map(out)))
+ sys.stdout.write("{0}=\"{1}\"\n".format(k, str(v).format_map(out)))
'
}
@@ -110,8 +129,14 @@ image_hints() {
get_env_vars | fgrep '_CACHE_IMAGE_NAME' | awk -F "=" '{print $2}'
}
-
+unset VM_IMAGE_NAME
+unset VMNAME
+unset CREATE_CMD
+unset SSH_CMD
+unset CLEANUP_CMD
+declare -xa ENVS
parse_args(){
+ local arg
echo -e "$USAGE_WARNING"
if [[ "$USER" =~ "root" ]]
@@ -119,86 +144,41 @@ parse_args(){
show_usage "This script must be run as a regular user."
fi
- ENVS="$(get_env_vars)"
- [[ "$#" -ge "1" ]] || \
- show_usage "Must specify at least one command-line parameter."
-
- IMAGE_NAME=""
- ROOTLESS_USER=""
- SPECIALMODE="none"
- for arg
- do
- if [[ "$SPECIALMODE" == "GRABNEXT" ]] && [[ "${arg:0:1}" != "-" ]]
- then
- SPECIALMODE="$arg"
- echo -e "${YEL}Using \$SPECIALMODE=$SPECIALMODE.${NOR}"
- continue
- elif [[ "$ROOTLESS_USER" == "GRABNEXT" ]] && [[ "${arg:0:1}" != "-" ]]
- then
- ROOTLESS_USER="$arg"
- echo -e "${YEL}Using \$ROOTLESS_USER=$ROOTLESS_USER.${NOR}"
- continue
- fi
- case "$arg" in
- -m)
- SPECIALMODE="GRABNEXT"
- ;;
- -u)
- ROOTLESS_USER="GRABNEXT"
- ;;
- *)
- [[ "${arg:0:1}" != "-" ]] || \
- show_usage "Unknown command-line option '$arg'."
- [[ -z "$IMAGE_NAME" ]] || \
- show_usage "Must specify exactly one image name, got '$IMAGE_NAME' and '$arg'."
- IMAGE_NAME="$arg"
- ;;
- esac
- done
+ [[ "$#" -eq 1 ]] || \
+ show_usage "Must specify a VM Image name to use, and the test flavor."
- if [[ "$SPECIALMODE" == "GRABNEXT" ]]
- then
- show_usage "Must specify argument to -m option."
- fi
+ VM_IMAGE_NAME="$1"
- if [[ "$ROOTLESS_USER" == "GRABNEXT" ]]
- then
- show_usage "Must specify argument to -u option."
- fi
+ # Word-splitting is desirable in this case
+ # shellcheck disable=SC2207
+ ENVS=(
+ $(get_env_vars)
+ "VM_IMAGE_NAME=$VM_IMAGE_NAME"
+ )
- if [[ -z "$IMAGE_NAME" ]]
- then
- show_usage "No image-name specified."
- fi
+ VMNAME="${VMNAME:-${USER}-${VM_IMAGE_NAME}}"
- if [[ "$SPECIALMODE" == "rootless" ]] && [[ -z "$ROOTLESS_USER" ]]
- then
- show_usage "With '-m rootless' must also pass -u <username> of rootless user."
- fi
-
- if echo "$IMAGE_NAME" | grep -q "image-builder-image"
- then
- echo -e "Creating an image-builder VM, I hope you know what you're doing.\n"
- IBI_ARGS="--scopes=compute-rw,storage-rw,userinfo-email"
- SSHUSER="centos"
- else
- unset IBI_ARGS
- SSHUSER="root"
- fi
+ CREATE_CMD="$PGCLOUD compute instances create --zone=$ZONE --image=${VM_IMAGE_NAME} --custom-cpu=$CPUS --custom-memory=$MEMORY --boot-disk-size=$DISK --labels=in-use-by=$USER $VMNAME"
- ENVS="$ENVS SPECIALMODE=\"$SPECIALMODE\""
+ SSH_CMD="$PGCLOUD compute ssh root@$VMNAME"
- [[ -z "$ROOTLESS_USER" ]] || \
- ENVS="$ENVS ROOTLESS_USER=$ROOTLESS_USER"
-
- SETUP_CMD="env $ENVS ADD_SECOND_PARTITIO=True $GOSRC/contrib/cirrus/setup_environment.sh"
- VMNAME="${VMNAME:-${USER}-${IMAGE_NAME}}"
+ CLEANUP_CMD="$PGCLOUD compute instances delete --zone $ZONE --delete-disks=all $VMNAME"
+}
- CREATE_CMD="$PGCLOUD compute instances create --zone=$ZONE --image=${IMAGE_NAME} --custom-cpu=$CPUS --custom-memory=$MEMORY --boot-disk-size=$DISK --labels=in-use-by=$USER $IBI_ARGS $VMNAME"
+# Returns true if the user has run an 'init' and has a valid token for
+# the specific project-id and named-configuration arguments in $PGCLOUD.
+function has_valid_credentials() {
+ if $PGCLOUD info |& grep -Eq 'Account:.*None'; then
+ return 1
+ fi
- SSH_CMD="$PGCLOUD compute ssh $SSHUSER@$VMNAME"
+ # It's possible for 'gcloud info' to list expired credentials,
+ # e.g. 'ERROR: ... invalid grant: Bad Request'
+ if $PGCLOUD auth print-access-token |& grep -q 'ERROR'; then
+ return 1
+ fi
- CLEANUP_CMD="$PGCLOUD compute instances delete --zone $ZONE --delete-disks=all $VMNAME"
+ return 0
}
##### main
@@ -209,23 +189,17 @@ parse_args(){
cd "$LIBPODROOT"
parse_args "$@"
-
-# Ensure mount-points and data directories exist on host as $USER. Also prevents
-# permission-denied errors during cleanup() b/c `sudo podman` created mount-points
-# owned by root.
-mkdir -p $TMPDIR/${LIBPODROOT##$HOME}
mkdir -p $TMPDIR/.ssh
mkdir -p {$HOME,$TMPDIR}/.config/gcloud/ssh
chmod 700 {$HOME,$TMPDIR}/.config/gcloud/ssh $TMPDIR/.ssh
-cd $LIBPODROOT
+echo -e "\n${YEL}Pulling gcloud image...${NOR}"
+podman pull $GCLOUD_IMAGE
-# Attempt to determine if named 'libpod' gcloud configuration exists
-showrun $PGCLOUD info > $TMPDIR/gcloud-info
-if egrep -q "Account:.*None" $TMPDIR/gcloud-info
+if ! has_valid_credentials
then
echo -e "\n${YEL}WARNING: Can't find gcloud configuration for libpod, running init.${NOR}"
- echo -e " ${RED}Please choose "#1: Re-initialize" and "login" if asked.${NOR}"
+ echo -e " ${RED}Please choose \"#1: Re-initialize\" and \"login\" if asked.${NOR}"
showrun $PGCLOUD init --project=$PROJECT --console-only --skip-diagnostics
# Verify it worked (account name == someone@example.com)
@@ -236,68 +210,52 @@ then
exit 5
fi
- # If this is the only config, make it the default to avoid persistent warnings from gcloud
+ # If this is the only config, make it the default to avoid
+ # persistent warnings from gcloud about there being no default.
[[ -r "$HOME/.config/gcloud/configurations/config_default" ]] || \
- ln "$HOME/.config/gcloud/configurations/config_libpod" \
- "$HOME/.config/gcloud/configurations/config_default"
+ ln "$HOME/.config/gcloud/configurations/config_libpod" \
+ "$HOME/.config/gcloud/configurations/config_default"
fi
-# Couldn't make rsync work with gcloud's ssh wrapper because ssh-keys generated on the fly
-TARBALL=$VMNAME.tar.bz2
-echo -e "\n${YEL}Packing up local repository into a tarball.${NOR}"
-showrun --background tar cjf $TMPDIR/$TARBALL --warning=no-file-changed --exclude-vcs-ignores -C $LIBPODROOT .
-
-trap delvm INT # Allow deleting VM if CTRL-C during create
-# This fails if VM already exists: permit this usage to re-init
+trap delvm EXIT # Allow deleting VM if CTRL-C during create
echo -e "\n${YEL}Trying to creating a VM named $VMNAME${NOR}\n${YEL}in GCE region/zone $ZONE${NOR}"
-echo -e "For faster access, export ZONE='something-closer-<any letter>'"
-echo 'List of regions and zones: https://cloud.google.com/compute/docs/regions-zones/'
-echo -e "${RED}(might take a minute/two. Errors ignored).${NOR}"
-showrun $CREATE_CMD || true # allow re-running commands below when "delete: N"
-
-# Any subsequent failure should prompt for VM deletion
-trap - INT
-trap delvm EXIT
-
-echo -e "\n${YEL}Waiting up to 30s for ssh port to open${NOR}"
-trap 'COUNT=9999' INT
-ATTEMPTS=10
-for (( COUNT=1 ; COUNT <= $ATTEMPTS ; COUNT++ ))
-do
- if $SSH_CMD --command "true"; then break; else sleep 3s; fi
-done
-if (( COUNT > $ATTEMPTS ))
-then
- echo -e "\n${RED}Failed${NOR}"
- exit 7
-fi
-echo -e "${YEL}Got it${NOR}"
-
-echo -e "\n${YEL}Removing and re-creating $GOSRC on $VMNAME.${NOR}"
-showrun $SSH_CMD --command "rm -rf $GOSRC"
-showrun $SSH_CMD --command "mkdir -p $GOSRC"
-
-echo -e "\n${YEL}Transferring tarball to $VMNAME.${NOR}"
-wait
-showrun $SCP_CMD $HOME/$TARBALL $SSHUSER@$VMNAME:/tmp/$TARBALL
-
-echo -e "\n${YEL}Unpacking tarball into $GOSRC on $VMNAME.${NOR}"
-showrun $SSH_CMD --command "tar xjf /tmp/$TARBALL -C $GOSRC"
+echo -e "For faster terminal access, export ZONE='<something-closer>'"
+echo -e 'Zone-list at: https://cloud.google.com/compute/docs/regions-zones/\n'
+if showrun $CREATE_CMD; then # Freshly created VM needs initial setup
+
+ echo -e "\n${YEL}Waiting up to 30s for ssh port to open${NOR}"
+ ATTEMPTS=10
+ trap "exit 1" INT
+ while ((ATTEMPTS)) && ! $SSH_CMD --command "true"; do
+ let "ATTEMPTS--"
+ echo -e "${RED}Nope, not yet.${NOR}"
+ sleep 3s
+ done
+ trap - INT
+ if ! ((ATTEMPTS)); then
+ echo -e "\n${RED}Failed${NOR}"
+ exit 7
+ fi
+ echo -e "${YEL}Got it. Cloning upstream repository as a starting point.${NOR}"
-echo -e "\n${YEL}Removing tarball on $VMNAME.${NOR}"
-showrun $SSH_CMD --command "rm -f /tmp/$TARBALL"
+ showrun $SSH_CMD -- "mkdir -p $GOSRC"
+ showrun $SSH_CMD -- "git clone --progress $GIT_REPO $GOSRC"
-echo -e "\n${YEL}Executing environment setup${NOR}"
-showrun $SSH_CMD --command "$SETUP_CMD"
+ if [[ -x "$HOME/$GCLOUD_CFGDIR/$HOOK_FILENAME" ]]; then
+ echo -e "\n${YEL}Copying hook to VM and executing (ignoring errors).${NOR}"
+ $PGCLOUD compute scp "/root/$GCLOUD_CFGDIR/$HOOK_FILENAME" root@$VMNAME:.
+ if ! showrun $SSH_CMD -- "cd $GOSRC && bash /root/$HOOK_FILENAME $(git branch --show-current) $(git rev-parse HEAD)"; then
+ echo "-e ${RED}Hook exited: $?${NOR}"
+ fi
+ fi
+fi
-VMIP=$($PGCLOUD compute instances describe $VMNAME --format='get(networkInterfaces[0].accessConfigs[0].natIP)')
+echo -e "\n${YEL}Generating connection script for $VMNAME.${NOR}"
+echo -e "Note: Script can be re-used in another terminal if needed."
+echo -e "${RED}(option to delete VM presented upon exiting).${NOR}"
+# TODO: This is fairly fragile, specifically the quoting for the remote command.
+echo '#!/bin/bash' > $TMPDIR/ssh
+echo "$SSH_CMD -- -t 'cd $GOSRC && exec env \"${ENVS[*]}\" bash -il'" >> $TMPDIR/ssh
+chmod +x $TMPDIR/ssh
-echo -e "\n${YEL}Connecting to $VMNAME${NOR}\nPublic IP Address: $VMIP\n${RED}(option to delete VM upon logout).${NOR}\n"
-if [[ -n "$ROOTLESS_USER" ]]
-then
- echo "Re-chowning source files after transfer"
- showrun $SSH_CMD --command "chown -R $ROOTLESS_USER $GOSRC"
- echo "Connecting as user $ROOTLESS_USER"
- SSH_CMD="$PGCLOUD compute ssh $ROOTLESS_USER@$VMNAME"
-fi
-showrun $SSH_CMD -- -t "cd $GOSRC && exec env $ENVS bash -il"
+showrun $TMPDIR/ssh
diff --git a/libpod/container.go b/libpod/container.go
index 9b4ccbd5f..01419500e 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -888,9 +888,22 @@ func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:in
return fmt.Sprintf("/proc/%d/ns/%s", c.state.PID, linuxNS.String()), nil
}
+// CgroupManager returns the cgroup manager used by the given container.
+func (c *Container) CgroupManager() string {
+ cgroupManager := c.config.CgroupManager
+ if cgroupManager == "" {
+ cgroupManager = c.runtime.config.Engine.CgroupManager
+ }
+ return cgroupManager
+}
+
// CGroupPath returns a cgroups "path" for a given container.
func (c *Container) CGroupPath() (string, error) {
+ cgroupManager := c.CgroupManager()
+
switch {
+ case c.config.NoCgroups || c.config.CgroupsMode == "disabled":
+ return "", errors.Wrapf(define.ErrNoCgroups, "this container is not creating cgroups")
case c.config.CgroupsMode == cgroupSplit:
if c.config.CgroupParent != "" {
return "", errors.Errorf("cannot specify cgroup-parent with cgroup-mode %q", cgroupSplit)
@@ -906,9 +919,9 @@ func (c *Container) CGroupPath() (string, error) {
return "", errors.Errorf("invalid cgroup for conmon %q", cg)
}
return strings.TrimSuffix(cg, "/supervisor") + "/container", nil
- case c.runtime.config.Engine.CgroupManager == config.CgroupfsCgroupsManager:
+ case cgroupManager == config.CgroupfsCgroupsManager:
return filepath.Join(c.config.CgroupParent, fmt.Sprintf("libpod-%s", c.ID())), nil
- case c.runtime.config.Engine.CgroupManager == config.SystemdCgroupsManager:
+ case cgroupManager == config.SystemdCgroupsManager:
if rootless.IsRootless() {
uid := rootless.GetRootlessUID()
parts := strings.SplitN(c.config.CgroupParent, "/", 2)
@@ -922,7 +935,7 @@ func (c *Container) CGroupPath() (string, error) {
}
return filepath.Join(c.config.CgroupParent, createUnitName("libpod", c.ID())), nil
default:
- return "", errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager %s in use", c.runtime.config.Engine.CgroupManager)
+ return "", errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager %s in use", cgroupManager)
}
}
diff --git a/libpod/container_config.go b/libpod/container_config.go
index fc93140dd..e264da4da 100644
--- a/libpod/container_config.go
+++ b/libpod/container_config.go
@@ -275,13 +275,16 @@ type ContainerMiscConfig struct {
StopTimeout uint `json:"stopTimeout,omitempty"`
// Time container was created
CreatedTime time.Time `json:"createdTime"`
+ // CgroupManager is the cgroup manager used to create this container.
+ // If empty, the runtime default will be used.
+ CgroupManager string `json:"cgroupManager,omitempty"`
// NoCgroups indicates that the container will not create CGroups. It is
// incompatible with CgroupParent. Deprecated in favor of CgroupsMode.
NoCgroups bool `json:"noCgroups,omitempty"`
// CgroupsMode indicates how the container will create cgroups
// (disabled, no-conmon, enabled). It supersedes NoCgroups.
CgroupsMode string `json:"cgroupsMode,omitempty"`
- // Cgroup parent of the container
+ // Cgroup parent of the container.
CgroupParent string `json:"cgroupParent"`
// LogPath log location
LogPath string `json:"logPath"`
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 835dccd71..b8bce1272 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -729,7 +729,7 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
// CGroup parent
// Need to check if it's the default, and not print if so.
defaultCgroupParent := ""
- switch c.runtime.config.Engine.CgroupManager {
+ switch c.CgroupManager() {
case config.CgroupfsCgroupsManager:
defaultCgroupParent = CgroupfsDefaultCgroupParent
case config.SystemdCgroupsManager:
@@ -738,6 +738,7 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
if c.config.CgroupParent != defaultCgroupParent {
hostConfig.CgroupParent = c.config.CgroupParent
}
+ hostConfig.CgroupManager = c.CgroupManager()
// PID namespace mode
pidMode := ""
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 41cc80789..3a71c6601 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -1965,6 +1965,7 @@ func (c *Container) getOCICgroupPath() (string, error) {
if err != nil {
return "", err
}
+ cgroupManager := c.CgroupManager()
switch {
case (rootless.IsRootless() && !unified) || c.config.NoCgroups:
return "", nil
@@ -1977,14 +1978,14 @@ func (c *Container) getOCICgroupPath() (string, error) {
return "", err
}
return filepath.Join(selfCgroup, "container"), nil
- case c.runtime.config.Engine.CgroupManager == config.SystemdCgroupsManager:
+ case cgroupManager == config.SystemdCgroupsManager:
// When the OCI runtime is set to use Systemd as a cgroup manager, it
// expects cgroups to be passed as follows:
// slice:prefix:name
systemdCgroups := fmt.Sprintf("%s:libpod:%s", path.Base(c.config.CgroupParent), c.ID())
logrus.Debugf("Setting CGroups for container %s to %s", c.ID(), systemdCgroups)
return systemdCgroups, nil
- case c.runtime.config.Engine.CgroupManager == config.CgroupfsCgroupsManager:
+ case cgroupManager == config.CgroupfsCgroupsManager:
cgroupPath, err := c.CGroupPath()
if err != nil {
return "", err
@@ -1992,7 +1993,7 @@ func (c *Container) getOCICgroupPath() (string, error) {
logrus.Debugf("Setting CGroup path for container %s to %s", c.ID(), cgroupPath)
return cgroupPath, nil
default:
- return "", errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager %s requested", c.runtime.config.Engine.CgroupManager)
+ return "", errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager %s requested", cgroupManager)
}
}
diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go
index 44c3d515b..38b3a6686 100644
--- a/libpod/define/container_inspect.go
+++ b/libpod/define/container_inspect.go
@@ -236,6 +236,9 @@ type InspectContainerHostConfig struct {
// include a Mounts field in inspect.
// Format: <src>:<destination>[:<comma-separated options>]
Binds []string `json:"Binds"`
+ // CgroupManager is the cgroup manager used by the container.
+ // At present, allowed values are either "cgroupfs" or "systemd".
+ CgroupManager string `json:"CgroupManager,omitempty"`
// CgroupMode is the configuration of the container's cgroup namespace.
// Populated as follows:
// private - a cgroup namespace has been created
diff --git a/libpod/image/docker_registry_options.go b/libpod/image/docker_registry_options.go
index 257b7ae8d..835473a1f 100644
--- a/libpod/image/docker_registry_options.go
+++ b/libpod/image/docker_registry_options.go
@@ -55,6 +55,7 @@ func (o DockerRegistryOptions) GetSystemContext(parent *types.SystemContext, add
sc.DockerRegistryUserAgent = parent.DockerRegistryUserAgent
sc.OSChoice = parent.OSChoice
sc.ArchitectureChoice = parent.ArchitectureChoice
+ sc.BlobInfoCacheDir = parent.BlobInfoCacheDir
}
return sc
}
diff --git a/libpod/kube.go b/libpod/kube.go
index 6df79e394..cd5064c84 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -307,18 +307,40 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) {
kubeContainer.StdinOnce = false
kubeContainer.TTY = c.config.Spec.Process.Terminal
- // TODO add CPU limit support.
if c.config.Spec.Linux != nil &&
- c.config.Spec.Linux.Resources != nil &&
- c.config.Spec.Linux.Resources.Memory != nil &&
- c.config.Spec.Linux.Resources.Memory.Limit != nil {
- if kubeContainer.Resources.Limits == nil {
- kubeContainer.Resources.Limits = v1.ResourceList{}
+ c.config.Spec.Linux.Resources != nil {
+ if c.config.Spec.Linux.Resources.Memory != nil &&
+ c.config.Spec.Linux.Resources.Memory.Limit != nil {
+ if kubeContainer.Resources.Limits == nil {
+ kubeContainer.Resources.Limits = v1.ResourceList{}
+ }
+
+ qty := kubeContainer.Resources.Limits.Memory()
+ qty.Set(*c.config.Spec.Linux.Resources.Memory.Limit)
+ kubeContainer.Resources.Limits[v1.ResourceMemory] = *qty
}
- qty := kubeContainer.Resources.Limits.Memory()
- qty.Set(*c.config.Spec.Linux.Resources.Memory.Limit)
- kubeContainer.Resources.Limits[v1.ResourceMemory] = *qty
+ if c.config.Spec.Linux.Resources.CPU != nil &&
+ c.config.Spec.Linux.Resources.CPU.Quota != nil &&
+ c.config.Spec.Linux.Resources.CPU.Period != nil {
+ quota := *c.config.Spec.Linux.Resources.CPU.Quota
+ period := *c.config.Spec.Linux.Resources.CPU.Period
+
+ if quota > 0 && period > 0 {
+ cpuLimitMilli := int64(1000 * float64(quota) / float64(period))
+
+ // Kubernetes: precision finer than 1m is not allowed
+ if cpuLimitMilli >= 1 {
+ if kubeContainer.Resources.Limits == nil {
+ kubeContainer.Resources.Limits = v1.ResourceList{}
+ }
+
+ qty := kubeContainer.Resources.Limits.Cpu()
+ qty.SetMilli(cpuLimitMilli)
+ kubeContainer.Resources.Limits[v1.ResourceCPU] = *qty
+ }
+ }
+ }
}
return kubeContainer, kubeVolumes, nil
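
The CPU branch above converts a CFS quota/period pair into Kubernetes millicores and skips limits that would fall below the 1m precision Kubernetes allows. A small sketch of that arithmetic in isolation; the sample quota/period values are illustrative assumptions:

```go
package main

import "fmt"

// cpuLimitMilli mirrors the conversion above: a CFS quota/period pair is
// expressed as Kubernetes millicores (1000m == 1 CPU). Results that would
// fall below 1m are dropped, since Kubernetes rejects finer precision.
func cpuLimitMilli(quota, period int64) (int64, bool) {
	if quota <= 0 || period <= 0 {
		return 0, false
	}
	milli := int64(1000 * float64(quota) / float64(period))
	if milli < 1 {
		return 0, false
	}
	return milli, true
}

func main() {
	// e.g. --cpus=1.5 typically maps to quota=150000, period=100000.
	if m, ok := cpuLimitMilli(150000, 100000); ok {
		fmt.Printf("cpu limit: %dm\n", m) // cpu limit: 1500m
	}
}
```
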
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 7fb374e0d..94630e57b 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -57,7 +57,6 @@ type ConmonOCIRuntime struct {
path string
conmonPath string
conmonEnv []string
- cgroupManager string
tmpDir string
exitsDir string
socketsDir string
@@ -102,7 +101,6 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
runtime.runtimeFlags = runtimeFlags
runtime.conmonEnv = runtimeCfg.Engine.ConmonEnvVars
- runtime.cgroupManager = runtimeCfg.Engine.CgroupManager
runtime.tmpDir = runtimeCfg.Engine.TmpDir
runtime.logSizeMax = runtimeCfg.Containers.LogSizeMax
runtime.noPivot = runtimeCfg.Engine.NoPivotRoot
@@ -149,10 +147,6 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits")
runtime.socketsDir = filepath.Join(runtime.tmpDir, "socket")
- if runtime.cgroupManager != config.CgroupfsCgroupsManager && runtime.cgroupManager != config.SystemdCgroupsManager {
- return nil, errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager specified: %s", runtime.cgroupManager)
- }
-
// Create the exit files and attach sockets directories
if err := os.MkdirAll(runtime.exitsDir, 0750); err != nil {
// The directory is allowed to exist
@@ -1325,7 +1319,7 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p
args = append(args, rFlags...)
}
- if r.cgroupManager == config.SystemdCgroupsManager && !ctr.config.NoCgroups && ctr.config.CgroupsMode != cgroupSplit {
+ if ctr.CgroupManager() == config.SystemdCgroupsManager && !ctr.config.NoCgroups && ctr.config.CgroupsMode != cgroupSplit {
args = append(args, "-s")
}
@@ -1442,8 +1436,10 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
}
if mustCreateCgroup {
+ // TODO: This should be a switch - we are not guaranteed that
+ // there are only 2 valid cgroup managers
cgroupParent := ctr.CgroupParent()
- if r.cgroupManager == config.SystemdCgroupsManager {
+ if ctr.CgroupManager() == config.SystemdCgroupsManager {
unitName := createUnitName("libpod-conmon", ctr.ID())
realCgroupParent := cgroupParent
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index abb97293f..51b4c5f03 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -208,6 +208,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Check CGroup parent sanity, and set it if it was not set.
// Only if we're actually configuring CGroups.
if !ctr.config.NoCgroups {
+ ctr.config.CgroupManager = r.config.Engine.CgroupManager
switch r.config.Engine.CgroupManager {
case config.CgroupfsCgroupsManager:
if ctr.config.CgroupParent == "" {
diff --git a/nix/nixpkgs.json b/nix/nixpkgs.json
index cd885fce2..31795516c 100644
--- a/nix/nixpkgs.json
+++ b/nix/nixpkgs.json
@@ -1,7 +1,7 @@
{
"url": "https://github.com/nixos/nixpkgs",
- "rev": "d5a689edda8219a1e20fd3871174b994cf0a94a3",
- "date": "2020-09-13T01:58:20+02:00",
- "sha256": "0m6nmi1fx0glfbg52kqdjgidxylk4p5xnx9v35wlsfi1j2xhkia4",
+ "rev": "c095d986c73b4e3d82af299b4175b9b475ebbf3a",
+ "date": "2020-10-07T23:58:44-03:00",
+ "sha256": "0ygv3wq26mxvy6kahs95ivl6n80bac3pbh6xmgw9ijcnnr03lm01",
"fetchSubmodules": false
}
diff --git a/pkg/api/handlers/compat/containers_create.go b/pkg/api/handlers/compat/containers_create.go
index a24dbaa47..4ce31cc83 100644
--- a/pkg/api/handlers/compat/containers_create.go
+++ b/pkg/api/handlers/compat/containers_create.go
@@ -14,8 +14,10 @@ import (
"github.com/containers/podman/v2/pkg/api/handlers"
"github.com/containers/podman/v2/pkg/api/handlers/utils"
"github.com/containers/podman/v2/pkg/namespaces"
+ "github.com/containers/podman/v2/pkg/rootless"
"github.com/containers/podman/v2/pkg/signal"
createconfig "github.com/containers/podman/v2/pkg/spec"
+ "github.com/containers/podman/v2/pkg/specgen"
"github.com/containers/storage"
"github.com/gorilla/schema"
"github.com/pkg/errors"
@@ -134,6 +136,11 @@ func makeCreateConfig(ctx context.Context, containerConfig *config.Config, input
Sysctl: input.HostConfig.Sysctls,
}
+ var netmode namespaces.NetworkMode
+ if rootless.IsRootless() {
+ netmode = namespaces.NetworkMode(specgen.Slirp)
+ }
+
network := createconfig.NetworkConfig{
DNSOpt: input.HostConfig.DNSOptions,
DNSSearch: input.HostConfig.DNSSearch,
@@ -144,7 +151,7 @@ func makeCreateConfig(ctx context.Context, containerConfig *config.Config, input
IPAddress: "",
LinkLocalIP: nil, // docker-only
MacAddress: input.MacAddress,
- // NetMode: nil,
+ NetMode: netmode,
Network: input.HostConfig.NetworkMode.NetworkName(),
NetworkAlias: nil, // docker-only now
PortBindings: input.HostConfig.PortBindings,
diff --git a/pkg/api/handlers/compat/images.go b/pkg/api/handlers/compat/images.go
index 9d8bc497a..f49ce59da 100644
--- a/pkg/api/handlers/compat/images.go
+++ b/pkg/api/handlers/compat/images.go
@@ -55,6 +55,7 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
return
}
+ defer os.Remove(tmpfile.Name())
if err := tmpfile.Close(); err != nil {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
return
@@ -69,7 +70,6 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
return
}
defer rdr.Close()
- defer os.Remove(tmpfile.Name())
utils.WriteResponse(w, http.StatusOK, rdr)
}
@@ -398,3 +398,43 @@ func LoadImages(w http.ResponseWriter, r *http.Request) {
Stream: fmt.Sprintf("Loaded image: %s\n", id),
})
}
+
+func ExportImages(w http.ResponseWriter, r *http.Request) {
+ // 200 OK
+ // 500 Error
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+
+ query := struct {
+ Names string `schema:"names"`
+ }{
+ // This is where you can override the golang default value for one of the fields
+ }
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+ return
+ }
+ images := make([]string, 0)
+ images = append(images, strings.Split(query.Names, ",")...)
+ tmpfile, err := ioutil.TempFile("", "api.tar")
+ if err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
+ return
+ }
+ defer os.Remove(tmpfile.Name())
+ if err := tmpfile.Close(); err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
+ return
+ }
+ if err := runtime.ImageRuntime().SaveImages(r.Context(), images, "docker-archive", tmpfile.Name(), false); err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ rdr, err := os.Open(tmpfile.Name())
+ if err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "failed to read the exported tarfile"))
+ return
+ }
+ defer rdr.Close()
+ utils.WriteResponse(w, http.StatusOK, rdr)
+}
diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go
index cb0d26d1e..ad779203d 100644
--- a/pkg/api/server/register_images.go
+++ b/pkg/api/server/register_images.go
@@ -275,6 +275,31 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
r.Handle(VersionedPath("/images/{name:.*}/get"), s.APIHandler(compat.ExportImage)).Methods(http.MethodGet)
// Added non version path to URI to support docker non versioned paths
r.Handle("/images/{name:.*}/get", s.APIHandler(compat.ExportImage)).Methods(http.MethodGet)
+ // swagger:operation GET /images/get compat get
+ // ---
+ // tags:
+ // - images (compat)
+ // summary: Export several images
+ // description: Get a tarball containing all images and metadata for several image repositories
+ // parameters:
+ // - in: query
+ // name: names
+ // type: string
+ // required: true
+ // description: one or more image names or IDs comma separated
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // description: no error
+ // schema:
+ // type: string
+ // format: binary
+ // 500:
+ // $ref: '#/responses/InternalError'
+ r.Handle(VersionedPath("/images/get"), s.APIHandler(compat.ExportImages)).Methods(http.MethodGet)
+ // Added non version path to URI to support docker non versioned paths
+ r.Handle("/images/get", s.APIHandler(compat.ExportImages)).Methods(http.MethodGet)
// swagger:operation GET /images/{name:.*}/history compat imageHistory
// ---
// tags:
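
The new compat route above streams a `docker-archive` tarball for the comma-separated `names` query parameter. A hedged sketch of exercising it from Go over the podman API socket; the socket path, base URL, and image names here are assumptions, not values taken from this change:

```go
package main

import (
	"context"
	"io"
	"net"
	"net/http"
	"os"
)

func main() {
	// Assumed rootful API socket path; rootless setups typically use
	// $XDG_RUNTIME_DIR/podman/podman.sock instead.
	const sock = "/run/podman/podman.sock"

	client := &http.Client{
		Transport: &http.Transport{
			// Dial the unix socket instead of a TCP address.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", sock)
			},
		},
	}

	// The host portion of the URL is ignored; the transport above always
	// connects to the socket. Image names are illustrative.
	resp, err := client.Get("http://d/images/get?names=alpine,busybox")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Save the docker-archive tarball returned by the endpoint.
	out, err := os.Create("images.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}
```
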
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index 7b272f01e..3b6dd106f 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -131,7 +131,6 @@ type RmOptions struct {
Force bool
Ignore bool
Latest bool
- Storage bool
Volumes bool
}
diff --git a/pkg/domain/entities/play.go b/pkg/domain/entities/play.go
index 2ba369b83..356e6869d 100644
--- a/pkg/domain/entities/play.go
+++ b/pkg/domain/entities/play.go
@@ -24,6 +24,8 @@ type PlayKubeOptions struct {
// SeccompProfileRoot - path to a directory containing seccomp
// profiles.
SeccompProfileRoot string
+ // ConfigMaps - slice of pathnames to kubernetes configmap YAMLs.
+ ConfigMaps []string
}
// PlayKubePod represents a single pod and associated containers created by play kube
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 0107e18c4..ac7523094 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -273,16 +273,6 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st
func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, options entities.RmOptions) ([]*entities.RmReport, error) {
reports := []*entities.RmReport{}
- if options.Storage {
- for _, ctr := range namesOrIds {
- report := entities.RmReport{Id: ctr}
- if err := ic.Libpod.RemoveStorageContainer(ctr, options.Force); err != nil {
- report.Err = err
- }
- reports = append(reports, &report)
- }
- return reports, nil
- }
names := namesOrIds
for _, cidFile := range options.CIDFiles {
@@ -294,6 +284,22 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
names = append(names, id)
}
+ // Attempt to remove named containers directly from storage. If a container is defined in libpod,
+ // this will fail and the code will fall through to removing the container from libpod.
+ tmpNames := []string{}
+ for _, ctr := range names {
+ report := entities.RmReport{Id: ctr}
+ if err := ic.Libpod.RemoveStorageContainer(ctr, options.Force); err != nil {
+ // Storage removal failed; keep the name so the container is removed from libpod below
+ tmpNames = append(tmpNames, ctr)
+ } else {
+ reports = append(reports, &report)
+ }
+ }
+ if len(tmpNames) < len(names) {
+ names = tmpNames
+ }
+
ctrs, err := getContainersByContext(options.All, options.Latest, names, ic.Libpod)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
// Failed to get containers. If force is specified, get the containers ID
@@ -302,7 +308,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
return nil, err
}
- for _, ctr := range namesOrIds {
+ for _, ctr := range names {
logrus.Debugf("Evicting container %q", ctr)
report := entities.RmReport{Id: ctr}
id, err := ic.Libpod.EvictContainer(ctx, ctr, options.Volumes)
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 40edc1ae3..2de98d8f5 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -311,6 +311,22 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
ctrRestartPolicy = libpod.RestartPolicyAlways
}
+ configMaps := []v1.ConfigMap{}
+ for _, p := range options.ConfigMaps {
+ f, err := os.Open(p)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ cm, err := readConfigMapFromFile(f)
+ if err != nil {
+ return nil, errors.Wrapf(err, "%q", p)
+ }
+
+ configMaps = append(configMaps, cm)
+ }
+
containers := make([]*libpod.Container, 0, len(podYAML.Spec.Containers))
for _, container := range podYAML.Spec.Containers {
pullPolicy := util.PullImageMissing
@@ -334,7 +350,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
if err != nil {
return nil, err
}
- conf, err := kubeContainerToCreateConfig(ctx, container, newImage, namespaces, volumes, pod.ID(), podName, podInfraID, seccompPaths)
+ conf, err := kubeContainerToCreateConfig(ctx, container, newImage, namespaces, volumes, pod.ID(), podName, podInfraID, configMaps, seccompPaths)
if err != nil {
return nil, err
}
@@ -447,7 +463,7 @@ func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfi
}
// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
-func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, podName, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
+func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, podName, infraID string, configMaps []v1.ConfigMap, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
var (
containerConfig createconfig.CreateConfig
pidConfig createconfig.PidConfig
@@ -572,8 +588,17 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
}
envs = imageEnv
}
- for _, e := range containerYAML.Env {
- envs[e.Name] = e.Value
+ for _, env := range containerYAML.Env {
+ value := envVarValue(env, configMaps)
+
+ envs[env.Name] = value
+ }
+ for _, envFrom := range containerYAML.EnvFrom {
+ cmEnvs := envVarsFromConfigMap(envFrom, configMaps)
+
+ for k, v := range cmEnvs {
+ envs[k] = v
+ }
}
containerConfig.Env = envs
@@ -594,6 +619,62 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
return &containerConfig, nil
}
+// readConfigMapFromFile returns a kubernetes configMap obtained from --configmap flag
+func readConfigMapFromFile(r io.Reader) (v1.ConfigMap, error) {
+ var cm v1.ConfigMap
+
+ content, err := ioutil.ReadAll(r)
+ if err != nil {
+ return cm, errors.Wrapf(err, "unable to read ConfigMap YAML content")
+ }
+
+ if err := yaml.Unmarshal(content, &cm); err != nil {
+ return cm, errors.Wrapf(err, "unable to read YAML as Kube ConfigMap")
+ }
+
+ if cm.Kind != "ConfigMap" {
+ return cm, errors.Errorf("invalid YAML kind: %q. [ConfigMap] is the only kind supported by --configmap", cm.Kind)
+ }
+
+ return cm, nil
+}
+
+// envVarsFromConfigMap returns all key-value pairs as env vars from a configMap that matches the envFrom setting of a container
+func envVarsFromConfigMap(envFrom v1.EnvFromSource, configMaps []v1.ConfigMap) map[string]string {
+ envs := map[string]string{}
+
+ if envFrom.ConfigMapRef != nil {
+ cmName := envFrom.ConfigMapRef.Name
+
+ for _, c := range configMaps {
+ if cmName == c.Name {
+ envs = c.Data
+ break
+ }
+ }
+ }
+
+ return envs
+}
+
+// envVarValue returns the environment variable value configured within the container's env setting.
+// It gets the value from a configMap if specified, otherwise returns env.Value
+func envVarValue(env v1.EnvVar, configMaps []v1.ConfigMap) string {
+ for _, c := range configMaps {
+ if env.ValueFrom != nil {
+ if env.ValueFrom.ConfigMapKeyRef != nil {
+ if env.ValueFrom.ConfigMapKeyRef.Name == c.Name {
+ if value, ok := c.Data[env.ValueFrom.ConfigMapKeyRef.Key]; ok {
+ return value
+ }
+ }
+ }
+ }
+ }
+
+ return env.Value
+}
+
// kubeSeccompPaths holds information about a pod YAML's seccomp configuration
// it holds both container and pod seccomp paths
type kubeSeccompPaths struct {
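
For context, the --configmap flow above composes simply: playKubePod opens each file passed via --configmap and parses it with readConfigMapFromFile, then kubeContainerToCreateConfig resolves every container env entry against the parsed list through envVarValue and envVarsFromConfigMap. A minimal same-package sketch of that resolution path (resolveConfigMapKey is a hypothetical helper for illustration, not part of this patch):

    package abi // sketch only: sits next to the unexported helpers above

    import (
    	"os"

    	v1 "k8s.io/api/core/v1"
    )

    // resolveConfigMapKey (hypothetical) parses one --configmap file and resolves a
    // single key the same way playKubePod does: build an EnvVar carrying a
    // ConfigMapKeyRef and hand it to envVarValue.
    func resolveConfigMapKey(path, key string) (string, error) {
    	f, err := os.Open(path)
    	if err != nil {
    		return "", err
    	}
    	defer f.Close()

    	cm, err := readConfigMapFromFile(f)
    	if err != nil {
    		return "", err
    	}

    	env := v1.EnvVar{
    		Name: key,
    		ValueFrom: &v1.EnvVarSource{
    			ConfigMapKeyRef: &v1.ConfigMapKeySelector{
    				LocalObjectReference: v1.LocalObjectReference{Name: cm.Name},
    				Key:                  key,
    			},
    		},
    	}

    	// envVarValue falls back to env.Value (empty here) if the map or key is missing.
    	return envVarValue(env, []v1.ConfigMap{cm}), nil
    }
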
diff --git a/pkg/domain/infra/abi/play_test.go b/pkg/domain/infra/abi/play_test.go
new file mode 100644
index 000000000..5595476c3
--- /dev/null
+++ b/pkg/domain/infra/abi/play_test.go
@@ -0,0 +1,254 @@
+package abi
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var configMapList = []v1.ConfigMap{
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ConfigMap",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "bar",
+ },
+ Data: map[string]string{
+ "myvar": "bar",
+ },
+ },
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ConfigMap",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ },
+ Data: map[string]string{
+ "myvar": "foo",
+ },
+ },
+}
+
+func TestReadConfigMapFromFile(t *testing.T) {
+ tests := []struct {
+ name string
+ configMapContent string
+ expectError bool
+ expectedErrorMsg string
+ expected v1.ConfigMap
+ }{
+ {
+ "ValidConfigMap",
+ `
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: foo
+data:
+ myvar: foo
+`,
+ false,
+ "",
+ v1.ConfigMap{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ConfigMap",
+ APIVersion: "v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ },
+ Data: map[string]string{
+ "myvar": "foo",
+ },
+ },
+ },
+ {
+ "InvalidYAML",
+ `
+Invalid YAML
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: foo
+data:
+ myvar: foo
+`,
+ true,
+ "unable to read YAML as Kube ConfigMap",
+ v1.ConfigMap{},
+ },
+ {
+ "InvalidKind",
+ `
+apiVersion: v1
+kind: InvalidKind
+metadata:
+ name: foo
+data:
+ myvar: foo
+`,
+ true,
+ "invalid YAML kind",
+ v1.ConfigMap{},
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ buf := bytes.NewBufferString(test.configMapContent)
+ cm, err := readConfigMapFromFile(buf)
+
+ if test.expectError {
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), test.expectedErrorMsg)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, test.expected, cm)
+ }
+ })
+ }
+}
+
+func TestEnvVarsFromConfigMap(t *testing.T) {
+ tests := []struct {
+ name string
+ envFrom v1.EnvFromSource
+ configMapList []v1.ConfigMap
+ expected map[string]string
+ }{
+ {
+ "ConfigMapExists",
+ v1.EnvFromSource{
+ ConfigMapRef: &v1.ConfigMapEnvSource{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ },
+ },
+ configMapList,
+ map[string]string{
+ "myvar": "foo",
+ },
+ },
+ {
+ "ConfigMapDoesNotExist",
+ v1.EnvFromSource{
+ ConfigMapRef: &v1.ConfigMapEnvSource{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "doesnotexist",
+ },
+ },
+ },
+ configMapList,
+ map[string]string{},
+ },
+ {
+ "EmptyConfigMapList",
+ v1.EnvFromSource{
+ ConfigMapRef: &v1.ConfigMapEnvSource{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ },
+ },
+ []v1.ConfigMap{},
+ map[string]string{},
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ result := envVarsFromConfigMap(test.envFrom, test.configMapList)
+ assert.Equal(t, test.expected, result)
+ })
+ }
+}
+
+func TestEnvVarValue(t *testing.T) {
+ tests := []struct {
+ name string
+ envVar v1.EnvVar
+ configMapList []v1.ConfigMap
+ expected string
+ }{
+ {
+ "ConfigMapExists",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ Key: "myvar",
+ },
+ },
+ },
+ configMapList,
+ "foo",
+ },
+ {
+ "ContainerKeyDoesNotExistInConfigMap",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ Key: "doesnotexist",
+ },
+ },
+ },
+ configMapList,
+ "",
+ },
+ {
+ "ConfigMapDoesNotExist",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "doesnotexist",
+ },
+ Key: "myvar",
+ },
+ },
+ },
+ configMapList,
+ "",
+ },
+ {
+ "EmptyConfigMapList",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ Key: "myvar",
+ },
+ },
+ },
+ []v1.ConfigMap{},
+ "",
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ result := envVarValue(test.envVar, test.configMapList)
+ assert.Equal(t, test.expected, result)
+ })
+ }
+}
diff --git a/test/apiv2/10-images.at b/test/apiv2/10-images.at
index bdc298ae3..f669bc892 100644
--- a/test/apiv2/10-images.at
+++ b/test/apiv2/10-images.at
@@ -68,4 +68,7 @@ for i in $iid ${iid:0:12} $PODMAN_TEST_IMAGE_NAME; do
t GET "libpod/images/$i/get?compress=false" 200 '[POSIX tar archive]'
done
+# Export more than one image
+t GET images/get?names=alpine,busybox 200 '[POSIX tar archive]'
+
# vim: filetype=sh
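
The new multi-image export accepts a comma-separated names list and streams a single tar archive. A hedged Go sketch of calling the compat endpoint over the service socket (the socket path and output filename are assumptions; rootless setups typically use $XDG_RUNTIME_DIR/podman/podman.sock):

    package main

    import (
    	"context"
    	"io"
    	"net"
    	"net/http"
    	"os"
    )

    func main() {
    	// Assumption: a rootful `podman system service` listening on the default socket.
    	const sock = "/run/podman/podman.sock"

    	client := &http.Client{
    		Transport: &http.Transport{
    			// Ignore the URL's host and always dial the unix socket.
    			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
    				return (&net.Dialer{}).DialContext(ctx, "unix", sock)
    			},
    		},
    	}

    	// Same request the test above exercises; "d" is a dummy hostname.
    	resp, err := client.Get("http://d/images/get?names=alpine,busybox")
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()

    	// The response body is one tar archive containing both images.
    	out, err := os.Create("images.tar")
    	if err != nil {
    		panic(err)
    	}
    	defer out.Close()
    	if _, err := io.Copy(out, resp.Body); err != nil {
    		panic(err)
    	}
    }
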
diff --git a/test/apiv2/20-containers.at b/test/apiv2/20-containers.at
index 28289955a..d7e5bfee8 100644
--- a/test/apiv2/20-containers.at
+++ b/test/apiv2/20-containers.at
@@ -206,7 +206,7 @@ t POST containers/${cid_top}/stop "" 204
t DELETE containers/$cid 204
t DELETE containers/$cid_top 204
-# test the apiv2 create, should't ignore the ENV and WORKDIR from the image
+# test the apiv2 create, shouldn't ignore the ENV and WORKDIR from the image
t POST containers/create '"Image":"'$ENV_WORKDIR_IMG'","Env":["testKey1"]' 201 \
.Id~[0-9a-f]\\{64\\}
cid=$(jq -r '.Id' <<<"$output")
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
index 05a7f4ddf..3c4a1008b 100644
--- a/test/e2e/generate_kube_test.go
+++ b/test/e2e/generate_kube_test.go
@@ -260,6 +260,38 @@ var _ = Describe("Podman generate kube", func() {
}
})
+ It("podman generate kube on pod with cpu limit", func() {
+ podName := "testCpuLimit"
+ podSession := podmanTest.Podman([]string{"pod", "create", "--name", podName})
+ podSession.WaitWithDefaultTimeout()
+ Expect(podSession.ExitCode()).To(Equal(0))
+
+ ctr1Name := "ctr1"
+ ctr1Session := podmanTest.Podman([]string{"create", "--name", ctr1Name, "--pod", podName,
+ "--cpus", "0.5", ALPINE, "top"})
+ ctr1Session.WaitWithDefaultTimeout()
+ Expect(ctr1Session.ExitCode()).To(Equal(0))
+
+ ctr2Name := "ctr2"
+ ctr2Session := podmanTest.Podman([]string{"create", "--name", ctr2Name, "--pod", podName,
+ "--cpu-period", "100000", "--cpu-quota", "50000", ALPINE, "top"})
+ ctr2Session.WaitWithDefaultTimeout()
+ Expect(ctr2Session.ExitCode()).To(Equal(0))
+
+ kube := podmanTest.Podman([]string{"generate", "kube", podName})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ pod := new(v1.Pod)
+ err := yaml.Unmarshal(kube.Out.Contents(), pod)
+ Expect(err).To(BeNil())
+
+ for _, ctr := range pod.Spec.Containers {
+ cpuLimit := ctr.Resources.Limits.Cpu().MilliValue()
+ Expect(cpuLimit).To(Equal(int64(500)))
+ }
+ })
+
It("podman generate kube on pod with ports", func() {
podName := "test"
podSession := podmanTest.Podman([]string{"pod", "create", "--name", podName, "-p", "4000:4000", "-p", "5000:5000"})
diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go
index a15359ea3..cbfd72da6 100644
--- a/test/e2e/network_test.go
+++ b/test/e2e/network_test.go
@@ -28,7 +28,7 @@ func removeConf(confPath string) {
// generateNetworkConfig generates a cni config with a random name
// it returns the network name and the filepath
func generateNetworkConfig(p *PodmanTestIntegration) (string, string) {
- // generate a random name to preven conflicts with other tests
+ // generate a random name to prevent conflicts with other tests
name := "net" + stringid.GenerateNonCryptoID()
path := filepath.Join(p.CNIConfigDir, fmt.Sprintf("%s.conflist", name))
conf := fmt.Sprintf(`{
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index b7398a58a..b6a390950 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -25,6 +25,19 @@ spec:
hostname: unknown
`
+var configMapYamlTemplate = `
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Name }}
+data:
+{{ with .Data }}
+ {{ range $key, $value := . }}
+ {{ $key }}: {{ $value }}
+ {{ end }}
+{{ end }}
+`
+
var podYamlTemplate = `
apiVersion: v1
kind: Pod
@@ -75,6 +88,26 @@ spec:
- name: HOSTNAME
- name: container
value: podman
+ {{ range .Env }}
+ - name: {{ .Name }}
+ {{ if (eq .ValueFrom "configmap") }}
+ valueFrom:
+ configMapKeyRef:
+ name: {{ .RefName }}
+ key: {{ .RefKey }}
+ {{ else }}
+ value: {{ .Value }}
+ {{ end }}
+ {{ end }}
+ {{ with .EnvFrom}}
+ envFrom:
+ {{ range . }}
+ {{ if (eq .From "configmap") }}
+ - configMapRef:
+ name: {{ .Name }}
+ {{ end }}
+ {{ end }}
+ {{ end }}
image: {{ .Image }}
name: {{ .Name }}
imagePullPolicy: {{ .PullPolicy }}
@@ -226,6 +259,7 @@ var (
defaultPodName = "testPod"
defaultVolName = "testVol"
defaultDeploymentName = "testDeployment"
+ defaultConfigMapName = "testConfigMap"
seccompPwdEPERM = []byte(`{"defaultAction":"SCMP_ACT_ALLOW","syscalls":[{"name":"getcwd","action":"SCMP_ACT_ERRNO"}]}`)
)
@@ -244,34 +278,64 @@ func writeYaml(content string, fileName string) error {
return nil
}
-func generatePodKubeYaml(pod *Pod, fileName string) error {
+func generateKubeYaml(kind string, object interface{}, pathname string) error {
+ var yamlTemplate string
templateBytes := &bytes.Buffer{}
- t, err := template.New("pod").Parse(podYamlTemplate)
+ switch kind {
+ case "configmap":
+ yamlTemplate = configMapYamlTemplate
+ case "pod":
+ yamlTemplate = podYamlTemplate
+ case "deployment":
+ yamlTemplate = deploymentYamlTemplate
+ default:
+ return fmt.Errorf("unsupported kubernetes kind")
+ }
+
+ t, err := template.New(kind).Parse(yamlTemplate)
if err != nil {
return err
}
- if err := t.Execute(templateBytes, pod); err != nil {
+ if err := t.Execute(templateBytes, object); err != nil {
return err
}
- return writeYaml(templateBytes.String(), fileName)
+ return writeYaml(templateBytes.String(), pathname)
}
-func generateDeploymentKubeYaml(deployment *Deployment, fileName string) error {
- templateBytes := &bytes.Buffer{}
+// ConfigMap describes the options a kube yaml can be configured at configmap level
+type ConfigMap struct {
+ Name string
+ Data map[string]string
+}
- t, err := template.New("deployment").Parse(deploymentYamlTemplate)
- if err != nil {
- return err
+func getConfigMap(options ...configMapOption) *ConfigMap {
+ cm := ConfigMap{
+ Name: defaultConfigMapName,
+ Data: map[string]string{},
}
- if err := t.Execute(templateBytes, deployment); err != nil {
- return err
+ for _, option := range options {
+ option(&cm)
}
- return writeYaml(templateBytes.String(), fileName)
+ return &cm
+}
+
+type configMapOption func(*ConfigMap)
+
+func withConfigMapName(name string) configMapOption {
+ return func(configmap *ConfigMap) {
+ configmap.Name = name
+ }
+}
+
+func withConfigMapData(k, v string) configMapOption {
+ return func(configmap *ConfigMap) {
+ configmap.Data[k] = v
+ }
}
// Pod describes the options a kube yaml can be configured at pod level
@@ -450,12 +514,14 @@ type Ctr struct {
VolumeMountPath string
VolumeName string
VolumeReadOnly bool
+ Env []Env
+ EnvFrom []EnvFrom
}
// getCtr takes a list of ctrOptions and returns a Ctr with sane defaults
// and the configured options
func getCtr(options ...ctrOption) *Ctr {
- c := Ctr{defaultCtrName, defaultCtrImage, defaultCtrCmd, defaultCtrArg, true, false, nil, nil, "", "", "", false, "", "", false}
+ c := Ctr{defaultCtrName, defaultCtrImage, defaultCtrCmd, defaultCtrArg, true, false, nil, nil, "", "", "", false, "", "", false, []Env{}, []EnvFrom{}}
for _, option := range options {
option(&c)
}
@@ -524,6 +590,31 @@ func withVolumeMount(mountPath string, readonly bool) ctrOption {
}
}
+func withEnv(name, value, valueFrom, refName, refKey string) ctrOption {
+ return func(c *Ctr) {
+ e := Env{
+ Name: name,
+ Value: value,
+ ValueFrom: valueFrom,
+ RefName: refName,
+ RefKey: refKey,
+ }
+
+ c.Env = append(c.Env, e)
+ }
+}
+
+func withEnvFrom(name, from string) ctrOption {
+ return func(c *Ctr) {
+ e := EnvFrom{
+ Name: name,
+ From: from,
+ }
+
+ c.EnvFrom = append(c.EnvFrom, e)
+ }
+}
+
func getCtrNameInPod(pod *Pod) string {
return fmt.Sprintf("%s-%s", pod.Name, defaultCtrName)
}
@@ -544,6 +635,19 @@ func getVolume(vType, vPath string) *Volume {
}
}
+type Env struct {
+ Name string
+ Value string
+ ValueFrom string
+ RefName string
+ RefKey string
+}
+
+type EnvFrom struct {
+ Name string
+ From string
+}
+
var _ = Describe("Podman generate kube", func() {
var (
tempdir string
@@ -581,7 +685,7 @@ var _ = Describe("Podman generate kube", func() {
})
It("podman play kube fail with nonexist authfile", func() {
- err := generatePodKubeYaml(getPod(), kubeYaml)
+ err := generateKubeYaml("pod", getPod(), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", "--authfile", "/tmp/nonexist", kubeYaml})
@@ -592,7 +696,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test correct command", func() {
pod := getPod()
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -609,7 +713,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test correct command with only set command in yaml file", func() {
pod := getPod(withCtr(getCtr(withCmd([]string{"echo", "hello"}), withArg(nil))))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -626,7 +730,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test correct command with only set args in yaml file", func() {
pod := getPod(withCtr(getCtr(withImage(redis), withCmd(nil), withArg([]string{"echo", "hello"}))))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -644,7 +748,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test correct output", func() {
p := getPod(withCtr(getCtr(withCmd([]string{"echo", "hello"}), withArg([]string{"world"}))))
- err := generatePodKubeYaml(p, kubeYaml)
+ err := generateKubeYaml("pod", p, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -665,14 +769,14 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test restartPolicy", func() {
// podName, set, expect
testSli := [][]string{
- {"testPod1", "", "always"}, // Default eqaul to always
+ {"testPod1", "", "always"}, // Default equal to always
{"testPod2", "Always", "always"},
{"testPod3", "OnFailure", "on-failure"},
{"testPod4", "Never", "no"},
}
for _, v := range testSli {
pod := getPod(withPodName(v[0]), withRestartPolicy(v[1]))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -686,9 +790,52 @@ var _ = Describe("Podman generate kube", func() {
}
})
+ It("podman play kube test env value from configmap", func() {
+ SkipIfRemote("configmap list is not supported as a param")
+ cmYamlPathname := filepath.Join(podmanTest.TempDir, "foo-cm.yaml")
+ cm := getConfigMap(withConfigMapName("foo"), withConfigMapData("FOO", "foo"))
+ err := generateKubeYaml("configmap", cm, cmYamlPathname)
+ Expect(err).To(BeNil())
+
+ pod := getPod(withCtr(getCtr(withEnv("FOO", "", "configmap", "foo", "FOO"))))
+ err = generateKubeYaml("pod", pod, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml, "--configmap", cmYamlPathname})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "'{{ .Config.Env }}'"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring(`FOO=foo`))
+ })
+
+ It("podman play kube test get all key-value pairs from configmap as envs", func() {
+ SkipIfRemote("configmap list is not supported as a param")
+ cmYamlPathname := filepath.Join(podmanTest.TempDir, "foo-cm.yaml")
+ cm := getConfigMap(withConfigMapName("foo"), withConfigMapData("FOO1", "foo1"), withConfigMapData("FOO2", "foo2"))
+ err := generateKubeYaml("configmap", cm, cmYamlPathname)
+ Expect(err).To(BeNil())
+
+ pod := getPod(withCtr(getCtr(withEnvFrom("foo", "configmap"))))
+ err = generateKubeYaml("pod", pod, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml, "--configmap", cmYamlPathname})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "'{{ .Config.Env }}'"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring(`FOO1=foo1`))
+ Expect(inspect.OutputToString()).To(ContainSubstring(`FOO2=foo2`))
+ })
+
It("podman play kube test hostname", func() {
pod := getPod()
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -704,7 +851,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test with customized hostname", func() {
hostname := "myhostname"
pod := getPod(withHostname(hostname))
- err := generatePodKubeYaml(getPod(withHostname(hostname)), kubeYaml)
+ err := generateKubeYaml("pod", getPod(withHostname(hostname)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -727,7 +874,7 @@ var _ = Describe("Podman generate kube", func() {
"test4.podman.io",
}),
)
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -746,7 +893,7 @@ var _ = Describe("Podman generate kube", func() {
ctr := getCtr(withCapAdd([]string{capAdd}), withCmd([]string{"cat", "/proc/self/status"}), withArg(nil))
pod := getPod(withCtr(ctr))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -764,7 +911,7 @@ var _ = Describe("Podman generate kube", func() {
ctr := getCtr(withCapDrop([]string{capDrop}))
pod := getPod(withCtr(ctr))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -780,7 +927,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube no security context", func() {
// expect play kube to not fail if no security context is specified
pod := getPod(withCtr(getCtr(withSecurityContext(false))))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -805,7 +952,7 @@ var _ = Describe("Podman generate kube", func() {
ctr := getCtr(withCmd([]string{"pwd"}), withArg(nil))
pod := getPod(withCtr(ctr), withAnnotation(ctrAnnotation, "localhost/"+filepath.Base(jsonFile)))
- err = generatePodKubeYaml(pod, kubeYaml)
+ err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
// CreateSeccompJson will put the profile into podmanTest.TempDir. Use --seccomp-profile-root to tell play kube where to look
@@ -832,7 +979,7 @@ var _ = Describe("Podman generate kube", func() {
ctr := getCtr(withCmd([]string{"pwd"}), withArg(nil))
pod := getPod(withCtr(ctr), withAnnotation("seccomp.security.alpha.kubernetes.io/pod", "localhost/"+filepath.Base(jsonFile)))
- err = generatePodKubeYaml(pod, kubeYaml)
+ err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
// CreateSeccompJson will put the profile into podmanTest.TempDir. Use --seccomp-profile-root to tell play kube where to look
@@ -848,7 +995,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube with pull policy of never should be 125", func() {
ctr := getCtr(withPullPolicy("never"), withImage(BB_GLIBC))
- err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generateKubeYaml("pod", getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -858,7 +1005,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube with pull policy of missing", func() {
ctr := getCtr(withPullPolicy("missing"), withImage(BB))
- err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generateKubeYaml("pod", getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -884,7 +1031,7 @@ var _ = Describe("Podman generate kube", func() {
oldBBinspect := inspect.InspectImageJSON()
ctr := getCtr(withPullPolicy("always"), withImage(BB))
- err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generateKubeYaml("pod", getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -915,7 +1062,7 @@ var _ = Describe("Podman generate kube", func() {
oldBBinspect := inspect.InspectImageJSON()
ctr := getCtr(withImage(BB))
- err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generateKubeYaml("pod", getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -973,7 +1120,7 @@ spec:
// Deployment related tests
It("podman play kube deployment 1 replica test correct command", func() {
deployment := getDeployment()
- err := generateDeploymentKubeYaml(deployment, kubeYaml)
+ err := generateKubeYaml("deployment", deployment, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -993,7 +1140,7 @@ spec:
var i, numReplicas int32
numReplicas = 5
deployment := getDeployment(withReplicas(numReplicas))
- err := generateDeploymentKubeYaml(deployment, kubeYaml)
+ err := generateKubeYaml("deployment", deployment, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1016,7 +1163,7 @@ spec:
ctr := getCtr(withHostIP(ip, port), withImage(BB))
pod := getPod(withCtr(ctr))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1033,7 +1180,7 @@ spec:
hostPathLocation := filepath.Join(tempdir, "file")
pod := getPod(withVolume(getVolume(`""`, hostPathLocation)))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1048,7 +1195,7 @@ spec:
f.Close()
pod := getPod(withVolume(getVolume(`""`, hostPathLocation)))
- err = generatePodKubeYaml(pod, kubeYaml)
+ err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1060,7 +1207,7 @@ spec:
hostPathLocation := filepath.Join(tempdir, "file")
pod := getPod(withVolume(getVolume("File", hostPathLocation)))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1075,7 +1222,7 @@ spec:
f.Close()
pod := getPod(withVolume(getVolume("File", hostPathLocation)))
- err = generatePodKubeYaml(pod, kubeYaml)
+ err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1087,7 +1234,7 @@ spec:
hostPathLocation := filepath.Join(tempdir, "file")
pod := getPod(withVolume(getVolume("FileOrCreate", hostPathLocation)))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1103,7 +1250,7 @@ spec:
hostPathLocation := filepath.Join(tempdir, "file")
pod := getPod(withVolume(getVolume("DirectoryOrCreate", hostPathLocation)))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1123,7 +1270,7 @@ spec:
f.Close()
pod := getPod(withVolume(getVolume("Socket", hostPathLocation)))
- err = generatePodKubeYaml(pod, kubeYaml)
+ err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1139,7 +1286,7 @@ spec:
ctr := getCtr(withVolumeMount(hostPathLocation, true), withImage(BB))
pod := getPod(withVolume(getVolume("File", hostPathLocation)), withCtr(ctr))
- err = generatePodKubeYaml(pod, kubeYaml)
+ err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1162,7 +1309,7 @@ spec:
withReplicas(numReplicas),
withPod(getPod(withLabel(expectedLabelKey, expectedLabelValue))),
)
- err := generateDeploymentKubeYaml(deployment, kubeYaml)
+ err := generateKubeYaml("deployment", deployment, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
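
The refactor above folds generatePodKubeYaml and generateDeploymentKubeYaml into a single generateKubeYaml(kind, object, path), and the new ConfigMap/Env/EnvFrom helpers follow the same functional-options pattern as getPod and getCtr. A sketch of how a further case could be wired up with those helpers (names and values are placeholders; it assumes it sits inside the existing Describe block, not that such a test exists in this patch):

    It("podman play kube env values from another configmap", func() {
    	SkipIfRemote("configmap list is not supported as a param")

    	cmYamlPathname := filepath.Join(podmanTest.TempDir, "bar-cm.yaml")
    	cm := getConfigMap(withConfigMapName("bar"), withConfigMapData("BAR", "bar"))
    	err := generateKubeYaml("configmap", cm, cmYamlPathname)
    	Expect(err).To(BeNil())

    	pod := getPod(withCtr(getCtr(
    		withEnv("BAR", "", "configmap", "bar", "BAR"), // valueFrom: configMapKeyRef
    		withEnvFrom("bar", "configmap"),               // envFrom: configMapRef
    	)))
    	err = generateKubeYaml("pod", pod, kubeYaml)
    	Expect(err).To(BeNil())

    	kube := podmanTest.Podman([]string{"play", "kube", kubeYaml, "--configmap", cmYamlPathname})
    	kube.WaitWithDefaultTimeout()
    	Expect(kube.ExitCode()).To(Equal(0))

    	inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "'{{ .Config.Env }}'"})
    	inspect.WaitWithDefaultTimeout()
    	Expect(inspect.ExitCode()).To(Equal(0))
    	Expect(inspect.OutputToString()).To(ContainSubstring(`BAR=bar`))
    })
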
diff --git a/test/e2e/rm_test.go b/test/e2e/rm_test.go
index 7eff8c6ed..524c07cc6 100644
--- a/test/e2e/rm_test.go
+++ b/test/e2e/rm_test.go
@@ -236,7 +236,6 @@ var _ = Describe("Podman rm", func() {
})
It("podman rm --ignore bogus container and a running container", func() {
-
session := podmanTest.RunTopContainer("test1")
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index cd32e5a77..e6bba9f67 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -394,7 +394,7 @@ USER bin`
})
It("podman run sysctl test", func() {
- SkipIfRootless("Network sysctls are not avalable root rootless")
+ SkipIfRootless("Network sysctls are not available root rootless")
session := podmanTest.Podman([]string{"run", "--rm", "--sysctl", "net.core.somaxconn=65535", ALPINE, "sysctl", "net.core.somaxconn"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/system/055-rm.bats b/test/system/055-rm.bats
index c8475c3e9..7176ae4b8 100644
--- a/test/system/055-rm.bats
+++ b/test/system/055-rm.bats
@@ -33,6 +33,21 @@ load helpers
run_podman rm -f $cid
}
+@test "podman rm container from storage" {
+ if is_remote; then
+ skip "only applicable for local podman"
+ fi
+ rand=$(random_string 30)
+ run_podman create --name $rand $IMAGE /bin/true
+
+ # Create a container that podman does not know about
+ run buildah from $IMAGE
+ cid="$output"
+
+ # rm should succeed
+ run_podman rm $rand $cid
+}
+
# I'm sorry! This test takes 13 seconds. There's not much I can do about it,
# please know that I think it's justified: podman 1.5.0 had a strange bug
# in which exit status was not preserved on some code paths with 'rm -f'