-rw-r--r--   .cirrus.yml                                  |   2
-rw-r--r--   cmd/podman/containers/ps.go                  |  53
-rw-r--r--   cmd/podman/containers/rm.go                  |  13
-rw-r--r--   cmd/podman/play/kube.go                      |   1
-rw-r--r--   cmd/podman/volumes/list.go                   |  54
-rw-r--r--   contrib/cirrus/lib.sh                        |  29
-rwxr-xr-x   contrib/cirrus/runner.sh                     |   3
-rwxr-xr-x   contrib/cirrus/setup_environment.sh          |   7
-rwxr-xr-x   contrib/cirrus/shellcheck.sh                 |   2
-rw-r--r--   docs/source/markdown/podman-play-kube.1.md   |  15
-rw-r--r--   docs/source/markdown/podman-rm.1.md          |   9
-rwxr-xr-x   hack/get_ci_vm.sh                            | 330
-rw-r--r--   libpod/image/docker_registry_options.go      |   1
-rw-r--r--   libpod/kube.go                               |  40
-rw-r--r--   nix/nixpkgs.json                             |   6
-rw-r--r--   pkg/domain/entities/containers.go            |   1
-rw-r--r--   pkg/domain/entities/play.go                  |   2
-rw-r--r--   pkg/domain/infra/abi/containers.go           |  28
-rw-r--r--   pkg/domain/infra/abi/play.go                 |  89
-rw-r--r--   pkg/domain/infra/abi/play_test.go            | 254
-rw-r--r--   test/apiv2/20-containers.at                  |   2
-rw-r--r--   test/e2e/generate_kube_test.go               |  32
-rw-r--r--   test/e2e/network_test.go                     |   2
-rw-r--r--   test/e2e/play_kube_test.go                   | 235
-rw-r--r--   test/e2e/ps_test.go                          |  12
-rw-r--r--   test/e2e/rm_test.go                          |   1
-rw-r--r--   test/e2e/run_test.go                         |   2
-rw-r--r--   test/e2e/volume_ls_test.go                   |   7
-rw-r--r--   test/system/055-rm.bats                      |  15
29 files changed, 888 insertions, 359 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index b23ec1a90..da33c81e2 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -47,7 +47,7 @@ env:
TEST_ENVIRON: host # 'host' or 'container'
PODBIN_NAME: podman # 'podman' or 'remote'
PRIV_NAME: root # 'root' or 'rootless'
- DISTRO_NV: $FEDORA_NAME # any {PRIOR_,}{FEDORA,UBUNTU}_NAME value
+ DISTRO_NV: # any {PRIOR_,}{FEDORA,UBUNTU}_NAME value
VM_IMAGE_NAME: # One of the "Google-cloud VM Images" (above)
CTR_FQIN: # One of the "Container FQIN's" (above)
diff --git a/cmd/podman/containers/ps.go b/cmd/podman/containers/ps.go
index c4c8b60f3..8082a74c2 100644
--- a/cmd/podman/containers/ps.go
+++ b/cmd/podman/containers/ps.go
@@ -12,7 +12,9 @@ import (
tm "github.com/buger/goterm"
"github.com/containers/buildah/pkg/formats"
+ "github.com/containers/podman/v2/cmd/podman/parse"
"github.com/containers/podman/v2/cmd/podman/registry"
+ "github.com/containers/podman/v2/cmd/podman/report"
"github.com/containers/podman/v2/cmd/podman/utils"
"github.com/containers/podman/v2/cmd/podman/validate"
"github.com/containers/podman/v2/pkg/domain/entities"
@@ -176,47 +178,51 @@ func ps(cmd *cobra.Command, args []string) error {
return err
}
}
- if listOpts.Format == "json" {
+
+ switch {
+ case parse.MatchesJSONFormat(listOpts.Format):
return jsonOut(listContainers)
- }
- if listOpts.Quiet {
+ case listOpts.Quiet:
return quietOut(listContainers)
}
+ // Output a table; when Watch > 0 the screen is refreshed on each interval
responses := make([]psReporter, 0, len(listContainers))
for _, r := range listContainers {
responses = append(responses, psReporter{r})
}
- headers, format := createPsOut()
- if cmd.Flag("format").Changed {
- format = strings.TrimPrefix(listOpts.Format, "table ")
- if !strings.HasPrefix(format, "\n") {
- format += "\n"
- }
- }
- format = "{{range . }}" + format + "{{end}}"
- if !listOpts.Quiet && !cmd.Flag("format").Changed {
- format = headers + format
+ var headers, format string
+ if cmd.Flags().Changed("format") {
+ headers = ""
+ format = report.NormalizeFormat(listOpts.Format)
+ } else {
+ headers, format = createPsOut()
}
+ format = headers + "{{range . }}" + format + "{{end}}"
+
tmpl, err := template.New("listContainers").Parse(format)
if err != nil {
return err
}
w := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', 0)
+ defer w.Flush()
+
if listOpts.Watch > 0 {
for {
var responses []psReporter
tm.Clear()
tm.MoveCursor(1, 1)
tm.Flush()
- listContainers, err := getResponses()
- for _, r := range listContainers {
- responses = append(responses, psReporter{r})
- }
- if err != nil {
+
+ if ctnrs, err := getResponses(); err != nil {
return err
+ } else {
+ for _, r := range ctnrs {
+ responses = append(responses, psReporter{r})
+ }
}
+
if err := tmpl.Execute(w, responses); err != nil {
return err
}
@@ -232,11 +238,11 @@ func ps(cmd *cobra.Command, args []string) error {
if err := tmpl.Execute(w, responses); err != nil {
return err
}
- return w.Flush()
}
return nil
}
+// cannot use report.Headers() as it doesn't support structures as fields
func createPsOut() (string, string) {
var row string
if listOpts.Namespace {
@@ -257,12 +263,9 @@ func createPsOut() (string, string) {
headers += "\tSIZE"
row += "\t{{.Size}}"
}
- if !strings.HasSuffix(headers, "\n") {
- headers += "\n"
- }
- if !strings.HasSuffix(row, "\n") {
- row += "\n"
- }
+
+ headers = report.NormalizeFormat(headers)
+ row = report.NormalizeFormat(row)
return headers, row
}
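
`report.NormalizeFormat` is used here but implemented in `cmd/podman/report`, which is not part of this diff. Based on the code it replaces (the trailing-newline handling removed above and the literal `\t` substitution removed from `cmd/podman/volumes/list.go` below), a rough sketch of the expected behavior, assuming those two operations are all it does:

```go
// Rough sketch only: report.NormalizeFormat lives in cmd/podman/report and is
// not shown in this diff. Behavior inferred from the removed code: turn literal
// "\t" sequences into real tabs and guarantee the format ends with a newline.
package report

import "strings"

func NormalizeFormat(format string) string {
	f := strings.ReplaceAll(format, `\t`, "\t")
	if !strings.HasSuffix(f, "\n") {
		f += "\n"
	}
	return f
}
```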
diff --git a/cmd/podman/containers/rm.go b/cmd/podman/containers/rm.go
index f8f12234d..a7739b3ba 100644
--- a/cmd/podman/containers/rm.go
+++ b/cmd/podman/containers/rm.go
@@ -57,13 +57,12 @@ func rmFlags(flags *pflag.FlagSet) {
flags.BoolVarP(&rmOptions.All, "all", "a", false, "Remove all containers")
flags.BoolVarP(&rmOptions.Ignore, "ignore", "i", false, "Ignore errors when a specified container is missing")
flags.BoolVarP(&rmOptions.Force, "force", "f", false, "Force removal of a running or unusable container. The default is false")
- flags.BoolVar(&rmOptions.Storage, "storage", false, "Remove container from storage library")
flags.BoolVarP(&rmOptions.Volumes, "volumes", "v", false, "Remove anonymous volumes associated with the container")
flags.StringArrayVarP(&rmOptions.CIDFiles, "cidfile", "", nil, "Read the container ID from the file")
- if registry.IsRemote() {
- _ = flags.MarkHidden("ignore")
- _ = flags.MarkHidden("cidfile")
+ if !registry.IsRemote() {
+ // This option is deprecated, but needs to still exist for backwards compatibility
+ flags.Bool("storage", false, "Remove container from storage library")
_ = flags.MarkHidden("storage")
}
}
@@ -97,12 +96,6 @@ func removeContainers(namesOrIDs []string, rmOptions entities.RmOptions, setExit
var (
errs utils.OutputErrors
)
- // Storage conflicts with --all/--latest/--volumes/--cidfile/--ignore
- if rmOptions.Storage {
- if rmOptions.All || rmOptions.Ignore || rmOptions.Latest || rmOptions.Volumes || rmOptions.CIDFiles != nil {
- return errors.Errorf("--storage conflicts with --volumes, --all, --latest, --ignore and --cidfile")
- }
- }
responses, err := registry.ContainerEngine().ContainerRm(context.Background(), namesOrIDs, rmOptions)
if err != nil {
if setExit {
diff --git a/cmd/podman/play/kube.go b/cmd/podman/play/kube.go
index 54a6d0677..976d720ee 100644
--- a/cmd/podman/play/kube.go
+++ b/cmd/podman/play/kube.go
@@ -60,6 +60,7 @@ func init() {
flags.BoolVar(&kubeOptions.TLSVerifyCLI, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
flags.StringVar(&kubeOptions.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
flags.StringVar(&kubeOptions.SeccompProfileRoot, "seccomp-profile-root", defaultSeccompRoot, "Directory path for seccomp profiles")
+ flags.StringSliceVar(&kubeOptions.ConfigMaps, "configmap", []string{}, "`Pathname` of a YAML file containing a kubernetes configmap")
}
_ = flags.MarkHidden("signature-policy")
}
diff --git a/cmd/podman/volumes/list.go b/cmd/podman/volumes/list.go
index d198e51a7..18765a499 100644
--- a/cmd/podman/volumes/list.go
+++ b/cmd/podman/volumes/list.go
@@ -3,13 +3,14 @@ package volumes
import (
"context"
"fmt"
- "io"
"os"
"strings"
"text/tabwriter"
"text/template"
+ "github.com/containers/podman/v2/cmd/podman/parse"
"github.com/containers/podman/v2/cmd/podman/registry"
+ "github.com/containers/podman/v2/cmd/podman/report"
"github.com/containers/podman/v2/cmd/podman/validate"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/pkg/errors"
@@ -55,7 +56,6 @@ func init() {
}
func list(cmd *cobra.Command, args []string) error {
- var w io.Writer = os.Stdout
if cliOpts.Quiet && cmd.Flag("format").Changed {
return errors.New("quiet and format flags cannot be used together")
}
@@ -73,40 +73,40 @@ func list(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
- if cliOpts.Format == "json" {
- return outputJSON(responses)
- }
- if len(responses) < 1 {
+ switch {
+ case parse.MatchesJSONFormat(cliOpts.Format):
+ return outputJSON(responses)
+ case len(responses) < 1:
return nil
}
- // "\t" from the command line is not being recognized as a tab
- // replacing the string "\t" to a tab character if the user passes in "\t"
- cliOpts.Format = strings.Replace(cliOpts.Format, `\t`, "\t", -1)
+ return outputTemplate(cmd, responses)
+}
+
+func outputTemplate(cmd *cobra.Command, responses []*entities.VolumeListReport) error {
+ headers := report.Headers(entities.VolumeListReport{}, map[string]string{
+ "Name": "VOLUME NAME",
+ })
+
+ row := report.NormalizeFormat(cliOpts.Format)
if cliOpts.Quiet {
- cliOpts.Format = "{{.Name}}\n"
+ row = "{{.Name}}\n"
}
- headers := "DRIVER\tVOLUME NAME\n"
- row := cliOpts.Format
- if !strings.HasSuffix(cliOpts.Format, "\n") {
- row += "\n"
- }
- format := "{{range . }}" + row + "{{end}}"
- if !cliOpts.Quiet && !cmd.Flag("format").Changed {
- w = tabwriter.NewWriter(os.Stdout, 12, 2, 2, ' ', 0)
- format = headers + format
- }
- tmpl, err := template.New("listVolume").Parse(format)
+ row = "{{range . }}" + row + "{{end}}"
+
+ tmpl, err := template.New("list volume").Parse(row)
if err != nil {
return err
}
- if err := tmpl.Execute(w, responses); err != nil {
- return err
- }
- if flusher, ok := w.(interface{ Flush() error }); ok {
- return flusher.Flush()
+ w := tabwriter.NewWriter(os.Stdout, 12, 2, 2, ' ', 0)
+ defer w.Flush()
+
+ if !cliOpts.Quiet && !cmd.Flag("format").Changed {
+ if err := tmpl.Execute(w, headers); err != nil {
+ return errors.Wrapf(err, "failed to write report column headers")
+ }
}
- return nil
+ return tmpl.Execute(w, responses)
}
func outputJSON(vols []*entities.VolumeListReport) error {
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index 23987938b..e5124d8e4 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -6,18 +6,20 @@
# BEGIN Global export of all variables
set -a
-# Due to differences across platforms and runtime execution environments,
-# handling of the (otherwise) default shell setup is non-uniform. Rather
-# than attempt to workaround differences, simply force-load/set required
-# items every time this library is utilized.
-source /etc/profile
-source /etc/environment
-USER="$(whoami)"
-HOME="$(getent passwd $USER | cut -d : -f 6)"
-# Some platforms set and make this read-only
-[[ -n "$UID" ]] || \
- UID=$(getent passwd $USER | cut -d : -f 3)
-GID=$(getent passwd $USER | cut -d : -f 4)
+if [[ "$CI" == "true" ]]; then
+ # Due to differences across platforms and runtime execution environments,
+ # handling of the (otherwise) default shell setup is non-uniform. Rather
+ # than attempt to workaround differences, simply force-load/set required
+ # items every time this library is utilized.
+ source /etc/profile
+ source /etc/environment
+ USER="$(whoami)"
+ HOME="$(getent passwd $USER | cut -d : -f 6)"
+ # Some platforms set and make this read-only
+ [[ -n "$UID" ]] || \
+ UID=$(getent passwd $USER | cut -d : -f 3)
+ GID=$(getent passwd $USER | cut -d : -f 4)
+fi
# During VM Image build, the 'containers/automation' installation
# was performed. The final step of that installation sets the
@@ -43,6 +45,9 @@ OS_RELEASE_ID="$(source /etc/os-release; echo $ID)"
OS_RELEASE_VER="$(source /etc/os-release; echo $VERSION_ID | cut -d '.' -f 1)"
# Combined to ease some usage
OS_REL_VER="${OS_RELEASE_ID}-${OS_RELEASE_VER}"
+# This is normally set from .cirrus.yml but a default is necessary when
+# running under hack/get_ci_vm.sh since it cannot infer the value.
+DISTRO_NV="${DISTRO_NV:-$OS_REL_VER}"
# Essential default paths, many are overridden when executing under Cirrus-CI
GOPATH="${GOPATH:-/var/tmp/go}"
diff --git a/contrib/cirrus/runner.sh b/contrib/cirrus/runner.sh
index 8a85acbd1..bfac8e7cb 100755
--- a/contrib/cirrus/runner.sh
+++ b/contrib/cirrus/runner.sh
@@ -139,6 +139,9 @@ function _run_vendor() {
}
function _run_build() {
+ # Ensure we always start from a clean slate, with all vendor modules downloaded
+ make clean
+ make vendor
make podman-release
make podman-remote-linux-release
}
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index c064b6840..156c9b7b2 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -67,9 +67,8 @@ case "$CG_FS_TYPE" in
*) die_unknown CG_FS_TYPE
esac
-# Required to be defined by caller: Which distribution are we testing on
-# shellcheck disable=SC2154
-case "$DISTRO_NV" in
+# Which distribution are we testing on.
+case "$OS_RELEASE_ID" in
ubuntu*) ;;
fedora*)
if ((CONTAINER==0)); then # Not yet running inside a container
@@ -83,7 +82,7 @@ case "$DISTRO_NV" in
setsebool container_manage_cgroup true
fi
;;
- *) die_unknown DISTRO_NV
+ *) die_unknown OS_RELEASE_ID
esac
# Required to be defined by caller: The environment where primary testing happens
diff --git a/contrib/cirrus/shellcheck.sh b/contrib/cirrus/shellcheck.sh
index edf8248d3..667d30c91 100755
--- a/contrib/cirrus/shellcheck.sh
+++ b/contrib/cirrus/shellcheck.sh
@@ -11,6 +11,6 @@ shellcheck --color=always --format=tty \
--enable add-default-case,avoid-nullary-conditions,check-unassigned-uppercase \
--exclude SC2046,SC2034,SC2090,SC2064 \
--wiki-link-count=0 --severity=warning \
- $SCRIPT_BASE/*.sh
+ $SCRIPT_BASE/*.sh hack/get_ci_vm.sh
echo "Shellcheck: PASS"
diff --git a/docs/source/markdown/podman-play-kube.1.md b/docs/source/markdown/podman-play-kube.1.md
index dd9441800..519b153f4 100644
--- a/docs/source/markdown/podman-play-kube.1.md
+++ b/docs/source/markdown/podman-play-kube.1.md
@@ -30,6 +30,12 @@ environment variable. `export REGISTRY_AUTH_FILE=path`
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands)
+**--configmap**=*path*
+
+Use Kubernetes configmap YAML at path to provide a source for environment variable values within the containers of the pod.
+
+Note: The *--configmap* option can be used multiple times or a comma-separated list of paths can be used to pass multiple Kubernetes configmap YAMLs.
+
**--creds**
The [username[:password]] to use to authenticate with the registry if required.
@@ -66,6 +72,15 @@ $ podman play kube demo.yml
52182811df2b1e73f36476003a66ec872101ea59034ac0d4d3a7b40903b955a6
```
+Provide `configmap-foo.yml` and `configmap-bar.yml` as sources for environment variables within the containers.
+```
+$ podman play kube demo.yml --configmap configmap-foo.yml,configmap-bar.yml
+52182811df2b1e73f36476003a66ec872101ea59034ac0d4d3a7b40903b955a6
+
+$ podman play kube demo.yml --configmap configmap-foo.yml --configmap configmap-bar.yml
+52182811df2b1e73f36476003a66ec872101ea59034ac0d4d3a7b40903b955a6
+```
+
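A minimal configmap file of the kind consumed by *--configmap* looks like the following (this sample mirrors the fixture used by the new unit tests; the name and data values are illustrative):
```
apiVersion: v1
kind: ConfigMap
metadata:
  name: foo
data:
  FOO: foo
```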
CNI network(s) can be specified as comma-separated list using ``--network``
```
$ podman play kube demo.yml --network cni1,cni2
diff --git a/docs/source/markdown/podman-rm.1.md b/docs/source/markdown/podman-rm.1.md
index e3e6740df..36904a128 100644
--- a/docs/source/markdown/podman-rm.1.md
+++ b/docs/source/markdown/podman-rm.1.md
@@ -43,13 +43,6 @@ to run containers such as CRI-O, the last started container could be from either
The latest option is not supported on the remote client.
-**--storage**
-
-Remove external containers from the storage library.
-This is only possible with containers that are not present in libpod can be seen by **podman ps --all --storage**).
-It is used to remove external containers from **podman build** and **buildah**, and orphan containers which were only partially removed by **podman rm**.
-The storage option conflicts with the **--all**, **--latest**, and **--volumes** options.
-
**--volumes**, **-v**
Remove anonymous volumes associated with the container. This does not include named volumes
@@ -96,7 +89,7 @@ $ podman rm -f --latest
**125** The command fails for any other reason
## SEE ALSO
-podman(1), podman-image-rm(1), podman-ps(1), podman-build(1)
+podman(1), podman-image-rm(1), podman-ps(1), podman-build(1), buildah(1), cri-o(1)
## HISTORY
August 2017, Originally compiled by Ryan Cole <rycole@redhat.com>
diff --git a/hack/get_ci_vm.sh b/hack/get_ci_vm.sh
index adf3b1bf2..f8c7e792e 100755
--- a/hack/get_ci_vm.sh
+++ b/hack/get_ci_vm.sh
@@ -1,49 +1,82 @@
#!/usr/bin/env bash
+#
+# For help and usage information, simply execute the script w/o any arguments.
+#
+# This script is intended to be run by podman developers who need to debug
+# problems specifically related to Cirrus-CI automated testing. However,
+# because it's only loosely coupled to the `.cirrus.yml` configuration, it must
+# orchestrate VMs in GCP directly. This means users need to have
+# pre-authorization (access) to manipulate google-cloud resources. Additionally,
+# there are no guarantees it will remain in-sync with other automation-related
+# scripts. Therefore it may not always function for everybody in every
+# future scenario without updates/modifications/tweaks.
+
set -e
-RED="\e[1;36;41m"
-YEL="\e[1;33;44m"
+RED="\e[1;31m"
+YEL="\e[1;32m"
NOR="\e[0m"
USAGE_WARNING="
-${YEL}WARNING: This will not work without local sudo access to run podman,${NOR}
- ${YEL}and prior authorization to use the libpod GCP project. Also,${NOR}
- ${YEL}possession of the proper ssh private key is required.${NOR}
+${YEL}WARNING: This will not work without podman,${NOR}
+ ${YEL}and prior authorization to use the libpod GCP project.${NOR}
"
-# TODO: Many/most of these values should come from .cirrus.yml
+# These values come from .cirrus.yml gce_instance clause
ZONE="${ZONE:-us-central1-a}"
CPUS="2"
MEMORY="4Gb"
DISK="200"
PROJECT="libpod-218412"
GOSRC="/var/tmp/go/src/github.com/containers/podman"
-GCLOUD_IMAGE=${GCLOUD_IMAGE:-quay.io/cevich/gcloud_centos:latest}
-GCLOUD_SUDO=${GCLOUD_SUDO-sudo}
+GIT_REPO="https://github.com/containers/podman.git"
+
+# Container image with necessary runtime elements
+GCLOUD_IMAGE="${GCLOUD_IMAGE:-docker.io/google/cloud-sdk:alpine}"
+GCLOUD_CFGDIR=".config/gcloud"
+
+SCRIPT_FILENAME=$(basename ${BASH_SOURCE[0]})
+HOOK_FILENAME="hook_${SCRIPT_FILENAME}"
# Shared tmp directory between container and us
-TMPDIR=$(mktemp -d --tmpdir $(basename $0)_tmpdir_XXXXXX)
+TMPDIR=$(mktemp -d --tmpdir ${SCRIPT_FILENAME}_tmpdir_XXXXXX)
-LIBPODROOT=$(realpath "$(dirname $0)/../")
+show_usage() {
+ echo -e "\n${RED}ERROR: $1${NOR}"
+ echo -e "${YEL}Usage: $SCRIPT_FILENAME <image_name>${NOR}"
+ echo ""
+ if [[ -r ".cirrus.yml" ]]
+ then
+ echo -e "${YEL}Some possible image_name values (from .cirrus.yml):${NOR}"
+ image_hints
+ echo ""
+ echo -e "${YEL}Optional:${NOR} If a $HOME/$GCLOUD_CFGDIR/$HOOK_FILENAME executable exists during"
+ echo "VM creation, it will be executed remotely after cloning"
+ echo "$GIT_REPO. The"
+ echo "current local working branch name and commit ID, will be provided as"
+ echo "it's arguments."
+ fi
+ exit 1
+}
+
+LIBPODROOT=$(realpath "$(dirname ${BASH_SOURCE[0]})/../")
# else: Assume $PWD is the root of the libpod repository
-[[ "$LIBPODROOT" != "/" ]] || LIBPODROOT=$PWD
+[[ "$LIBPODROOT" != "/" ]] || \
+ show_usage "Must execute script from within clone of containers/podman repo."
-# Command shortcuts save some typing (assumes $LIBPODROOT is subdir of $HOME)
-PGCLOUD="$GCLOUD_SUDO podman run -it --rm -e AS_ID=$UID -e AS_USER=$USER --security-opt label=disable -v $TMPDIR:$HOME -v $HOME/.config/gcloud:$HOME/.config/gcloud -v $HOME/.config/gcloud/ssh:$HOME/.ssh -v $LIBPODROOT:$LIBPODROOT $GCLOUD_IMAGE --configuration=libpod --project=$PROJECT"
-SCP_CMD="$PGCLOUD compute scp"
+[[ "$UID" -ne 0 ]] || \
+ show_usage "Must execute script as a regular (non-root) user."
+
+[[ "${LIBPODROOT#$HOME}" != "$LIBPODROOT" ]] || \
+ show_usage "Clone of containers/podman must be a subdirectory of \$HOME ($HOME)"
+# Disable SELinux labeling to allow read-only mounting of repository files
+PGCLOUD="podman run -it --rm --security-opt label=disable -v $TMPDIR:$TMPDIR -v $HOME/.config/gcloud:/root/.config/gcloud -v $HOME/.config/gcloud/ssh:/root/.ssh -v $LIBPODROOT:$LIBPODROOT:ro $GCLOUD_IMAGE gcloud --configuration=libpod --project=$PROJECT"
+SCP_CMD="$PGCLOUD compute scp"
showrun() {
- if [[ "$1" == "--background" ]]
- then
- shift
- # Properly escape any nested spaces, so command can be copy-pasted
- echo '+ '$(printf " %q" "$@")' &' > /dev/stderr
- "$@" &
- echo -e "${RED}<backgrounded>${NOR}"
- else
- echo '+ '$(printf " %q" "$@") > /dev/stderr
- "$@"
- fi
+ echo '+ '$(printf " %q" "$@") > /dev/stderr
+ echo ""
+ "$@"
}
cleanup() {
@@ -52,6 +85,7 @@ cleanup() {
wait
# set GCLOUD_DEBUG to leave tmpdir behind for postmortem
+ # shellcheck disable=SC2154
test -z "$GCLOUD_DEBUG" && rm -rf $TMPDIR
# Not always called from an exit handler, but should always exit when called
@@ -61,32 +95,18 @@ trap cleanup EXIT
delvm() {
echo -e "\n"
- echo -e "\n${YEL}Offering to Delete $VMNAME ${RED}(Might take a minute or two)${NOR}"
- echo -e "\n${YEL}Note: It's safe to answer N, then re-run script again later.${NOR}"
+ echo -e "\n${YEL}Offering to Delete $VMNAME${NOR}"
+ echo -e "${RED}(Deletion might take a minute or two)${NOR}"
+ echo -e "${YEL}Note: It's safe to answer N, then re-run script again later.${NOR}"
showrun $CLEANUP_CMD # prompts for Yes/No
cleanup
}
-show_usage() {
- echo -e "\n${RED}ERROR: $1${NOR}"
- echo -e "${YEL}Usage: $(basename $0) [-m <SPECIALMODE>] [-u <ROOTLESS_USER> ] <image_name>${NOR}"
- echo "Use -m <SPECIALMODE> with a supported value documented in contrib/cirrus/README.md."
- echo "With '-m rootless' must also specify -u <ROOTLESS_USER> with name of user to create & use"
- echo ""
- if [[ -r ".cirrus.yml" ]]
- then
- echo -e "${YEL}Some possible image_name values (from .cirrus.yml):${NOR}"
- image_hints
- echo ""
- fi
- exit 1
-}
-
get_env_vars() {
# Deal with both YAML and embedded shell-like substitutions in values
# if substitution fails, fall back to printing naked env. var as-is.
python3 -c '
-import yaml,re
+import sys,yaml,re
env=yaml.load(open(".cirrus.yml"), Loader=yaml.SafeLoader)["env"]
dollar_env_var=re.compile(r"\$(\w+)")
dollarcurly_env_var=re.compile(r"\$\{(\w+)\}")
@@ -98,11 +118,10 @@ class ReIterKey(dict):
rep=r"{\1}" # Convert env vars markup to -> str.format_map(re_iter_key) markup
out=ReIterKey()
for k,v in env.items():
- v=str(v)
- if "ENCRYPTED" not in v:
- out[k]=dollar_env_var.sub(rep, dollarcurly_env_var.sub(rep, v))
+ if "ENCRYPTED" not in str(v) and bool(v):
+ out[k]=dollar_env_var.sub(rep, dollarcurly_env_var.sub(rep, str(v)))
for k,v in out.items():
- print("{0}=\"{1}\"".format(k, v.format_map(out)))
+ sys.stdout.write("{0}=\"{1}\"\n".format(k, str(v).format_map(out)))
'
}
@@ -110,8 +129,14 @@ image_hints() {
get_env_vars | fgrep '_CACHE_IMAGE_NAME' | awk -F "=" '{print $2}'
}
-
+unset VM_IMAGE_NAME
+unset VMNAME
+unset CREATE_CMD
+unset SSH_CMD
+unset CLEANUP_CMD
+declare -xa ENVS
parse_args(){
+ local arg
echo -e "$USAGE_WARNING"
if [[ "$USER" =~ "root" ]]
@@ -119,86 +144,41 @@ parse_args(){
show_usage "This script must be run as a regular user."
fi
- ENVS="$(get_env_vars)"
- [[ "$#" -ge "1" ]] || \
- show_usage "Must specify at least one command-line parameter."
-
- IMAGE_NAME=""
- ROOTLESS_USER=""
- SPECIALMODE="none"
- for arg
- do
- if [[ "$SPECIALMODE" == "GRABNEXT" ]] && [[ "${arg:0:1}" != "-" ]]
- then
- SPECIALMODE="$arg"
- echo -e "${YEL}Using \$SPECIALMODE=$SPECIALMODE.${NOR}"
- continue
- elif [[ "$ROOTLESS_USER" == "GRABNEXT" ]] && [[ "${arg:0:1}" != "-" ]]
- then
- ROOTLESS_USER="$arg"
- echo -e "${YEL}Using \$ROOTLESS_USER=$ROOTLESS_USER.${NOR}"
- continue
- fi
- case "$arg" in
- -m)
- SPECIALMODE="GRABNEXT"
- ;;
- -u)
- ROOTLESS_USER="GRABNEXT"
- ;;
- *)
- [[ "${arg:0:1}" != "-" ]] || \
- show_usage "Unknown command-line option '$arg'."
- [[ -z "$IMAGE_NAME" ]] || \
- show_usage "Must specify exactly one image name, got '$IMAGE_NAME' and '$arg'."
- IMAGE_NAME="$arg"
- ;;
- esac
- done
+ [[ "$#" -eq 1 ]] || \
+ show_usage "Must specify a VM Image name to use, and the test flavor."
- if [[ "$SPECIALMODE" == "GRABNEXT" ]]
- then
- show_usage "Must specify argument to -m option."
- fi
+ VM_IMAGE_NAME="$1"
- if [[ "$ROOTLESS_USER" == "GRABNEXT" ]]
- then
- show_usage "Must specify argument to -u option."
- fi
+ # Word-splitting is desirable in this case
+ # shellcheck disable=SC2207
+ ENVS=(
+ $(get_env_vars)
+ "VM_IMAGE_NAME=$VM_IMAGE_NAME"
+ )
- if [[ -z "$IMAGE_NAME" ]]
- then
- show_usage "No image-name specified."
- fi
+ VMNAME="${VMNAME:-${USER}-${VM_IMAGE_NAME}}"
- if [[ "$SPECIALMODE" == "rootless" ]] && [[ -z "$ROOTLESS_USER" ]]
- then
- show_usage "With '-m rootless' must also pass -u <username> of rootless user."
- fi
-
- if echo "$IMAGE_NAME" | grep -q "image-builder-image"
- then
- echo -e "Creating an image-builder VM, I hope you know what you're doing.\n"
- IBI_ARGS="--scopes=compute-rw,storage-rw,userinfo-email"
- SSHUSER="centos"
- else
- unset IBI_ARGS
- SSHUSER="root"
- fi
+ CREATE_CMD="$PGCLOUD compute instances create --zone=$ZONE --image=${VM_IMAGE_NAME} --custom-cpu=$CPUS --custom-memory=$MEMORY --boot-disk-size=$DISK --labels=in-use-by=$USER $VMNAME"
- ENVS="$ENVS SPECIALMODE=\"$SPECIALMODE\""
+ SSH_CMD="$PGCLOUD compute ssh root@$VMNAME"
- [[ -z "$ROOTLESS_USER" ]] || \
- ENVS="$ENVS ROOTLESS_USER=$ROOTLESS_USER"
-
- SETUP_CMD="env $ENVS ADD_SECOND_PARTITIO=True $GOSRC/contrib/cirrus/setup_environment.sh"
- VMNAME="${VMNAME:-${USER}-${IMAGE_NAME}}"
+ CLEANUP_CMD="$PGCLOUD compute instances delete --zone $ZONE --delete-disks=all $VMNAME"
+}
- CREATE_CMD="$PGCLOUD compute instances create --zone=$ZONE --image=${IMAGE_NAME} --custom-cpu=$CPUS --custom-memory=$MEMORY --boot-disk-size=$DISK --labels=in-use-by=$USER $IBI_ARGS $VMNAME"
+# Returns true if user has run an 'init' and has a valid token for
+# the specific project-id and named-configuration arguments in $PGCLOUD.
+function has_valid_credentials() {
+ if $PGCLOUD info |& grep -Eq 'Account:.*None'; then
+ return 1
+ fi
- SSH_CMD="$PGCLOUD compute ssh $SSHUSER@$VMNAME"
+ # It's possible for 'gcloud info' to list expired credentials,
+ # e.g. 'ERROR: ... invalid grant: Bad Request'
+ if $PGCLOUD auth print-access-token |& grep -q 'ERROR'; then
+ return 1
+ fi
- CLEANUP_CMD="$PGCLOUD compute instances delete --zone $ZONE --delete-disks=all $VMNAME"
+ return 0
}
##### main
@@ -209,23 +189,17 @@ parse_args(){
cd "$LIBPODROOT"
parse_args "$@"
-
-# Ensure mount-points and data directories exist on host as $USER. Also prevents
-# permission-denied errors during cleanup() b/c `sudo podman` created mount-points
-# owned by root.
-mkdir -p $TMPDIR/${LIBPODROOT##$HOME}
mkdir -p $TMPDIR/.ssh
mkdir -p {$HOME,$TMPDIR}/.config/gcloud/ssh
chmod 700 {$HOME,$TMPDIR}/.config/gcloud/ssh $TMPDIR/.ssh
-cd $LIBPODROOT
+echo -e "\n${YEL}Pulling gcloud image...${NOR}"
+podman pull $GCLOUD_IMAGE
-# Attempt to determine if named 'libpod' gcloud configuration exists
-showrun $PGCLOUD info > $TMPDIR/gcloud-info
-if egrep -q "Account:.*None" $TMPDIR/gcloud-info
+if ! has_valid_credentials
then
echo -e "\n${YEL}WARNING: Can't find gcloud configuration for libpod, running init.${NOR}"
- echo -e " ${RED}Please choose "#1: Re-initialize" and "login" if asked.${NOR}"
+ echo -e " ${RED}Please choose \"#1: Re-initialize\" and \"login\" if asked.${NOR}"
showrun $PGCLOUD init --project=$PROJECT --console-only --skip-diagnostics
# Verify it worked (account name == someone@example.com)
@@ -236,68 +210,52 @@ then
exit 5
fi
- # If this is the only config, make it the default to avoid persistent warnings from gcloud
+ # If this is the only config, make it the default to avoid
+ # persistent warnings from gcloud about there being no default.
[[ -r "$HOME/.config/gcloud/configurations/config_default" ]] || \
- ln "$HOME/.config/gcloud/configurations/config_libpod" \
- "$HOME/.config/gcloud/configurations/config_default"
+ ln "$HOME/.config/gcloud/configurations/config_libpod" \
+ "$HOME/.config/gcloud/configurations/config_default"
fi
-# Couldn't make rsync work with gcloud's ssh wrapper because ssh-keys generated on the fly
-TARBALL=$VMNAME.tar.bz2
-echo -e "\n${YEL}Packing up local repository into a tarball.${NOR}"
-showrun --background tar cjf $TMPDIR/$TARBALL --warning=no-file-changed --exclude-vcs-ignores -C $LIBPODROOT .
-
-trap delvm INT # Allow deleting VM if CTRL-C during create
-# This fails if VM already exists: permit this usage to re-init
+trap delvm EXIT # Offer to delete the VM on exit (including CTRL-C during create)
echo -e "\n${YEL}Trying to creating a VM named $VMNAME${NOR}\n${YEL}in GCE region/zone $ZONE${NOR}"
-echo -e "For faster access, export ZONE='something-closer-<any letter>'"
-echo 'List of regions and zones: https://cloud.google.com/compute/docs/regions-zones/'
-echo -e "${RED}(might take a minute/two. Errors ignored).${NOR}"
-showrun $CREATE_CMD || true # allow re-running commands below when "delete: N"
-
-# Any subsequent failure should prompt for VM deletion
-trap - INT
-trap delvm EXIT
-
-echo -e "\n${YEL}Waiting up to 30s for ssh port to open${NOR}"
-trap 'COUNT=9999' INT
-ATTEMPTS=10
-for (( COUNT=1 ; COUNT <= $ATTEMPTS ; COUNT++ ))
-do
- if $SSH_CMD --command "true"; then break; else sleep 3s; fi
-done
-if (( COUNT > $ATTEMPTS ))
-then
- echo -e "\n${RED}Failed${NOR}"
- exit 7
-fi
-echo -e "${YEL}Got it${NOR}"
-
-echo -e "\n${YEL}Removing and re-creating $GOSRC on $VMNAME.${NOR}"
-showrun $SSH_CMD --command "rm -rf $GOSRC"
-showrun $SSH_CMD --command "mkdir -p $GOSRC"
-
-echo -e "\n${YEL}Transferring tarball to $VMNAME.${NOR}"
-wait
-showrun $SCP_CMD $HOME/$TARBALL $SSHUSER@$VMNAME:/tmp/$TARBALL
-
-echo -e "\n${YEL}Unpacking tarball into $GOSRC on $VMNAME.${NOR}"
-showrun $SSH_CMD --command "tar xjf /tmp/$TARBALL -C $GOSRC"
+echo -e "For faster terminal access, export ZONE='<something-closer>'"
+echo -e 'Zone-list at: https://cloud.google.com/compute/docs/regions-zones/\n'
+if showrun $CREATE_CMD; then # Freshly created VM needs initial setup
+
+ echo -e "\n${YEL}Waiting up to 30s for ssh port to open${NOR}"
+ ATTEMPTS=10
+ trap "exit 1" INT
+ while ((ATTEMPTS)) && ! $SSH_CMD --command "true"; do
+ let "ATTEMPTS--"
+ echo -e "${RED}Nope, not yet.${NOR}"
+ sleep 3s
+ done
+ trap - INT
+ if ! ((ATTEMPTS)); then
+ echo -e "\n${RED}Failed${NOR}"
+ exit 7
+ fi
+ echo -e "${YEL}Got it. Cloning upstream repository as a starting point.${NOR}"
-echo -e "\n${YEL}Removing tarball on $VMNAME.${NOR}"
-showrun $SSH_CMD --command "rm -f /tmp/$TARBALL"
+ showrun $SSH_CMD -- "mkdir -p $GOSRC"
+ showrun $SSH_CMD -- "git clone --progress $GIT_REPO $GOSRC"
-echo -e "\n${YEL}Executing environment setup${NOR}"
-showrun $SSH_CMD --command "$SETUP_CMD"
+ if [[ -x "$HOME/$GCLOUD_CFGDIR/$HOOK_FILENAME" ]]; then
+ echo -e "\n${YEL}Copying hook to VM and executing (ignoring errors).${NOR}"
+ $PGCLOUD compute scp "/root/$GCLOUD_CFGDIR/$HOOK_FILENAME" root@$VMNAME:.
+ if ! showrun $SSH_CMD -- "cd $GOSRC && bash /root/$HOOK_FILENAME $(git branch --show-current) $(git rev-parse HEAD)"; then
+ echo "-e ${RED}Hook exited: $?${NOR}"
+ fi
+ fi
+fi
-VMIP=$($PGCLOUD compute instances describe $VMNAME --format='get(networkInterfaces[0].accessConfigs[0].natIP)')
+echo -e "\n${YEL}Generating connection script for $VMNAME.${NOR}"
+echo -e "Note: Script can be re-used in another terminal if needed."
+echo -e "${RED}(option to delete VM presented upon exiting).${NOR}"
+# TODO: This is fairly fragile, specifically the quoting for the remote command.
+echo '#!/bin/bash' > $TMPDIR/ssh
+echo "$SSH_CMD -- -t 'cd $GOSRC && exec env \"${ENVS[*]}\" bash -il'" >> $TMPDIR/ssh
+chmod +x $TMPDIR/ssh
-echo -e "\n${YEL}Connecting to $VMNAME${NOR}\nPublic IP Address: $VMIP\n${RED}(option to delete VM upon logout).${NOR}\n"
-if [[ -n "$ROOTLESS_USER" ]]
-then
- echo "Re-chowning source files after transfer"
- showrun $SSH_CMD --command "chown -R $ROOTLESS_USER $GOSRC"
- echo "Connecting as user $ROOTLESS_USER"
- SSH_CMD="$PGCLOUD compute ssh $ROOTLESS_USER@$VMNAME"
-fi
-showrun $SSH_CMD -- -t "cd $GOSRC && exec env $ENVS bash -il"
+showrun $TMPDIR/ssh
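
The hook mechanism described in `show_usage` above is only loosely specified in this change; a hypothetical `$HOME/.config/gcloud/hook_get_ci_vm.sh` might look like the sketch below (the branch/commit handling is an assumption based on the two arguments the script passes):

```bash
#!/usr/bin/env bash
# Hypothetical hook: executed on the VM from $GOSRC after the upstream clone.
# $1 = local working branch name, $2 = local commit ID (see usage text above).
set -e
branch="$1"
commit="$2"
echo "Preparing checkout for branch '$branch' at commit '$commit'"
# The upstream clone may not contain the local commit; fall back gracefully.
git checkout "$commit" 2>/dev/null || \
    echo "Commit $commit not found upstream; staying on the default branch"
```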
diff --git a/libpod/image/docker_registry_options.go b/libpod/image/docker_registry_options.go
index 257b7ae8d..835473a1f 100644
--- a/libpod/image/docker_registry_options.go
+++ b/libpod/image/docker_registry_options.go
@@ -55,6 +55,7 @@ func (o DockerRegistryOptions) GetSystemContext(parent *types.SystemContext, add
sc.DockerRegistryUserAgent = parent.DockerRegistryUserAgent
sc.OSChoice = parent.OSChoice
sc.ArchitectureChoice = parent.ArchitectureChoice
+ sc.BlobInfoCacheDir = parent.BlobInfoCacheDir
}
return sc
}
diff --git a/libpod/kube.go b/libpod/kube.go
index 6df79e394..cd5064c84 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -307,18 +307,40 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) {
kubeContainer.StdinOnce = false
kubeContainer.TTY = c.config.Spec.Process.Terminal
- // TODO add CPU limit support.
if c.config.Spec.Linux != nil &&
- c.config.Spec.Linux.Resources != nil &&
- c.config.Spec.Linux.Resources.Memory != nil &&
- c.config.Spec.Linux.Resources.Memory.Limit != nil {
- if kubeContainer.Resources.Limits == nil {
- kubeContainer.Resources.Limits = v1.ResourceList{}
+ c.config.Spec.Linux.Resources != nil {
+ if c.config.Spec.Linux.Resources.Memory != nil &&
+ c.config.Spec.Linux.Resources.Memory.Limit != nil {
+ if kubeContainer.Resources.Limits == nil {
+ kubeContainer.Resources.Limits = v1.ResourceList{}
+ }
+
+ qty := kubeContainer.Resources.Limits.Memory()
+ qty.Set(*c.config.Spec.Linux.Resources.Memory.Limit)
+ kubeContainer.Resources.Limits[v1.ResourceMemory] = *qty
}
- qty := kubeContainer.Resources.Limits.Memory()
- qty.Set(*c.config.Spec.Linux.Resources.Memory.Limit)
- kubeContainer.Resources.Limits[v1.ResourceMemory] = *qty
+ if c.config.Spec.Linux.Resources.CPU != nil &&
+ c.config.Spec.Linux.Resources.CPU.Quota != nil &&
+ c.config.Spec.Linux.Resources.CPU.Period != nil {
+ quota := *c.config.Spec.Linux.Resources.CPU.Quota
+ period := *c.config.Spec.Linux.Resources.CPU.Period
+
+ if quota > 0 && period > 0 {
+ cpuLimitMilli := int64(1000 * float64(quota) / float64(period))
+
+ // Kubernetes: precision finer than 1m is not allowed
+ if cpuLimitMilli >= 1 {
+ if kubeContainer.Resources.Limits == nil {
+ kubeContainer.Resources.Limits = v1.ResourceList{}
+ }
+
+ qty := kubeContainer.Resources.Limits.Cpu()
+ qty.SetMilli(cpuLimitMilli)
+ kubeContainer.Resources.Limits[v1.ResourceCPU] = *qty
+ }
+ }
+ }
}
return kubeContainer, kubeVolumes, nil
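
As a worked example of the CPU-limit conversion added above: a quota of 50000 with a period of 100000 (what `podman create --cpus 0.5` produces) yields 1000 * 50000 / 100000 = 500 millicores, i.e. a `500m` limit, which is exactly what the new e2e test below asserts. A standalone sketch of the same arithmetic:

```go
package main

import "fmt"

// cpuLimitMilli mirrors the quota/period -> millicores arithmetic used above.
func cpuLimitMilli(quota, period int64) int64 {
	if quota <= 0 || period <= 0 {
		return 0 // no finite limit can be derived
	}
	return int64(1000 * float64(quota) / float64(period))
}

func main() {
	fmt.Println(cpuLimitMilli(50000, 100000))  // 500  -> "500m"
	fmt.Println(cpuLimitMilli(200000, 100000)) // 2000 -> "2" CPUs
}
```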
diff --git a/nix/nixpkgs.json b/nix/nixpkgs.json
index cd885fce2..31795516c 100644
--- a/nix/nixpkgs.json
+++ b/nix/nixpkgs.json
@@ -1,7 +1,7 @@
{
"url": "https://github.com/nixos/nixpkgs",
- "rev": "d5a689edda8219a1e20fd3871174b994cf0a94a3",
- "date": "2020-09-13T01:58:20+02:00",
- "sha256": "0m6nmi1fx0glfbg52kqdjgidxylk4p5xnx9v35wlsfi1j2xhkia4",
+ "rev": "c095d986c73b4e3d82af299b4175b9b475ebbf3a",
+ "date": "2020-10-07T23:58:44-03:00",
+ "sha256": "0ygv3wq26mxvy6kahs95ivl6n80bac3pbh6xmgw9ijcnnr03lm01",
"fetchSubmodules": false
}
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index 7b272f01e..3b6dd106f 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -131,7 +131,6 @@ type RmOptions struct {
Force bool
Ignore bool
Latest bool
- Storage bool
Volumes bool
}
diff --git a/pkg/domain/entities/play.go b/pkg/domain/entities/play.go
index 2ba369b83..356e6869d 100644
--- a/pkg/domain/entities/play.go
+++ b/pkg/domain/entities/play.go
@@ -24,6 +24,8 @@ type PlayKubeOptions struct {
// SeccompProfileRoot - path to a directory containing seccomp
// profiles.
SeccompProfileRoot string
+ // ConfigMaps - slice of pathnames to kubernetes configmap YAMLs.
+ ConfigMaps []string
}
// PlayKubePod represents a single pod and associated containers created by play kube
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 0107e18c4..ac7523094 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -273,16 +273,6 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st
func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, options entities.RmOptions) ([]*entities.RmReport, error) {
reports := []*entities.RmReport{}
- if options.Storage {
- for _, ctr := range namesOrIds {
- report := entities.RmReport{Id: ctr}
- if err := ic.Libpod.RemoveStorageContainer(ctr, options.Force); err != nil {
- report.Err = err
- }
- reports = append(reports, &report)
- }
- return reports, nil
- }
names := namesOrIds
for _, cidFile := range options.CIDFiles {
@@ -294,6 +284,22 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
names = append(names, id)
}
+ // Attempt to remove named containers directly from storage; if a container is defined in libpod,
+ // this will fail and the code will fall through to removing the container from libpod.
+ tmpNames := []string{}
+ for _, ctr := range names {
+ report := entities.RmReport{Id: ctr}
+ if err := ic.Libpod.RemoveStorageContainer(ctr, options.Force); err != nil {
+ // storage removal failed (e.g. the container is known to libpod), so keep the name for the libpod removal below
+ tmpNames = append(tmpNames, ctr)
+ } else {
+ reports = append(reports, &report)
+ }
+ }
+ if len(tmpNames) < len(names) {
+ names = tmpNames
+ }
+
ctrs, err := getContainersByContext(options.All, options.Latest, names, ic.Libpod)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
// Failed to get containers. If force is specified, get the containers ID
@@ -302,7 +308,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
return nil, err
}
- for _, ctr := range namesOrIds {
+ for _, ctr := range names {
logrus.Debugf("Evicting container %q", ctr)
report := entities.RmReport{Id: ctr}
id, err := ic.Libpod.EvictContainer(ctx, ctr, options.Volumes)
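
The net effect of folding storage removal into the normal path (and of dropping the dedicated `--storage` flag) is that external containers can now be removed like any other. A hypothetical session, assuming a buildah working container named `alpine-working-container` exists:

```
$ buildah from docker.io/library/alpine
alpine-working-container
$ podman rm alpine-working-container      # no --storage flag needed any more
alpine-working-container
```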
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 40edc1ae3..2de98d8f5 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -311,6 +311,22 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
ctrRestartPolicy = libpod.RestartPolicyAlways
}
+ configMaps := []v1.ConfigMap{}
+ for _, p := range options.ConfigMaps {
+ f, err := os.Open(p)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ cm, err := readConfigMapFromFile(f)
+ if err != nil {
+ return nil, errors.Wrapf(err, "%q", p)
+ }
+
+ configMaps = append(configMaps, cm)
+ }
+
containers := make([]*libpod.Container, 0, len(podYAML.Spec.Containers))
for _, container := range podYAML.Spec.Containers {
pullPolicy := util.PullImageMissing
@@ -334,7 +350,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
if err != nil {
return nil, err
}
- conf, err := kubeContainerToCreateConfig(ctx, container, newImage, namespaces, volumes, pod.ID(), podName, podInfraID, seccompPaths)
+ conf, err := kubeContainerToCreateConfig(ctx, container, newImage, namespaces, volumes, pod.ID(), podName, podInfraID, configMaps, seccompPaths)
if err != nil {
return nil, err
}
@@ -447,7 +463,7 @@ func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfi
}
// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
-func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, podName, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
+func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, podName, infraID string, configMaps []v1.ConfigMap, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
var (
containerConfig createconfig.CreateConfig
pidConfig createconfig.PidConfig
@@ -572,8 +588,17 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
}
envs = imageEnv
}
- for _, e := range containerYAML.Env {
- envs[e.Name] = e.Value
+ for _, env := range containerYAML.Env {
+ value := envVarValue(env, configMaps)
+
+ envs[env.Name] = value
+ }
+ for _, envFrom := range containerYAML.EnvFrom {
+ cmEnvs := envVarsFromConfigMap(envFrom, configMaps)
+
+ for k, v := range cmEnvs {
+ envs[k] = v
+ }
}
containerConfig.Env = envs
@@ -594,6 +619,62 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
return &containerConfig, nil
}
+// readConfigMapFromFile returns a kubernetes configMap obtained from --configmap flag
+func readConfigMapFromFile(r io.Reader) (v1.ConfigMap, error) {
+ var cm v1.ConfigMap
+
+ content, err := ioutil.ReadAll(r)
+ if err != nil {
+ return cm, errors.Wrapf(err, "unable to read ConfigMap YAML content")
+ }
+
+ if err := yaml.Unmarshal(content, &cm); err != nil {
+ return cm, errors.Wrapf(err, "unable to read YAML as Kube ConfigMap")
+ }
+
+ if cm.Kind != "ConfigMap" {
+ return cm, errors.Errorf("invalid YAML kind: %q. [ConfigMap] is the only supported by --configmap", cm.Kind)
+ }
+
+ return cm, nil
+}
+
+// envVarsFromConfigMap returns all key-value pairs as env vars from a configMap that matches the envFrom setting of a container
+func envVarsFromConfigMap(envFrom v1.EnvFromSource, configMaps []v1.ConfigMap) map[string]string {
+ envs := map[string]string{}
+
+ if envFrom.ConfigMapRef != nil {
+ cmName := envFrom.ConfigMapRef.Name
+
+ for _, c := range configMaps {
+ if cmName == c.Name {
+ envs = c.Data
+ break
+ }
+ }
+ }
+
+ return envs
+}
+
+// envVarValue returns the environment variable value configured within the container's env setting.
+// It gets the value from a configMap if specified, otherwise returns env.Value
+func envVarValue(env v1.EnvVar, configMaps []v1.ConfigMap) string {
+ for _, c := range configMaps {
+ if env.ValueFrom != nil {
+ if env.ValueFrom.ConfigMapKeyRef != nil {
+ if env.ValueFrom.ConfigMapKeyRef.Name == c.Name {
+ if value, ok := c.Data[env.ValueFrom.ConfigMapKeyRef.Key]; ok {
+ return value
+ }
+ }
+ }
+ }
+ }
+
+ return env.Value
+}
+
// kubeSeccompPaths holds information about a pod YAML's seccomp configuration
// it holds both container and pod seccomp paths
type kubeSeccompPaths struct {
diff --git a/pkg/domain/infra/abi/play_test.go b/pkg/domain/infra/abi/play_test.go
new file mode 100644
index 000000000..5595476c3
--- /dev/null
+++ b/pkg/domain/infra/abi/play_test.go
@@ -0,0 +1,254 @@
+package abi
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var configMapList = []v1.ConfigMap{
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ConfigMap",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "bar",
+ },
+ Data: map[string]string{
+ "myvar": "bar",
+ },
+ },
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ConfigMap",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ },
+ Data: map[string]string{
+ "myvar": "foo",
+ },
+ },
+}
+
+func TestReadConfigMapFromFile(t *testing.T) {
+ tests := []struct {
+ name string
+ configMapContent string
+ expectError bool
+ expectedErrorMsg string
+ expected v1.ConfigMap
+ }{
+ {
+ "ValidConfigMap",
+ `
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: foo
+data:
+ myvar: foo
+`,
+ false,
+ "",
+ v1.ConfigMap{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ConfigMap",
+ APIVersion: "v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ },
+ Data: map[string]string{
+ "myvar": "foo",
+ },
+ },
+ },
+ {
+ "InvalidYAML",
+ `
+Invalid YAML
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: foo
+data:
+ myvar: foo
+`,
+ true,
+ "unable to read YAML as Kube ConfigMap",
+ v1.ConfigMap{},
+ },
+ {
+ "InvalidKind",
+ `
+apiVersion: v1
+kind: InvalidKind
+metadata:
+ name: foo
+data:
+ myvar: foo
+`,
+ true,
+ "invalid YAML kind",
+ v1.ConfigMap{},
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ buf := bytes.NewBufferString(test.configMapContent)
+ cm, err := readConfigMapFromFile(buf)
+
+ if test.expectError {
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), test.expectedErrorMsg)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, test.expected, cm)
+ }
+ })
+ }
+}
+
+func TestEnvVarsFromConfigMap(t *testing.T) {
+ tests := []struct {
+ name string
+ envFrom v1.EnvFromSource
+ configMapList []v1.ConfigMap
+ expected map[string]string
+ }{
+ {
+ "ConfigMapExists",
+ v1.EnvFromSource{
+ ConfigMapRef: &v1.ConfigMapEnvSource{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ },
+ },
+ configMapList,
+ map[string]string{
+ "myvar": "foo",
+ },
+ },
+ {
+ "ConfigMapDoesNotExist",
+ v1.EnvFromSource{
+ ConfigMapRef: &v1.ConfigMapEnvSource{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "doesnotexist",
+ },
+ },
+ },
+ configMapList,
+ map[string]string{},
+ },
+ {
+ "EmptyConfigMapList",
+ v1.EnvFromSource{
+ ConfigMapRef: &v1.ConfigMapEnvSource{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ },
+ },
+ []v1.ConfigMap{},
+ map[string]string{},
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ result := envVarsFromConfigMap(test.envFrom, test.configMapList)
+ assert.Equal(t, test.expected, result)
+ })
+ }
+}
+
+func TestEnvVarValue(t *testing.T) {
+ tests := []struct {
+ name string
+ envVar v1.EnvVar
+ configMapList []v1.ConfigMap
+ expected string
+ }{
+ {
+ "ConfigMapExists",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ Key: "myvar",
+ },
+ },
+ },
+ configMapList,
+ "foo",
+ },
+ {
+ "ContainerKeyDoesNotExistInConfigMap",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ Key: "doesnotexist",
+ },
+ },
+ },
+ configMapList,
+ "",
+ },
+ {
+ "ConfigMapDoesNotExist",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "doesnotexist",
+ },
+ Key: "myvar",
+ },
+ },
+ },
+ configMapList,
+ "",
+ },
+ {
+ "EmptyConfigMapList",
+ v1.EnvVar{
+ Name: "FOO",
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
+ Name: "foo",
+ },
+ Key: "myvar",
+ },
+ },
+ },
+ []v1.ConfigMap{},
+ "",
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ result := envVarValue(test.envVar, test.configMapList)
+ assert.Equal(t, test.expected, result)
+ })
+ }
+}
diff --git a/test/apiv2/20-containers.at b/test/apiv2/20-containers.at
index 28289955a..d7e5bfee8 100644
--- a/test/apiv2/20-containers.at
+++ b/test/apiv2/20-containers.at
@@ -206,7 +206,7 @@ t POST containers/${cid_top}/stop "" 204
t DELETE containers/$cid 204
t DELETE containers/$cid_top 204
-# test the apiv2 create, should't ignore the ENV and WORKDIR from the image
+# test the apiv2 create, shouldn't ignore the ENV and WORKDIR from the image
t POST containers/create '"Image":"'$ENV_WORKDIR_IMG'","Env":["testKey1"]' 201 \
.Id~[0-9a-f]\\{64\\}
cid=$(jq -r '.Id' <<<"$output")
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
index 05a7f4ddf..3c4a1008b 100644
--- a/test/e2e/generate_kube_test.go
+++ b/test/e2e/generate_kube_test.go
@@ -260,6 +260,38 @@ var _ = Describe("Podman generate kube", func() {
}
})
+ It("podman generate kube on pod with cpu limit", func() {
+ podName := "testCpuLimit"
+ podSession := podmanTest.Podman([]string{"pod", "create", "--name", podName})
+ podSession.WaitWithDefaultTimeout()
+ Expect(podSession.ExitCode()).To(Equal(0))
+
+ ctr1Name := "ctr1"
+ ctr1Session := podmanTest.Podman([]string{"create", "--name", ctr1Name, "--pod", podName,
+ "--cpus", "0.5", ALPINE, "top"})
+ ctr1Session.WaitWithDefaultTimeout()
+ Expect(ctr1Session.ExitCode()).To(Equal(0))
+
+ ctr2Name := "ctr2"
+ ctr2Session := podmanTest.Podman([]string{"create", "--name", ctr2Name, "--pod", podName,
+ "--cpu-period", "100000", "--cpu-quota", "50000", ALPINE, "top"})
+ ctr2Session.WaitWithDefaultTimeout()
+ Expect(ctr2Session.ExitCode()).To(Equal(0))
+
+ kube := podmanTest.Podman([]string{"generate", "kube", podName})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ pod := new(v1.Pod)
+ err := yaml.Unmarshal(kube.Out.Contents(), pod)
+ Expect(err).To(BeNil())
+
+ for _, ctr := range pod.Spec.Containers {
+ cpuLimit := ctr.Resources.Limits.Cpu().MilliValue()
+ Expect(cpuLimit).To(Equal(int64(500)))
+ }
+ })
+
It("podman generate kube on pod with ports", func() {
podName := "test"
podSession := podmanTest.Podman([]string{"pod", "create", "--name", podName, "-p", "4000:4000", "-p", "5000:5000"})
diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go
index a15359ea3..cbfd72da6 100644
--- a/test/e2e/network_test.go
+++ b/test/e2e/network_test.go
@@ -28,7 +28,7 @@ func removeConf(confPath string) {
// generateNetworkConfig generates a cni config with a random name
// it returns the network name and the filepath
func generateNetworkConfig(p *PodmanTestIntegration) (string, string) {
- // generate a random name to preven conflicts with other tests
+ // generate a random name to prevent conflicts with other tests
name := "net" + stringid.GenerateNonCryptoID()
path := filepath.Join(p.CNIConfigDir, fmt.Sprintf("%s.conflist", name))
conf := fmt.Sprintf(`{
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index b7398a58a..b6a390950 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -25,6 +25,19 @@ spec:
hostname: unknown
`
+var configMapYamlTemplate = `
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Name }}
+data:
+{{ with .Data }}
+ {{ range $key, $value := . }}
+ {{ $key }}: {{ $value }}
+ {{ end }}
+{{ end }}
+`
+
var podYamlTemplate = `
apiVersion: v1
kind: Pod
@@ -75,6 +88,26 @@ spec:
- name: HOSTNAME
- name: container
value: podman
+ {{ range .Env }}
+ - name: {{ .Name }}
+ {{ if (eq .ValueFrom "configmap") }}
+ valueFrom:
+ configMapKeyRef:
+ name: {{ .RefName }}
+ key: {{ .RefKey }}
+ {{ else }}
+ value: {{ .Value }}
+ {{ end }}
+ {{ end }}
+ {{ with .EnvFrom}}
+ envFrom:
+ {{ range . }}
+ {{ if (eq .From "configmap") }}
+ - configMapRef:
+ name: {{ .Name }}
+ {{ end }}
+ {{ end }}
+ {{ end }}
image: {{ .Image }}
name: {{ .Name }}
imagePullPolicy: {{ .PullPolicy }}
@@ -226,6 +259,7 @@ var (
defaultPodName = "testPod"
defaultVolName = "testVol"
defaultDeploymentName = "testDeployment"
+ defaultConfigMapName = "testConfigMap"
seccompPwdEPERM = []byte(`{"defaultAction":"SCMP_ACT_ALLOW","syscalls":[{"name":"getcwd","action":"SCMP_ACT_ERRNO"}]}`)
)
@@ -244,34 +278,64 @@ func writeYaml(content string, fileName string) error {
return nil
}
-func generatePodKubeYaml(pod *Pod, fileName string) error {
+func generateKubeYaml(kind string, object interface{}, pathname string) error {
+ var yamlTemplate string
templateBytes := &bytes.Buffer{}
- t, err := template.New("pod").Parse(podYamlTemplate)
+ switch kind {
+ case "configmap":
+ yamlTemplate = configMapYamlTemplate
+ case "pod":
+ yamlTemplate = podYamlTemplate
+ case "deployment":
+ yamlTemplate = deploymentYamlTemplate
+ default:
+ return fmt.Errorf("unsupported kubernetes kind")
+ }
+
+ t, err := template.New(kind).Parse(yamlTemplate)
if err != nil {
return err
}
- if err := t.Execute(templateBytes, pod); err != nil {
+ if err := t.Execute(templateBytes, object); err != nil {
return err
}
- return writeYaml(templateBytes.String(), fileName)
+ return writeYaml(templateBytes.String(), pathname)
}
-func generateDeploymentKubeYaml(deployment *Deployment, fileName string) error {
- templateBytes := &bytes.Buffer{}
+// ConfigMap describes the options a kube yaml can be configured at configmap level
+type ConfigMap struct {
+ Name string
+ Data map[string]string
+}
- t, err := template.New("deployment").Parse(deploymentYamlTemplate)
- if err != nil {
- return err
+func getConfigMap(options ...configMapOption) *ConfigMap {
+ cm := ConfigMap{
+ Name: defaultConfigMapName,
+ Data: map[string]string{},
}
- if err := t.Execute(templateBytes, deployment); err != nil {
- return err
+ for _, option := range options {
+ option(&cm)
}
- return writeYaml(templateBytes.String(), fileName)
+ return &cm
+}
+
+type configMapOption func(*ConfigMap)
+
+func withConfigMapName(name string) configMapOption {
+ return func(configmap *ConfigMap) {
+ configmap.Name = name
+ }
+}
+
+func withConfigMapData(k, v string) configMapOption {
+ return func(configmap *ConfigMap) {
+ configmap.Data[k] = v
+ }
}
// Pod describes the options a kube yaml can be configured at pod level
@@ -450,12 +514,14 @@ type Ctr struct {
VolumeMountPath string
VolumeName string
VolumeReadOnly bool
+ Env []Env
+ EnvFrom []EnvFrom
}
// getCtr takes a list of ctrOptions and returns a Ctr with sane defaults
// and the configured options
func getCtr(options ...ctrOption) *Ctr {
- c := Ctr{defaultCtrName, defaultCtrImage, defaultCtrCmd, defaultCtrArg, true, false, nil, nil, "", "", "", false, "", "", false}
+ c := Ctr{defaultCtrName, defaultCtrImage, defaultCtrCmd, defaultCtrArg, true, false, nil, nil, "", "", "", false, "", "", false, []Env{}, []EnvFrom{}}
for _, option := range options {
option(&c)
}
@@ -524,6 +590,31 @@ func withVolumeMount(mountPath string, readonly bool) ctrOption {
}
}
+func withEnv(name, value, valueFrom, refName, refKey string) ctrOption {
+ return func(c *Ctr) {
+ e := Env{
+ Name: name,
+ Value: value,
+ ValueFrom: valueFrom,
+ RefName: refName,
+ RefKey: refKey,
+ }
+
+ c.Env = append(c.Env, e)
+ }
+}
+
+func withEnvFrom(name, from string) ctrOption {
+ return func(c *Ctr) {
+ e := EnvFrom{
+ Name: name,
+ From: from,
+ }
+
+ c.EnvFrom = append(c.EnvFrom, e)
+ }
+}
+
func getCtrNameInPod(pod *Pod) string {
return fmt.Sprintf("%s-%s", pod.Name, defaultCtrName)
}
@@ -544,6 +635,19 @@ func getVolume(vType, vPath string) *Volume {
}
}
+type Env struct {
+ Name string
+ Value string
+ ValueFrom string
+ RefName string
+ RefKey string
+}
+
+type EnvFrom struct {
+ Name string
+ From string
+}
+
var _ = Describe("Podman generate kube", func() {
var (
tempdir string
@@ -581,7 +685,7 @@ var _ = Describe("Podman generate kube", func() {
})
It("podman play kube fail with nonexist authfile", func() {
- err := generatePodKubeYaml(getPod(), kubeYaml)
+ err := generateKubeYaml("pod", getPod(), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", "--authfile", "/tmp/nonexist", kubeYaml})
@@ -592,7 +696,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test correct command", func() {
pod := getPod()
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -609,7 +713,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test correct command with only set command in yaml file", func() {
pod := getPod(withCtr(getCtr(withCmd([]string{"echo", "hello"}), withArg(nil))))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -626,7 +730,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test correct command with only set args in yaml file", func() {
pod := getPod(withCtr(getCtr(withImage(redis), withCmd(nil), withArg([]string{"echo", "hello"}))))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -644,7 +748,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test correct output", func() {
p := getPod(withCtr(getCtr(withCmd([]string{"echo", "hello"}), withArg([]string{"world"}))))
- err := generatePodKubeYaml(p, kubeYaml)
+ err := generateKubeYaml("pod", p, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -665,14 +769,14 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test restartPolicy", func() {
// podName, set, expect
testSli := [][]string{
- {"testPod1", "", "always"}, // Default eqaul to always
+ {"testPod1", "", "always"}, // Default equal to always
{"testPod2", "Always", "always"},
{"testPod3", "OnFailure", "on-failure"},
{"testPod4", "Never", "no"},
}
for _, v := range testSli {
pod := getPod(withPodName(v[0]), withRestartPolicy(v[1]))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -686,9 +790,52 @@ var _ = Describe("Podman generate kube", func() {
}
})
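The testSli table above encodes the mapping from Kubernetes restartPolicy values to podman restart policies, with an empty policy defaulting to always. A standalone sketch of that mapping as a plain function, derived only from the table and not from the play kube implementation:

    package main

    import "fmt"

    // Mapping taken from the test table above: "" and "Always" map to
    // "always", "OnFailure" to "on-failure", "Never" to "no".
    func kubeRestartPolicyToPodman(policy string) string {
        switch policy {
        case "", "Always":
            return "always"
        case "OnFailure":
            return "on-failure"
        case "Never":
            return "no"
        default:
            return "" // values outside the table are not covered by this test
        }
    }

    func main() {
        fmt.Println(kubeRestartPolicyToPodman("OnFailure")) // prints "on-failure"
    }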
+ It("podman play kube test env value from configmap", func() {
+ SkipIfRemote("configmap list is not supported as a param")
+ cmYamlPathname := filepath.Join(podmanTest.TempDir, "foo-cm.yaml")
+ cm := getConfigMap(withConfigMapName("foo"), withConfigMapData("FOO", "foo"))
+ err := generateKubeYaml("configmap", cm, cmYamlPathname)
+ Expect(err).To(BeNil())
+
+ pod := getPod(withCtr(getCtr(withEnv("FOO", "", "configmap", "foo", "FOO"))))
+ err = generateKubeYaml("pod", pod, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml, "--configmap", cmYamlPathname})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "'{{ .Config.Env }}'"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring(`FOO=foo`))
+ })
+
+ It("podman play kube test get all key-value pairs from configmap as envs", func() {
+ SkipIfRemote("configmap list is not supported as a param")
+ cmYamlPathname := filepath.Join(podmanTest.TempDir, "foo-cm.yaml")
+ cm := getConfigMap(withConfigMapName("foo"), withConfigMapData("FOO1", "foo1"), withConfigMapData("FOO2", "foo2"))
+ err := generateKubeYaml("configmap", cm, cmYamlPathname)
+ Expect(err).To(BeNil())
+
+ pod := getPod(withCtr(getCtr(withEnvFrom("foo", "configmap"))))
+ err = generateKubeYaml("pod", pod, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", kubeYaml, "--configmap", cmYamlPathname})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "'{{ .Config.Env }}'"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring(`FOO1=foo1`))
+ Expect(inspect.OutputToString()).To(ContainSubstring(`FOO2=foo2`))
+ })
+
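These two cases correspond to the two standard Kubernetes spellings for consuming a ConfigMap: a single key via env[].valueFrom.configMapKeyRef, and every key via envFrom[].configMapRef. A sketch of the container specs the generated YAML should resemble, held as Go raw strings for illustration; the container name is a placeholder and the real template lives elsewhere in this file:

    package main

    import "fmt"

    // Illustrative only: the two Kubernetes env shapes exercised by the
    // tests above. "ctr" is a placeholder container name.
    const envSingleKey = `containers:
    - name: ctr
      env:
      - name: FOO
        valueFrom:
          configMapKeyRef:
            name: foo
            key: FOO
    `

    const envWholeMap = `containers:
    - name: ctr
      envFrom:
      - configMapRef:
          name: foo
    `

    func main() {
        fmt.Print(envSingleKey, envWholeMap)
    }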
It("podman play kube test hostname", func() {
pod := getPod()
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -704,7 +851,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube test with customized hostname", func() {
hostname := "myhostname"
pod := getPod(withHostname(hostname))
- err := generatePodKubeYaml(getPod(withHostname(hostname)), kubeYaml)
+ err := generateKubeYaml("pod", getPod(withHostname(hostname)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -727,7 +874,7 @@ var _ = Describe("Podman generate kube", func() {
"test4.podman.io",
}),
)
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -746,7 +893,7 @@ var _ = Describe("Podman generate kube", func() {
ctr := getCtr(withCapAdd([]string{capAdd}), withCmd([]string{"cat", "/proc/self/status"}), withArg(nil))
pod := getPod(withCtr(ctr))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -764,7 +911,7 @@ var _ = Describe("Podman generate kube", func() {
ctr := getCtr(withCapDrop([]string{capDrop}))
pod := getPod(withCtr(ctr))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -780,7 +927,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube no security context", func() {
// expect play kube to not fail if no security context is specified
pod := getPod(withCtr(getCtr(withSecurityContext(false))))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -805,7 +952,7 @@ var _ = Describe("Podman generate kube", func() {
ctr := getCtr(withCmd([]string{"pwd"}), withArg(nil))
pod := getPod(withCtr(ctr), withAnnotation(ctrAnnotation, "localhost/"+filepath.Base(jsonFile)))
- err = generatePodKubeYaml(pod, kubeYaml)
+ err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
// CreateSeccompJson will put the profile into podmanTest.TempDir. Use --seccomp-profile-root to tell play kube where to look
@@ -832,7 +979,7 @@ var _ = Describe("Podman generate kube", func() {
ctr := getCtr(withCmd([]string{"pwd"}), withArg(nil))
pod := getPod(withCtr(ctr), withAnnotation("seccomp.security.alpha.kubernetes.io/pod", "localhost/"+filepath.Base(jsonFile)))
- err = generatePodKubeYaml(pod, kubeYaml)
+ err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
// CreateSeccompJson will put the profile into podmanTest.TempDir. Use --seccomp-profile-root to tell play kube where to look
@@ -848,7 +995,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube with pull policy of never should be 125", func() {
ctr := getCtr(withPullPolicy("never"), withImage(BB_GLIBC))
- err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generateKubeYaml("pod", getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -858,7 +1005,7 @@ var _ = Describe("Podman generate kube", func() {
It("podman play kube with pull policy of missing", func() {
ctr := getCtr(withPullPolicy("missing"), withImage(BB))
- err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generateKubeYaml("pod", getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -884,7 +1031,7 @@ var _ = Describe("Podman generate kube", func() {
oldBBinspect := inspect.InspectImageJSON()
ctr := getCtr(withPullPolicy("always"), withImage(BB))
- err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generateKubeYaml("pod", getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -915,7 +1062,7 @@ var _ = Describe("Podman generate kube", func() {
oldBBinspect := inspect.InspectImageJSON()
ctr := getCtr(withImage(BB))
- err := generatePodKubeYaml(getPod(withCtr(ctr)), kubeYaml)
+ err := generateKubeYaml("pod", getPod(withCtr(ctr)), kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -973,7 +1120,7 @@ spec:
// Deployment related tests
It("podman play kube deployment 1 replica test correct command", func() {
deployment := getDeployment()
- err := generateDeploymentKubeYaml(deployment, kubeYaml)
+ err := generateKubeYaml("deployment", deployment, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -993,7 +1140,7 @@ spec:
var i, numReplicas int32
numReplicas = 5
deployment := getDeployment(withReplicas(numReplicas))
- err := generateDeploymentKubeYaml(deployment, kubeYaml)
+ err := generateKubeYaml("deployment", deployment, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1016,7 +1163,7 @@ spec:
ctr := getCtr(withHostIP(ip, port), withImage(BB))
pod := getPod(withCtr(ctr))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1033,7 +1180,7 @@ spec:
hostPathLocation := filepath.Join(tempdir, "file")
pod := getPod(withVolume(getVolume(`""`, hostPathLocation)))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1048,7 +1195,7 @@ spec:
f.Close()
pod := getPod(withVolume(getVolume(`""`, hostPathLocation)))
- err = generatePodKubeYaml(pod, kubeYaml)
+ err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1060,7 +1207,7 @@ spec:
hostPathLocation := filepath.Join(tempdir, "file")
pod := getPod(withVolume(getVolume("File", hostPathLocation)))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1075,7 +1222,7 @@ spec:
f.Close()
pod := getPod(withVolume(getVolume("File", hostPathLocation)))
- err = generatePodKubeYaml(pod, kubeYaml)
+ err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1087,7 +1234,7 @@ spec:
hostPathLocation := filepath.Join(tempdir, "file")
pod := getPod(withVolume(getVolume("FileOrCreate", hostPathLocation)))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1103,7 +1250,7 @@ spec:
hostPathLocation := filepath.Join(tempdir, "file")
pod := getPod(withVolume(getVolume("DirectoryOrCreate", hostPathLocation)))
- err := generatePodKubeYaml(pod, kubeYaml)
+ err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1123,7 +1270,7 @@ spec:
f.Close()
pod := getPod(withVolume(getVolume("Socket", hostPathLocation)))
- err = generatePodKubeYaml(pod, kubeYaml)
+ err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1139,7 +1286,7 @@ spec:
ctr := getCtr(withVolumeMount(hostPathLocation, true), withImage(BB))
pod := getPod(withVolume(getVolume("File", hostPathLocation)), withCtr(ctr))
- err = generatePodKubeYaml(pod, kubeYaml)
+ err = generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
@@ -1162,7 +1309,7 @@ spec:
withReplicas(numReplicas),
withPod(getPod(withLabel(expectedLabelKey, expectedLabelValue))),
)
- err := generateDeploymentKubeYaml(deployment, kubeYaml)
+ err := generateKubeYaml("deployment", deployment, kubeYaml)
Expect(err).To(BeNil())
kube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
diff --git a/test/e2e/ps_test.go b/test/e2e/ps_test.go
index 0f2ce2d46..48ef566ce 100644
--- a/test/e2e/ps_test.go
+++ b/test/e2e/ps_test.go
@@ -11,6 +11,7 @@ import (
"github.com/docker/go-units"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ . "github.com/onsi/gomega/gexec"
)
var _ = Describe("Podman ps", func() {
@@ -218,17 +219,16 @@ var _ = Describe("Podman ps", func() {
})
It("podman ps namespace flag with go template format", func() {
- Skip("FIXME: table still not supported in podman ps command")
_, ec, _ := podmanTest.RunLsContainer("test1")
Expect(ec).To(Equal(0))
result := podmanTest.Podman([]string{"ps", "-a", "--format", "table {{.ID}} {{.Image}} {{.ImageID}} {{.Labels}}"})
result.WaitWithDefaultTimeout()
- Expect(strings.Contains(result.OutputToStringArray()[0], "table")).To(BeFalse())
- Expect(strings.Contains(result.OutputToStringArray()[0], "ID")).To(BeTrue())
- Expect(strings.Contains(result.OutputToStringArray()[0], "ImageID")).To(BeTrue())
- Expect(strings.Contains(result.OutputToStringArray()[1], "alpine:latest")).To(BeTrue())
- Expect(result.ExitCode()).To(Equal(0))
+
+ Expect(result.OutputToStringArray()[0]).ToNot(ContainSubstring("table"))
+ Expect(result.OutputToStringArray()[0]).ToNot(ContainSubstring("ImageID"))
+ Expect(result.OutputToStringArray()[0]).To(ContainSubstring("alpine:latest"))
+ Expect(result).Should(Exit(0))
})
It("podman ps ancestor filter flag", func() {
diff --git a/test/e2e/rm_test.go b/test/e2e/rm_test.go
index 7eff8c6ed..524c07cc6 100644
--- a/test/e2e/rm_test.go
+++ b/test/e2e/rm_test.go
@@ -236,7 +236,6 @@ var _ = Describe("Podman rm", func() {
})
It("podman rm --ignore bogus container and a running container", func() {
-
session := podmanTest.RunTopContainer("test1")
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index cd32e5a77..e6bba9f67 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -394,7 +394,7 @@ USER bin`
})
It("podman run sysctl test", func() {
- SkipIfRootless("Network sysctls are not avalable root rootless")
+ SkipIfRootless("Network sysctls are not available root rootless")
session := podmanTest.Podman([]string{"run", "--rm", "--sysctl", "net.core.somaxconn=65535", ALPINE, "sysctl", "net.core.somaxconn"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/e2e/volume_ls_test.go b/test/e2e/volume_ls_test.go
index 4a2c2d324..1cb6440aa 100644
--- a/test/e2e/volume_ls_test.go
+++ b/test/e2e/volume_ls_test.go
@@ -7,6 +7,7 @@ import (
. "github.com/containers/podman/v2/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ . "github.com/onsi/gomega/gexec"
)
var _ = Describe("Podman volume ls", func() {
@@ -56,15 +57,15 @@ var _ = Describe("Podman volume ls", func() {
})
It("podman ls volume with Go template", func() {
- Skip("FIXME: table still not supported in podman volume command")
session := podmanTest.Podman([]string{"volume", "create", "myvol"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
session = podmanTest.Podman([]string{"volume", "ls", "--format", "table {{.Name}} {{.Driver}} {{.Scope}}"})
session.WaitWithDefaultTimeout()
- Expect(session.ExitCode()).To(Equal(0))
- Expect(len(session.OutputToStringArray())).To(Equal(2))
+
+ Expect(session).Should(Exit(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(1), session.OutputToString())
})
It("podman ls volume with --filter flag", func() {
diff --git a/test/system/055-rm.bats b/test/system/055-rm.bats
index c8475c3e9..7176ae4b8 100644
--- a/test/system/055-rm.bats
+++ b/test/system/055-rm.bats
@@ -33,6 +33,21 @@ load helpers
run_podman rm -f $cid
}
+@test "podman rm container from storage" {
+ if is_remote; then
+ skip "only applicable for local podman"
+ fi
+ rand=$(random_string 30)
+ run_podman create --name $rand $IMAGE /bin/true
+
+ # Create a container that podman does not know about
+ run buildah from $IMAGE
+ cid="$output"
+
+ # rm should succeed
+ run_podman rm $rand $cid
+}
+
# I'm sorry! This test takes 13 seconds. There's not much I can do about it,
# please know that I think it's justified: podman 1.5.0 had a strange bug
# in which exit status was not preserved on some code paths with 'rm -f'