34 files changed, 680 insertions, 122 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index 371f902c2..2106ac96d 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -30,7 +30,7 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - _BUILT_IMAGE_SUFFIX: "libpod-5874660151656448" + _BUILT_IMAGE_SUFFIX: "libpod-5940307564953600" FEDORA_CACHE_IMAGE_NAME: "fedora-31-${_BUILT_IMAGE_SUFFIX}" PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-30-${_BUILT_IMAGE_SUFFIX}" UBUNTU_CACHE_IMAGE_NAME: "ubuntu-19-${_BUILT_IMAGE_SUFFIX}" @@ -48,8 +48,9 @@ env: #### Default to NOT operating in any special-case testing mode #### SPECIALMODE: "none" # don't do anything special - TEST_REMOTE_CLIENT: false # don't test remote client by default - ADD_SECOND_PARTITION: false # will certainly fail inside containers + TEST_REMOTE_CLIENT: 'false' # don't test remote client by default + ADD_SECOND_PARTITION: 'false' # will certainly fail inside containers + MOD_LIBPOD_CONF: 'true' # Update libpod.conf runtime if required by OS environment #### #### Credentials and other secret-sauces, decrypted at runtime when authorized. @@ -253,6 +254,9 @@ build_each_commit_task: cpu: 8 memory: "8Gb" + env: + MOD_LIBPOD_CONF: 'false' + timeout_in: 30m setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}' @@ -282,6 +286,9 @@ build_without_cgo_task: cpu: 8 memory: "8Gb" + env: + MOD_LIBPOD_CONF: 'false' + timeout_in: 30m setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}' @@ -381,10 +388,10 @@ testing_task: timeout_in: 120m env: - ADD_SECOND_PARTITION: true + ADD_SECOND_PARTITION: 'true' matrix: - TEST_REMOTE_CLIENT: true - TEST_REMOTE_CLIENT: false + TEST_REMOTE_CLIENT: 'true' + TEST_REMOTE_CLIENT: 'false' networking_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/networking.sh' setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}' @@ -428,11 +435,11 @@ special_testing_rootless_task: $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' env: - ADD_SECOND_PARTITION: true + ADD_SECOND_PARTITION: 'true' SPECIALMODE: 'rootless' # See docs matrix: - TEST_REMOTE_CLIENT: true - TEST_REMOTE_CLIENT: false + TEST_REMOTE_CLIENT: 'true' + TEST_REMOTE_CLIENT: 'false' timeout_in: 60m @@ -469,7 +476,8 @@ special_testing_in_podman_task: image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" env: - ADD_SECOND_PARTITION: true + ADD_SECOND_PARTITION: 'true' + MOD_LIBPOD_CONF: 'false' # Use existing/native setup SPECIALMODE: 'in_podman' # See docs # TODO: Support both runc and crun (cgroups v1 and v2 container images) # matrix: @@ -628,10 +636,10 @@ verify_test_built_images_task: image_name: "${PACKER_BUILDER_NAME}${BUILT_IMAGE_SUFFIX}" env: - ADD_SECOND_PARTITION: true + ADD_SECOND_PARTITION: 'true' matrix: - TEST_REMOTE_CLIENT: true - TEST_REMOTE_CLIENT: false + TEST_REMOTE_CLIENT: 'true' + TEST_REMOTE_CLIENT: 'false' matrix: # Required env. var. by check_image_script PACKER_BUILDER_NAME: "fedora-30" diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index f2381f7e3..f813b494f 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -6,6 +6,7 @@ - The `podman ps --format=json` command now includes the ID of the image containers were created with - The `podman create` and `podman run` commands now support the `--device-cgroup-rule` flag ([#4876](https://github.com/containers/libpod/issues/4876)) - While the HTTP API remains in alpha, many fixes and additions have landed. 
These are documented in a separate subsection below +- The `podman create` and `podman run` commands now feature a `--no-healthcheck` flag to disable healthchecks for a container ([#5299](https://github.com/containers/libpod/issues/5299)) ### Bugfixes - Fixed CVE-2020-1726, a security issue where volumes manually populated before first being mounted into a container could have those contents overwritten on first being mounted into a container @@ -28,6 +29,10 @@ - Fixed a bug where `podman commit --change` would perform incorrect validation, resulting in valid changes being rejected ([#5148](https://github.com/containers/libpod/issues/5148)) - Fixed a bug where `podman logs --tail` could take large amounts of memory when the log file for a container was large ([#5131](https://github.com/containers/libpod/issues/5131)) - Fixed a bug where Podman would sometimes incorrectly generate firewall rules on systems using `firewalld` +- Fixed a bug where the `podman inspect` command would not display network information for containers properly if a container joined multiple CNI networks ([#4907](https://github.com/containers/libpod/issues/4907)) +- Fixed a bug where the `--uts` flag to `podman create` and `podman run` would only allow specifying containers by full ID ([#5289](https://github.com/containers/libpod/issues/5289)) +- Fixed a bug where rootless Podman could segfault when passed a large number of file descriptors +- Fixed a bug where the `podman port` command was incorrectly interpreting additional arguments as container names, instead of port numbers ### HTTP API - Initial support for secure connections to servers via SSH tunneling has been added @@ -48,6 +53,7 @@ - The `CreatedTime` field to `podman images --format=json` has been renamed to `CreatedAt` as part of the fix for ([#5110](https://github.com/containers/libpod/issues/5110)). Go templates using the old name should still work - The `before` filter to `podman images` has been renamed to `since` for Docker compatibility. Using `before` will still work, but documentation has been changed to use the new `since` filter - Using the `--password` flag to `podman login` now warns that passwords are being passed in plaintext +- Some common cases where Podman would deadlock have been fixed to warn the user that `podman system renumber` must be run to resolve the deadlock ## 1.8.0 ### Features diff --git a/changelog.txt b/changelog.txt index 0dac716d0..84d6dcea0 100644 --- a/changelog.txt +++ b/changelog.txt @@ -1,3 +1,44 @@ +- Changelog for v1.8.1-rc2 (2020-02-27) + * Update release notes for v1.8.1-rc2 + * Vendor in latest containers/buildah + * kill test: clean up warnings; document better + * curb flakes in integration tests + * spec: allow container alias name in lookup + * add epoch for specfile + * fix trivial typo + * Add support for multiple CNI networks in podman inspect + * Remove 1 sec delay + * Temp. skip "remove pause by id" bindings test + * Fix kill test obtaining CID + * System Tests: Force default signal handlers + * Fix cgroupsv2 run test, unexpected output + * Cirrus: SELinux Enforcing for F31 w/ CGv2 + * Cirrus: collect podman system info + * Cirrus: F31: Force systemd cgroup mgr + * Cirrus: Temp. 
disable F31 p-in-p testing + * Cirrus: Handle runc->crun when both are possible + * Cirrus: Use deadline elevator in F31 + * Cirrus: Support testing with F31 + * rootless: become root only if the pause file is specified + * rootless: fix segfault when open fd >= FD_SETSIZE + * apiv2 tests: add more pod tests, timing check + * Update vendor of buildah and containers/common + * build: move initialization after SetXdgDirs + * utils: relax check for directory to use + * add apiv2 tests for podman pause and stop + * always run the docs task on post-merge + * Fixed build_rpm.sh script for Fedora 30 + * Add basic deadlock detection for container start/remove + * Friendly amendment: tests, and a help message + * fix port list by container with port + * more image binding tests + * docs: symlink to host device is resolved + * Add --no-healthcheck command to create/run + * enable ci on go binding tests + * add more image tests for go bindings + * Bump to v1.8.1-dev + * build(deps): bump github.com/opencontainers/selinux from 1.3.1 to 1.3.2 + - Changelog for v1.8.1-rc1 (2020-02-21) * Update release notes for v1.8.1 * disable generation of cni firewall plugin diff --git a/cmd/podman/build.go b/cmd/podman/build.go index fa4689211..b8b315c68 100644 --- a/cmd/podman/build.go +++ b/cmd/podman/build.go @@ -352,6 +352,7 @@ func buildCmd(c *cliconfig.BuildValues) error { ContextDirectory: contextDir, DefaultMountsFilePath: c.GlobalFlags.DefaultMountsFile, Err: stderr, + In: os.Stdin, ForceRmIntermediateCtrs: c.ForceRm, IIDFile: c.Iidfile, Labels: c.Label, diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go index 6bc8aa4a3..ccc30c603 100644 --- a/cmd/podman/cliconfig/config.go +++ b/cmd/podman/cliconfig/config.go @@ -260,6 +260,7 @@ type LogsValues struct { Tail int64 Timestamps bool Latest bool + UseName bool } type MountValues struct { diff --git a/cmd/podman/logs.go b/cmd/podman/logs.go index ebc53ddf8..0a86fa128 100644 --- a/cmd/podman/logs.go +++ b/cmd/podman/logs.go @@ -37,6 +37,7 @@ var ( return nil }, Example: `podman logs ctrID + podman logs --names ctrID1 ctrID2 podman logs --tail 2 mywebserver podman logs --follow=true --since 10m ctrID podman logs mywebserver mydbserver`, @@ -54,6 +55,7 @@ func init() { flags.StringVar(&logsCommand.Since, "since", "", "Show logs since TIMESTAMP") flags.Int64Var(&logsCommand.Tail, "tail", -1, "Output the specified number of LINES at the end of the logs. 
Defaults to -1, which prints all lines") flags.BoolVarP(&logsCommand.Timestamps, "timestamps", "t", false, "Output the timestamps in the log") + flags.BoolVarP(&logsCommand.UseName, "names", "n", false, "Output the container name in the log") markFlagHidden(flags, "details") flags.SetInterspersed(false) @@ -85,6 +87,7 @@ func logsCmd(c *cliconfig.LogsValues) error { Since: sinceTime, Tail: c.Tail, Timestamps: c.Timestamps, + UseName: c.UseName, } return runtime.Log(c, options) } diff --git a/cni/87-podman-bridge.conflist b/cni/87-podman-bridge.conflist index cd01b97ce..13b09a5b5 100644 --- a/cni/87-podman-bridge.conflist +++ b/cni/87-podman-bridge.conflist @@ -27,6 +27,9 @@ } }, { + "type": "firewall" + }, + { "type": "tuning" } ] diff --git a/contrib/cirrus/integration_test.sh b/contrib/cirrus/integration_test.sh index d5e6ec884..20e067c93 100755 --- a/contrib/cirrus/integration_test.sh +++ b/contrib/cirrus/integration_test.sh @@ -16,16 +16,6 @@ fi cd "$GOSRC" -# Transition workaround: runc is still the default for upstream development -handle_crun() { - # For systems with crun installed, assume CgroupsV2 and use it - if type -P crun &> /dev/null - then - warn "Replacing runc -> crun in libpod.conf" - sed -i -r -e 's/^runtime = "runc"/runtime = "crun"/' /usr/share/containers/libpod.conf - fi -} - case "$SPECIALMODE" in in_podman) ${CONTAINER_RUNTIME} run --rm --privileged --net=host \ @@ -49,7 +39,6 @@ case "$SPECIALMODE" in endpoint) make make install PREFIX=/usr ETCDIR=/etc - #handle_crun make test-binaries make endpoint ;; @@ -63,7 +52,6 @@ case "$SPECIALMODE" in make install PREFIX=/usr ETCDIR=/etc make install.config PREFIX=/usr make test-binaries - handle_crun if [[ "$TEST_REMOTE_CLIENT" == "true" ]] then make remote${TESTSUITE} VARLINK_LOG=$VARLINK_LOG diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh index 71ad67c74..1ffe554e9 100644 --- a/contrib/cirrus/lib.sh +++ b/contrib/cirrus/lib.sh @@ -88,6 +88,7 @@ ROOTLESS_ENV_RE='(CIRRUS_.+)|(ROOTLESS_.+)|(.+_IMAGE.*)|(.+_BASE)|(.*DIRPATH)|(. SECRET_ENV_RE='(IRCID)|(ACCOUNT)|(GC[EP]..+)|(SSH)' SPECIALMODE="${SPECIALMODE:-none}" +MOD_LIBPOD_CONF="${MOD_LIBPOD_CONF:false}" TEST_REMOTE_CLIENT="${TEST_REMOTE_CLIENT:-false}" export CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-podman} @@ -105,6 +106,8 @@ OS_RELEASE_ID="$(source /etc/os-release; echo $ID)" OS_RELEASE_VER="$(source /etc/os-release; echo $VERSION_ID | cut -d '.' -f 1)" # Combined to ease soe usage OS_REL_VER="${OS_RELEASE_ID}-${OS_RELEASE_VER}" +# Type of filesystem used for cgroups +CG_FS_TYPE="$(stat -f -c %T /sys/fs/cgroup)" # Installed into cache-images, supports overrides # by user-data in case of breakage or for debugging. 
diff --git a/contrib/cirrus/logcollector.sh b/contrib/cirrus/logcollector.sh index 1769e9362..34b88e6ea 100755 --- a/contrib/cirrus/logcollector.sh +++ b/contrib/cirrus/logcollector.sh @@ -56,6 +56,7 @@ case $1 in ) case $OS_RELEASE_ID in fedora*) + cat /etc/fedora-release PKG_LST_CMD='rpm -q --qf=%{N}-%{V}-%{R}-%{ARCH}\n' PKG_NAMES+=(\ container-selinux \ @@ -64,6 +65,7 @@ case $1 in ) ;; ubuntu*) + cat /etc/issue PKG_LST_CMD='dpkg-query --show --showformat=${Package}-${Version}-${Architecture}\n' PKG_NAMES+=(\ cri-o-runc \ @@ -71,6 +73,8 @@ case $1 in ;; *) bad_os_id_ver ;; esac + echo "Kernel: " $(uname -r) + echo "Cgroups: " $(stat -f -c %T /sys/fs/cgroup) # Any not-present packages will be listed as such $PKG_LST_CMD ${PKG_NAMES[@]} | sort -u ;; diff --git a/contrib/cirrus/packer/fedora_setup.sh b/contrib/cirrus/packer/fedora_setup.sh index 591a59a05..20014e5f3 100644 --- a/contrib/cirrus/packer/fedora_setup.sh +++ b/contrib/cirrus/packer/fedora_setup.sh @@ -8,7 +8,7 @@ set -e # Load in library (copied by packer, before this script was run) source /tmp/libpod/$SCRIPT_BASE/lib.sh -req_env_var SCRIPT_BASE PACKER_BUILDER_NAME GOSRC +req_env_var SCRIPT_BASE PACKER_BUILDER_NAME GOSRC FEDORA_BASE_IMAGE OS_RELEASE_ID OS_RELEASE_VER install_ooe @@ -17,9 +17,14 @@ trap "sudo rm -rf $GOPATH" EXIT $BIGTO ooe.sh sudo dnf update -y -echo "Enabling updates-testing repository" -$LILTO ooe.sh sudo dnf install -y 'dnf-command(config-manager)' -$LILTO ooe.sh sudo dnf config-manager --set-enabled updates-testing +# Do not enable update-stesting on the previous Fedora release +if [[ "$FEDORA_BASE_IMAGE" =~ "${OS_RELEASE_ID}-cloud-base-${OS_RELEASE_VER}" ]]; then + warn "Enabling updates-testing repository for image based on $FEDORA_BASE_IMAGE" + $LILTO ooe.sh sudo dnf install -y 'dnf-command(config-manager)' + $LILTO ooe.sh sudo dnf config-manager --set-enabled updates-testing +else + warn "NOT enabling updates-testing repository for image based on $PRIOR_FEDORA_BASE_IMAGE" +fi echo "Installing general build/test dependencies for Fedora '$OS_RELEASE_VER'" REMOVE_PACKAGES=() @@ -98,6 +103,7 @@ case "$OS_RELEASE_VER" in python2-future runc ) + REMOVE_PACKAGES+=(crun) ;; 31) INSTALL_PACKAGES+=(crun) diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh index 5364dd510..d2e1b8767 100755 --- a/contrib/cirrus/setup_environment.sh +++ b/contrib/cirrus/setup_environment.sh @@ -6,15 +6,19 @@ source $(dirname $0)/lib.sh req_env_var USER HOME GOSRC SCRIPT_BASE SETUP_MARKER_FILEPATH -show_env_vars - # Ensure this script only executes successfully once and always logs ending timestamp -[[ ! -e "$SETUP_MARKER_FILEPATH" ]] || exit 0 +if [[ -e "$SETUP_MARKER_FILEPATH" ]]; then + show_env_vars + exit 0 +fi + exithandler() { RET=$? echo "." echo "$(basename $0) exit status: $RET" [[ "$RET" -eq "0" ]] && date +%s >> "$SETUP_MARKER_FILEPATH" + show_env_vars + [ "$RET" -eq "0" ]] || warn "Non-zero exit caused by error ABOVE env. var. display." 
} trap exithandler EXIT @@ -46,42 +50,59 @@ case "${OS_RELEASE_ID}" in # All SELinux distros need this for systemd-in-a-container setsebool container_manage_cgroup true if [[ "$ADD_SECOND_PARTITION" == "true" ]]; then - bash "$SCRIPT_BASE/add_second_partition.sh"; fi + bash "$SCRIPT_BASE/add_second_partition.sh" + fi - if [[ "$OS_RELEASE_VER" == "31" ]]; then - warn "Switching io schedular to deadline to avoid RHBZ 1767539" - warn "aka https://bugzilla.kernel.org/show_bug.cgi?id=205447" - echo "mq-deadline" > /sys/block/sda/queue/scheduler - cat /sys/block/sda/queue/scheduler + warn "Switching io scheduler to 'deadline' to avoid RHBZ 1767539" + warn "aka https://bugzilla.kernel.org/show_bug.cgi?id=205447" + echo "mq-deadline" > /sys/block/sda/queue/scheduler + cat /sys/block/sda/queue/scheduler - warn "Forcing systemd cgroup manager" - X=$(echo "export CGROUP_MANAGER=systemd" | \ - tee -a /etc/environment) && eval "$X" && echo "$X" + warn "Forcing systemd cgroup manager" + X=$(echo "export CGROUP_MANAGER=systemd" | \ + tee -a /etc/environment) && eval "$X" && echo "$X" + ;; + centos) # Current VM is an image-builder-image no local podman/testing + echo "No further setup required for VM image building" + exit 0 + ;; + *) bad_os_id_ver ;; +esac - warn "Testing with crun instead of runc" - X=$(echo "export OCI_RUNTIME=/usr/bin/crun" | \ - tee -a /etc/environment) && eval "$X" && echo "$X" +# Reload to incorporate any changes from above +source "$SCRIPT_BASE/lib.sh" + +case "$CG_FS_TYPE" in + tmpfs) + warn "Forcing testing with runc instead of crun" + X=$(echo "export OCI_RUNTIME=/usr/bin/runc" | \ + tee -a /etc/environment) && eval "$X" && echo "$X" + ;; + cgroup2fs) + # This is necessary since we've built/installed from source, which uses runc as the default. + warn "Forcing testing with crun instead of runc" + X=$(echo "export OCI_RUNTIME=/usr/bin/crun" | \ + tee -a /etc/environment) && eval "$X" && echo "$X" + + if [[ "$MOD_LIBPOD_CONF" == "true" ]]; then + warn "Updating runtime setting in repo. 
copy of libpod.conf" + sed -i -r -e 's/^runtime = "runc"/runtime = "crun"/' $GOSRC/libpod.conf + git diff $GOSRC/libpod.conf + fi + if [[ "$OS_RELEASE_ID" == "fedora" ]]; then warn "Upgrading to the latest crun" # Normally not something to do for stable testing # but crun is new, and late-breaking fixes may be required # on short notice dnf update -y crun - - #warn "Setting SELinux into Permissive mode" - #setenforce 0 fi ;; - centos) # Current VM is an image-builder-image no local podman/testing - echo "No further setup required for VM image building" - exit 0 + *) + die 110 "Unsure how to handle cgroup filesystem type '$CG_FS_TYPE'" ;; - *) bad_os_id_ver ;; esac -# Reload to incorporate any changes from above -source "$SCRIPT_BASE/lib.sh" - # Must execute before possible setup_rootless() make install.tools diff --git a/hack/get_ci_vm.sh b/hack/get_ci_vm.sh index 768137213..7e31c19c6 100755 --- a/hack/get_ci_vm.sh +++ b/hack/get_ci_vm.sh @@ -96,7 +96,7 @@ env=yaml.load(open(".cirrus.yml"), Loader=yaml.SafeLoader)["env"] keys=[k for k in env if "ENCRYPTED" not in str(env[k])] for k,v in env.items(): v=str(v) - if "ENCRYPTED" not in v: + if "ENCRYPTED" not in v and "ADD_SECOND_PARTITION" not in v: print("{0}=\"{1}\"".format(k, v)), ' } @@ -181,7 +181,7 @@ parse_args(){ [[ -z "$ROOTLESS_USER" ]] || \ ENVS="$ENVS ROOTLESS_USER=$ROOTLESS_USER" - SETUP_CMD="env $ENVS $GOSRC/contrib/cirrus/setup_environment.sh" + SETUP_CMD="env $ENVS ADD_SECOND_PARTITIO=True $GOSRC/contrib/cirrus/setup_environment.sh" VMNAME="${VMNAME:-${USER}-${IMAGE_NAME}}" CREATE_CMD="$PGCLOUD compute instances create --zone=$ZONE --image=${IMAGE_NAME} --custom-cpu=$CPUS --custom-memory=$MEMORY --boot-disk-size=$DISK --labels=in-use-by=$USER $IBI_ARGS $VMNAME" diff --git a/libpod/container.log.go b/libpod/container.log.go index 7c46dde9a..514edb8c8 100644 --- a/libpod/container.log.go +++ b/libpod/container.log.go @@ -41,6 +41,7 @@ func (c *Container) readFromLogFile(options *logs.LogOptions, logChannel chan *l if len(tailLog) > 0 { for _, nll := range tailLog { nll.CID = c.ID() + nll.CName = c.Name() if nll.Since(options.Since) { logChannel <- nll } @@ -63,6 +64,7 @@ func (c *Container) readFromLogFile(options *logs.LogOptions, logChannel chan *l partial = "" } nll.CID = c.ID() + nll.CName = c.Name() if nll.Since(options.Since) { logChannel <- nll } diff --git a/libpod/container_api.go b/libpod/container_api.go index d612341bc..dabbe27dc 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -270,11 +270,6 @@ func (c *Container) Exec(tty, privileged bool, env map[string]string, cmd []stri } }() - // if the user is empty, we should inherit the user that the container is currently running with - if user == "" { - user = c.config.User - } - opts := new(ExecOptions) opts.Cmd = cmd opts.CapAdd = capList diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 739026264..63968918c 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -330,7 +330,10 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { // Add addition groups if c.config.GroupAdd is not empty if len(c.config.Groups) > 0 { - gids, _ := lookup.GetContainerGroups(c.config.Groups, c.state.Mountpoint, nil) + gids, err := lookup.GetContainerGroups(c.config.Groups, c.state.Mountpoint, overrides) + if err != nil { + return nil, errors.Wrapf(err, "error looking up supplemental groups for container %s", c.ID()) + } for _, gid := range gids { 
g.AddProcessAdditionalGid(gid) } diff --git a/libpod/logs/log.go b/libpod/logs/log.go index bd918abae..200ef3e99 100644 --- a/libpod/logs/log.go +++ b/libpod/logs/log.go @@ -38,6 +38,7 @@ type LogOptions struct { Timestamps bool Multi bool WaitGroup *sync.WaitGroup + UseName bool } // LogLine describes the information for each line of a log @@ -47,6 +48,7 @@ type LogLine struct { Time time.Time Msg string CID string + CName string } // GetLogFile returns an hp tail for a container given options @@ -164,11 +166,16 @@ func getTailLog(path string, tail int) ([]*LogLine, error) { func (l *LogLine) String(options *LogOptions) string { var out string if options.Multi { - cid := l.CID - if len(cid) > 12 { - cid = cid[:12] + if options.UseName { + cname := l.CName + out = fmt.Sprintf("%s ", cname) + } else { + cid := l.CID + if len(cid) > 12 { + cid = cid[:12] + } + out = fmt.Sprintf("%s ", cid) } - out = fmt.Sprintf("%s ", cid) } if options.Timestamps { out += fmt.Sprintf("%s ", l.Time.Format(LogTimeFormat)) diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go index 07d38693f..800f89603 100644 --- a/libpod/oci_conmon_linux.go +++ b/libpod/oci_conmon_linux.go @@ -1252,18 +1252,35 @@ func prepareProcessExec(c *Container, cmd, env []string, tty bool, cwd, user, se } + var addGroups []string + var sgids []uint32 + + // if the user is empty, we should inherit the user that the container is currently running with + if user == "" { + user = c.config.User + addGroups = c.config.Groups + } + overrides := c.getUserOverrides() execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, user, overrides) if err != nil { return nil, err } + if len(addGroups) > 0 { + sgids, err = lookup.GetContainerGroups(addGroups, c.state.Mountpoint, overrides) + if err != nil { + return nil, errors.Wrapf(err, "error looking up supplemental groups for container %s exec session %s", c.ID(), sessionID) + } + } + // If user was set, look it up in the container to get a UID to use on // the host - if user != "" { - sgids := make([]uint32, 0, len(execUser.Sgids)) - for _, sgid := range execUser.Sgids { - sgids = append(sgids, uint32(sgid)) + if user != "" || len(sgids) > 0 { + if user != "" { + for _, sgid := range execUser.Sgids { + sgids = append(sgids, uint32(sgid)) + } } processUser := spec.User{ UID: uint32(execUser.Uid), diff --git a/libpod/pod.go b/libpod/pod.go index 1b4c06c9d..4cdeb1033 100644 --- a/libpod/pod.go +++ b/libpod/pod.go @@ -88,6 +88,7 @@ type PodInspect struct { type PodInspectState struct { CgroupPath string `json:"cgroupPath"` InfraContainerID string `json:"infraContainerID"` + Status string `json:"status"` } // PodContainerInfo keeps information on a container in a pod diff --git a/libpod/pod_api.go b/libpod/pod_api.go index cb04f7411..200732652 100644 --- a/libpod/pod_api.go +++ b/libpod/pod_api.go @@ -407,7 +407,10 @@ func (p *Pod) Status() (map[string]define.ContainerStatus, error) { if err != nil { return nil, err } + return containerStatusFromContainers(allCtrs) +} +func containerStatusFromContainers(allCtrs []*Container) (map[string]define.ContainerStatus, error) { // We need to lock all the containers for _, ctr := range allCtrs { ctr.lock.Lock() @@ -443,6 +446,14 @@ func (p *Pod) Inspect() (*PodInspect, error) { if err != nil { return &PodInspect{}, err } + ctrStatuses, err := containerStatusFromContainers(containers) + if err != nil { + return nil, err + } + status, err := CreatePodStatusResults(ctrStatuses) + if err != nil { + return nil, err + } for _, c := range containers { 
containerStatus := "unknown" // Ignoring possible errors here because we don't want this to be @@ -468,6 +479,7 @@ func (p *Pod) Inspect() (*PodInspect, error) { State: &PodInspectState{ CgroupPath: p.state.CgroupPath, InfraContainerID: infraContainerID, + Status: status, }, Containers: podContainers, } diff --git a/libpod/volume.go b/libpod/volume.go index 1ffed872e..70099d6f4 100644 --- a/libpod/volume.go +++ b/libpod/volume.go @@ -126,3 +126,10 @@ func (v *Volume) GID() int { func (v *Volume) CreatedTime() time.Time { return v.config.CreatedTime } + +// Config returns the volume's configuration. +func (v *Volume) Config() (*VolumeConfig, error) { + config := VolumeConfig{} + err := JSONDeepCopy(v.config, &config) + return &config, err +} diff --git a/pkg/adapter/network.go b/pkg/adapter/network.go index c5bd91534..b25f54a13 100644 --- a/pkg/adapter/network.go +++ b/pkg/adapter/network.go @@ -209,6 +209,7 @@ func (r *LocalRuntime) NetworkCreateBridge(cli *cliconfig.NetworkCreateValues) ( bridge := network.NewHostLocalBridge(bridgeDeviceName, isGateway, false, ipMasq, ipamConfig) plugins = append(plugins, bridge) plugins = append(plugins, network.NewPortMapPlugin()) + plugins = append(plugins, network.NewFirewallPlugin()) // if we find the dnsname plugin, we add configuration for it if network.HasDNSNamePlugin(runtimeConfig.CNIPluginDir) && !cli.DisableDNS { // Note: in the future we might like to allow for dynamic domain names diff --git a/pkg/api/handlers/libpod/volumes.go b/pkg/api/handlers/libpod/volumes.go index 7e7e46718..9b10ee890 100644 --- a/pkg/api/handlers/libpod/volumes.go +++ b/pkg/api/handlers/libpod/volumes.go @@ -3,9 +3,11 @@ package libpod import ( "encoding/json" "net/http" + "strings" "github.com/containers/libpod/cmd/podman/shared" "github.com/containers/libpod/libpod" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/api/handlers" "github.com/containers/libpod/pkg/api/handlers/utils" "github.com/gorilla/schema" @@ -29,7 +31,6 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) { errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String())) return } - // decode params from body if err := json.NewDecoder(r.Body).Decode(&input); err != nil { utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Decode()")) @@ -49,14 +50,21 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) { parsedOptions, err := shared.ParseVolumeOptions(input.Opts) if err != nil { utils.InternalServerError(w, err) + return } volumeOptions = append(volumeOptions, parsedOptions...) } vol, err := runtime.NewVolume(r.Context(), volumeOptions...) 
if err != nil { utils.InternalServerError(w, err) + return + } + config, err := vol.Config() + if err != nil { + utils.InternalServerError(w, err) + return } - utils.WriteResponse(w, http.StatusOK, vol.Name()) + utils.WriteResponse(w, http.StatusOK, config) } func InspectVolume(w http.ResponseWriter, r *http.Request) { @@ -76,25 +84,46 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) { } func ListVolumes(w http.ResponseWriter, r *http.Request) { - //var ( - // runtime = r.Context().Value("runtime").(*libpod.Runtime) - // decoder = r.Context().Value("decoder").(*schema.Decoder) - //) - //query := struct { - // Filter string `json:"filter"` - //}{ - // // override any golang type defaults - //} - // - //if err := decoder.Decode(&query, r.URL.Query()); err != nil { - // utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, - // errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String())) - // return - //} - /* - This is all in main in cmd and needs to be extracted from there first. - */ + var ( + decoder = r.Context().Value("decoder").(*schema.Decoder) + err error + runtime = r.Context().Value("runtime").(*libpod.Runtime) + volumeConfigs []*libpod.VolumeConfig + volumeFilters []libpod.VolumeFilter + ) + query := struct { + Filters map[string][]string `schema:"filters"` + }{ + // override any golang type defaults + } + + if err := decoder.Decode(&query, r.URL.Query()); err != nil { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String())) + return + } + if len(query.Filters) > 0 { + volumeFilters, err = generateVolumeFilters(query.Filters) + if err != nil { + utils.InternalServerError(w, err) + return + } + } + vols, err := runtime.Volumes(volumeFilters...) 
+ if err != nil { + utils.InternalServerError(w, err) + return + } + for _, v := range vols { + config, err := v.Config() + if err != nil { + utils.InternalServerError(w, err) + return + } + volumeConfigs = append(volumeConfigs, config) + } + utils.WriteResponse(w, http.StatusOK, volumeConfigs) } func PruneVolumes(w http.ResponseWriter, r *http.Request) { @@ -133,9 +162,77 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) { vol, err := runtime.LookupVolume(name) if err != nil { utils.VolumeNotFound(w, name, err) + return } if err := runtime.RemoveVolume(r.Context(), vol, query.Force); err != nil { + if errors.Cause(err) == define.ErrVolumeBeingUsed { + utils.Error(w, "volumes being used", http.StatusConflict, err) + return + } utils.InternalServerError(w, err) + return } utils.WriteResponse(w, http.StatusNoContent, "") } + +func generateVolumeFilters(filters map[string][]string) ([]libpod.VolumeFilter, error) { + var vf []libpod.VolumeFilter + for filter, v := range filters { + for _, val := range v { + switch filter { + case "name": + nameVal := val + vf = append(vf, func(v *libpod.Volume) bool { + return nameVal == v.Name() + }) + case "driver": + driverVal := val + vf = append(vf, func(v *libpod.Volume) bool { + return v.Driver() == driverVal + }) + case "scope": + scopeVal := val + vf = append(vf, func(v *libpod.Volume) bool { + return v.Scope() == scopeVal + }) + case "label": + filterArray := strings.SplitN(val, "=", 2) + filterKey := filterArray[0] + var filterVal string + if len(filterArray) > 1 { + filterVal = filterArray[1] + } else { + filterVal = "" + } + vf = append(vf, func(v *libpod.Volume) bool { + for labelKey, labelValue := range v.Labels() { + if labelKey == filterKey && ("" == filterVal || labelValue == filterVal) { + return true + } + } + return false + }) + case "opt": + filterArray := strings.SplitN(val, "=", 2) + filterKey := filterArray[0] + var filterVal string + if len(filterArray) > 1 { + filterVal = filterArray[1] + } else { + filterVal = "" + } + vf = append(vf, func(v *libpod.Volume) bool { + for labelKey, labelValue := range v.Options() { + if labelKey == filterKey && ("" == filterVal || labelValue == filterVal) { + return true + } + } + return false + }) + default: + return nil, errors.Errorf("%q is in an invalid volume filter", filter) + } + } + } + return vf, nil +} diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go index c72b0f817..2930a9567 100644 --- a/pkg/api/handlers/types.go +++ b/pkg/api/handlers/types.go @@ -128,11 +128,16 @@ type CreateContainerConfig struct { NetworkingConfig dockerNetwork.NetworkingConfig } +// swagger:model VolumeCreate type VolumeCreateConfig struct { - Name string `json:"name"` - Driver string `schema:"driver"` - Label map[string]string `schema:"label"` - Opts map[string]string `schema:"opts"` + // New volume's name. Can be left blank + Name string `schema:"name"` + // Volume driver to use + Driver string `schema:"driver"` + // User-defined key/value metadata. + Label map[string]string `schema:"label"` + // Mapping of driver options and values. 
+ Opts map[string]string `schema:"opts"` } type IDResponse struct { diff --git a/pkg/api/server/register_volumes.go b/pkg/api/server/register_volumes.go index efe56a3ad..d1317904b 100644 --- a/pkg/api/server/register_volumes.go +++ b/pkg/api/server/register_volumes.go @@ -11,15 +11,42 @@ func (s *APIServer) registerVolumeHandlers(r *mux.Router) error { // swagger:operation POST /libpod/volumes/create volumes createVolume // --- // summary: Create a volume + // parameters: + // - in: body + // name: create + // description: attributes for creating a container + // schema: + // $ref: "#/definitions/VolumeCreate" + // produces: + // - application/json + // responses: + // '201': + // $ref: "#/responses/VolumeCreateResponse" + // '500': + // "$ref": "#/responses/InternalError" + r.Handle(VersionedPath("/libpod/volumes/create"), s.APIHandler(libpod.CreateVolume)).Methods(http.MethodPost) + // swagger:operation POST /libpod/volumes/json volumes listVolumes + // --- + // summary: List volumes + // description: Returns a list of networks // produces: // - application/json + // parameters: + // - in: query + // name: filters + // type: string + // description: | + // JSON encoded value of the filters (a map[string][]string) to process on the networks list. Available filters: + // - driver=<volume-driver-name> Matches volumes based on their driver. + // - label=<key> or label=<key>:<value> Matches volumes based on the presence of a label alone or a label and a value. + // - name=<volume-name> Matches all of volume name. + // - opt=<driver-option> Matches a storage driver options // responses: // '200': - // description: tbd + // "$ref": "#/responses/VolumeList" // '500': // "$ref": "#/responses/InternalError" - r.Handle("/libpod/volumes/create", s.APIHandler(libpod.CreateVolume)).Methods(http.MethodPost) - r.Handle("/libpod/volumes/json", s.APIHandler(libpod.ListVolumes)).Methods(http.MethodGet) + r.Handle(VersionedPath("/libpod/volumes/json"), s.APIHandler(libpod.ListVolumes)).Methods(http.MethodGet) // swagger:operation POST /libpod/volumes/prune volumes pruneVolumes // --- // summary: Prune volumes @@ -30,7 +57,7 @@ func (s *APIServer) registerVolumeHandlers(r *mux.Router) error { // description: no error // '500': // "$ref": "#/responses/InternalError" - r.Handle("/libpod/volumes/prune", s.APIHandler(libpod.PruneVolumes)).Methods(http.MethodPost) + r.Handle(VersionedPath("/libpod/volumes/prune"), s.APIHandler(libpod.PruneVolumes)).Methods(http.MethodPost) // swagger:operation GET /libpod/volumes/{name}/json volumes inspectVolume // --- // summary: Inspect volume @@ -49,7 +76,7 @@ func (s *APIServer) registerVolumeHandlers(r *mux.Router) error { // "$ref": "#/responses/NoSuchVolume" // '500': // "$ref": "#/responses/InternalError" - r.Handle("/libpod/volumes/{name}/json", s.APIHandler(libpod.InspectVolume)).Methods(http.MethodGet) + r.Handle(VersionedPath("/libpod/volumes/{name}/json"), s.APIHandler(libpod.InspectVolume)).Methods(http.MethodGet) // swagger:operation DELETE /libpod/volumes/{name} volumes removeVolume // --- // summary: Remove volume @@ -68,12 +95,12 @@ func (s *APIServer) registerVolumeHandlers(r *mux.Router) error { // responses: // 204: // description: no error - // 400: - // $ref: "#/responses/BadParamError" // 404: // $ref: "#/responses/NoSuchVolume" + // 409: + // description: Volume is in use and cannot be removed // 500: // $ref: "#/responses/InternalError" - r.Handle("/libpod/volumes/{name}", s.APIHandler(libpod.RemoveVolume)).Methods(http.MethodDelete) + 
r.Handle(VersionedPath("/libpod/volumes/{name}"), s.APIHandler(libpod.RemoveVolume)).Methods(http.MethodDelete) return nil } diff --git a/pkg/api/server/swagger.go b/pkg/api/server/swagger.go index fc409d816..011196e5a 100644 --- a/pkg/api/server/swagger.go +++ b/pkg/api/server/swagger.go @@ -1,6 +1,7 @@ package server import ( + "github.com/containers/libpod/libpod" "github.com/containers/libpod/pkg/api/handlers" "github.com/containers/libpod/pkg/api/handlers/utils" ) @@ -139,3 +140,19 @@ type ok struct { ok string } } + +// Volume create response +// swagger:response VolumeCreateResponse +type swagVolumeCreateResponse struct { + // in:body + Body struct { + libpod.VolumeConfig + } +} + +// Volume list +// swagger:response VolumeList +type swagVolumeListResponse struct { + // in:body + Body []libpod.Volume +} diff --git a/pkg/bindings/containers/create.go b/pkg/bindings/containers/create.go index 2943cb522..43a3ef02d 100644 --- a/pkg/bindings/containers/create.go +++ b/pkg/bindings/containers/create.go @@ -19,7 +19,7 @@ func CreateWithSpec(ctx context.Context, s specgen.SpecGenerator) (utils.Contain } specgenString, err := jsoniter.MarshalToString(s) if err != nil { - return ccr, nil + return ccr, err } stringReader := strings.NewReader(specgenString) response, err := conn.DoRequest(stringReader, http.MethodPost, "/containers/create", nil) diff --git a/pkg/bindings/test/common_test.go b/pkg/bindings/test/common_test.go index 38f5014ca..1fc774074 100644 --- a/pkg/bindings/test/common_test.go +++ b/pkg/bindings/test/common_test.go @@ -240,3 +240,7 @@ func createCache() { } b.cleanup() } + +func isStopped(state string) bool { + return state == "exited" || state == "stopped" +} diff --git a/pkg/bindings/test/containers_test.go b/pkg/bindings/test/containers_test.go index 6756e81c7..5a0bdebe6 100644 --- a/pkg/bindings/test/containers_test.go +++ b/pkg/bindings/test/containers_test.go @@ -232,7 +232,7 @@ var _ = Describe("Podman containers ", func() { // Ensure container is stopped data, err := containers.Inspect(connText, name, nil) Expect(err).To(BeNil()) - Expect(data.State.Status).To(Equal("exited")) + Expect(isStopped(data.State.Status)).To(BeTrue()) }) It("podman stop a running container by ID", func() { @@ -247,7 +247,7 @@ var _ = Describe("Podman containers ", func() { // Ensure container is stopped data, err = containers.Inspect(connText, name, nil) Expect(err).To(BeNil()) - Expect(data.State.Status).To(Equal("exited")) + Expect(isStopped(data.State.Status)).To(BeTrue()) }) }) diff --git a/pkg/bindings/test/pods_test.go b/pkg/bindings/test/pods_test.go index 4bea2f8d7..afffee4e6 100644 --- a/pkg/bindings/test/pods_test.go +++ b/pkg/bindings/test/pods_test.go @@ -13,7 +13,7 @@ import ( "github.com/onsi/gomega/gexec" ) -var _ = Describe("Podman images", func() { +var _ = Describe("Podman pods", func() { var ( bt *bindingTest s *gexec.Session @@ -120,6 +120,7 @@ var _ = Describe("Podman images", func() { err = pods.Pause(connText, newpod) Expect(err).To(BeNil()) response, err = pods.Inspect(connText, newpod) + Expect(response.State.Status).To(Equal(define.PodStatePaused)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). 
To(Equal(define.ContainerStatePaused)) @@ -129,6 +130,7 @@ var _ = Describe("Podman images", func() { err = pods.Unpause(connText, newpod) Expect(err).To(BeNil()) response, err = pods.Inspect(connText, newpod) + Expect(response.State.Status).To(Equal(define.PodStateRunning)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). To(Equal(define.ContainerStateRunning)) @@ -159,6 +161,7 @@ var _ = Describe("Podman images", func() { Expect(err).To(BeNil()) response, err := pods.Inspect(connText, newpod) + Expect(response.State.Status).To(Equal(define.PodStateRunning)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). To(Equal(define.ContainerStateRunning)) @@ -172,6 +175,7 @@ var _ = Describe("Podman images", func() { err = pods.Stop(connText, newpod, nil) Expect(err).To(BeNil()) response, _ = pods.Inspect(connText, newpod) + Expect(response.State.Status).To(Equal(define.PodStateExited)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). To(Equal(define.ContainerStateStopped)) @@ -184,13 +188,66 @@ var _ = Describe("Podman images", func() { err = pods.Restart(connText, newpod) Expect(err).To(BeNil()) response, _ = pods.Inspect(connText, newpod) + Expect(response.State.Status).To(Equal(define.PodStateRunning)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). To(Equal(define.ContainerStateRunning)) } }) - // Remove all stopped pods and their container to be implemented. + // Test to validate all the pods in the stopped/exited state are pruned sucessfully. It("prune pod", func() { + // Add a new pod + var newpod2 string = "newpod2" + bt.Podcreate(&newpod2) + // No pods pruned since no pod in exited state + err = pods.Prune(connText) + Expect(err).To(BeNil()) + podSummary, err := pods.List(connText, nil) + Expect(err).To(BeNil()) + Expect(len(podSummary)).To(Equal(2)) + + // Prune only one pod which is in exited state. + // Start then stop a pod. + // pod moves to exited state one pod should be pruned now. + err = pods.Start(connText, newpod) + Expect(err).To(BeNil()) + err = pods.Stop(connText, newpod, nil) + Expect(err).To(BeNil()) + response, err := pods.Inspect(connText, newpod) + Expect(response.State.Status).To(Equal(define.PodStateExited)) + err = pods.Prune(connText) + Expect(err).To(BeNil()) + podSummary, err = pods.List(connText, nil) + Expect(err).To(BeNil()) + Expect(len(podSummary)).To(Equal(1)) + + // Test prune all pods in exited state. + bt.Podcreate(&newpod) + err = pods.Start(connText, newpod) + Expect(err).To(BeNil()) + err = pods.Start(connText, newpod2) + Expect(err).To(BeNil()) + err = pods.Stop(connText, newpod, nil) + Expect(err).To(BeNil()) + response, err = pods.Inspect(connText, newpod) + Expect(response.State.Status).To(Equal(define.PodStateExited)) + for _, i := range response.Containers { + Expect(define.StringToContainerStatus(i.State)). + To(Equal(define.ContainerStateStopped)) + } + err = pods.Stop(connText, newpod2, nil) + Expect(err).To(BeNil()) + response, err = pods.Inspect(connText, newpod2) + Expect(response.State.Status).To(Equal(define.PodStateExited)) + for _, i := range response.Containers { + Expect(define.StringToContainerStatus(i.State)). 
+ To(Equal(define.ContainerStateStopped)) + } + err = pods.Prune(connText) + Expect(err).To(BeNil()) + podSummary, err = pods.List(connText, nil) + Expect(err).To(BeNil()) + Expect(len(podSummary)).To(Equal(0)) }) }) diff --git a/pkg/bindings/test/volumes_test.go b/pkg/bindings/test/volumes_test.go new file mode 100644 index 000000000..c8940d46e --- /dev/null +++ b/pkg/bindings/test/volumes_test.go @@ -0,0 +1,174 @@ +package test_bindings + +import ( + "context" + "fmt" + "github.com/containers/libpod/pkg/api/handlers" + "github.com/containers/libpod/pkg/bindings/containers" + "github.com/containers/libpod/pkg/bindings/volumes" + "net/http" + "time" + + "github.com/containers/libpod/pkg/bindings" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" +) + +var _ = Describe("Podman volumes", func() { + var ( + //tempdir string + //err error + //podmanTest *PodmanTestIntegration + bt *bindingTest + s *gexec.Session + connText context.Context + err error + trueFlag = true + ) + + BeforeEach(func() { + //tempdir, err = CreateTempDirInTempDir() + //if err != nil { + // os.Exit(1) + //} + //podmanTest = PodmanTestCreate(tempdir) + //podmanTest.Setup() + //podmanTest.SeedImages() + bt = newBindingTest() + bt.RestoreImagesFromCache() + s = bt.startAPIService() + time.Sleep(1 * time.Second) + connText, err = bindings.NewConnection(context.Background(), bt.sock) + Expect(err).To(BeNil()) + }) + + AfterEach(func() { + //podmanTest.Cleanup() + //f := CurrentGinkgoTestDescription() + //processTestResult(f) + s.Kill() + bt.cleanup() + }) + + It("create volume", func() { + // create a volume with blank config should work + _, err := volumes.Create(connText, handlers.VolumeCreateConfig{}) + Expect(err).To(BeNil()) + + vcc := handlers.VolumeCreateConfig{ + Name: "foobar", + Label: nil, + Opts: nil, + } + vol, err := volumes.Create(connText, vcc) + Expect(err).To(BeNil()) + Expect(vol.Name).To(Equal("foobar")) + + // create volume with same name should 500 + _, err = volumes.Create(connText, vcc) + Expect(err).ToNot(BeNil()) + code, _ := bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) + }) + + It("inspect volume", func() { + vol, err := volumes.Create(connText, handlers.VolumeCreateConfig{}) + Expect(err).To(BeNil()) + data, err := volumes.Inspect(connText, vol.Name) + Expect(err).To(BeNil()) + Expect(data.Name).To(Equal(vol.Name)) + }) + + It("remove volume", func() { + // removing a bogus volume should result in 404 + err := volumes.Remove(connText, "foobar", nil) + code, _ := bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusNotFound)) + + // Removing an unused volume should work + vol, err := volumes.Create(connText, handlers.VolumeCreateConfig{}) + Expect(err).To(BeNil()) + err = volumes.Remove(connText, vol.Name, nil) + Expect(err).To(BeNil()) + + // Removing a volume that is being used without force should be 409 + vol, err = volumes.Create(connText, handlers.VolumeCreateConfig{}) + Expect(err).To(BeNil()) + session := bt.runPodman([]string{"run", "-dt", "-v", fmt.Sprintf("%s:/foobar", vol.Name), "--name", "vtest", alpine.name, "top"}) + session.Wait(45) + err = volumes.Remove(connText, vol.Name, nil) + Expect(err).ToNot(BeNil()) + code, _ = bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusConflict)) + + // Removing with a volume in use with force should work with a stopped container + zero := 0 + err = containers.Stop(connText, "vtest", &zero) + 
Expect(err).To(BeNil()) + err = volumes.Remove(connText, vol.Name, &trueFlag) + Expect(err).To(BeNil()) + }) + + It("list volumes", func() { + // no volumes should be ok + vols, err := volumes.List(connText, nil) + Expect(err).To(BeNil()) + Expect(len(vols)).To(BeZero()) + + // create a bunch of named volumes and make verify with list + volNames := []string{"homer", "bart", "lisa", "maggie", "marge"} + for i := 0; i < 5; i++ { + _, err = volumes.Create(connText, handlers.VolumeCreateConfig{Name: volNames[i]}) + Expect(err).To(BeNil()) + } + vols, err = volumes.List(connText, nil) + Expect(err).To(BeNil()) + Expect(len(vols)).To(BeNumerically("==", 5)) + for _, v := range vols { + Expect(StringInSlice(v.Name, volNames)).To(BeTrue()) + } + + // list with bad filter should be 500 + filters := make(map[string][]string) + filters["foobar"] = []string{"1234"} + _, err = volumes.List(connText, filters) + Expect(err).ToNot(BeNil()) + code, _ := bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) + + filters = make(map[string][]string) + filters["name"] = []string{"homer"} + vols, err = volumes.List(connText, filters) + Expect(err).To(BeNil()) + Expect(len(vols)).To(BeNumerically("==", 1)) + Expect(vols[0].Name).To(Equal("homer")) + }) + + // TODO we need to add filtering to tests + It("prune unused volume", func() { + // Pruning when no volumes present should be ok + _, err := volumes.Prune(connText) + Expect(err).To(BeNil()) + + // Removing an unused volume should work + _, err = volumes.Create(connText, handlers.VolumeCreateConfig{}) + Expect(err).To(BeNil()) + vols, err := volumes.Prune(connText) + Expect(err).To(BeNil()) + Expect(len(vols)).To(BeNumerically("==", 1)) + + _, err = volumes.Create(connText, handlers.VolumeCreateConfig{Name: "homer"}) + Expect(err).To(BeNil()) + _, err = volumes.Create(connText, handlers.VolumeCreateConfig{}) + Expect(err).To(BeNil()) + session := bt.runPodman([]string{"run", "-dt", "-v", fmt.Sprintf("%s:/homer", "homer"), "--name", "vtest", alpine.name, "top"}) + session.Wait(45) + vols, err = volumes.Prune(connText) + Expect(err).To(BeNil()) + Expect(len(vols)).To(BeNumerically("==", 1)) + _, err = volumes.Inspect(connText, "homer") + Expect(err).To(BeNil()) + }) + +}) diff --git a/pkg/bindings/volumes/volumes.go b/pkg/bindings/volumes/volumes.go index 7f6a9cc9b..0bc818605 100644 --- a/pkg/bindings/volumes/volumes.go +++ b/pkg/bindings/volumes/volumes.go @@ -5,27 +5,33 @@ import ( "net/http" "net/url" "strconv" + "strings" "github.com/containers/libpod/libpod" "github.com/containers/libpod/pkg/api/handlers" "github.com/containers/libpod/pkg/bindings" + jsoniter "github.com/json-iterator/go" ) // Create creates a volume given its configuration. -func Create(ctx context.Context, config handlers.VolumeCreateConfig) (string, error) { - // TODO This is incomplete. 
The config needs to be sent via the body +func Create(ctx context.Context, config handlers.VolumeCreateConfig) (*libpod.VolumeConfig, error) { var ( - volumeID string + v libpod.VolumeConfig ) conn, err := bindings.GetClient(ctx) if err != nil { - return "", err + return nil, err + } + createString, err := jsoniter.MarshalToString(config) + if err != nil { + return nil, err } - response, err := conn.DoRequest(nil, http.MethodPost, "/volumes/create", nil) + stringReader := strings.NewReader(createString) + response, err := conn.DoRequest(stringReader, http.MethodPost, "/volumes/create", nil) if err != nil { - return volumeID, err + return nil, err } - return volumeID, response.Process(&volumeID) + return &v, response.Process(&v) } // Inspect returns low-level information about a volume. @@ -37,18 +43,36 @@ func Inspect(ctx context.Context, nameOrID string) (*libpod.InspectVolumeData, e if err != nil { return nil, err } - response, err := conn.DoRequest(nil, http.MethodPost, "/volumes/%s/json", nil, nameOrID) + response, err := conn.DoRequest(nil, http.MethodGet, "/volumes/%s/json", nil, nameOrID) if err != nil { return &inspect, err } return &inspect, response.Process(&inspect) } -func List() error { - // TODO - // The API side of things for this one does a lot in main and therefore - // is not implemented yet. - return bindings.ErrNotImplemented // nolint:typecheck +// List returns the configurations for existing volumes in the form of a slice. Optionally, filters +// can be used to refine the list of volumes. +func List(ctx context.Context, filters map[string][]string) ([]*libpod.VolumeConfig, error) { + var ( + vols []*libpod.VolumeConfig + ) + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params := url.Values{} + if len(filters) > 0 { + strFilters, err := bindings.FiltersToString(filters) + if err != nil { + return nil, err + } + params.Set("filters", strFilters) + } + response, err := conn.DoRequest(nil, http.MethodGet, "/volumes/json", params) + if err != nil { + return vols, err + } + return vols, response.Process(&vols) } // Prune removes unused volumes from the local filesystem. 
@@ -78,7 +102,7 @@ func Remove(ctx context.Context, nameOrID string, force *bool) error { if force != nil { params.Set("force", strconv.FormatBool(*force)) } - response, err := conn.DoRequest(nil, http.MethodPost, "/volumes/%s/prune", params, nameOrID) + response, err := conn.DoRequest(nil, http.MethodDelete, "/volumes/%s", params, nameOrID) if err != nil { return err } diff --git a/pkg/network/netconflist.go b/pkg/network/netconflist.go index a8217097a..34ff00024 100644 --- a/pkg/network/netconflist.go +++ b/pkg/network/netconflist.go @@ -110,7 +110,6 @@ func NewPortMapPlugin() PortMapConfig { func NewFirewallPlugin() FirewallConfig { return FirewallConfig{ PluginType: "firewall", - Backend: "iptables", } } diff --git a/test/e2e/exec_test.go b/test/e2e/exec_test.go index ed4eb3335..ab806f683 100644 --- a/test/e2e/exec_test.go +++ b/test/e2e/exec_test.go @@ -1,6 +1,7 @@ package integration import ( + "fmt" "os" "strings" @@ -244,4 +245,27 @@ var _ = Describe("Podman exec", func() { Expect(session.ExitCode()).To(Equal(0)) }) + It("podman exec preserves --group-add groups", func() { + groupName := "group1" + gid := "4444" + ctrName1 := "ctr1" + ctr1 := podmanTest.Podman([]string{"run", "-ti", "--name", ctrName1, fedoraMinimal, "groupadd", "-g", gid, groupName}) + ctr1.WaitWithDefaultTimeout() + Expect(ctr1.ExitCode()).To(Equal(0)) + + imgName := "img1" + commit := podmanTest.Podman([]string{"commit", ctrName1, imgName}) + commit.WaitWithDefaultTimeout() + Expect(commit.ExitCode()).To(Equal(0)) + + ctrName2 := "ctr2" + ctr2 := podmanTest.Podman([]string{"run", "-d", "--name", ctrName2, "--group-add", groupName, imgName, "sleep", "300"}) + ctr2.WaitWithDefaultTimeout() + Expect(ctr2.ExitCode()).To(Equal(0)) + + exec := podmanTest.Podman([]string{"exec", "-ti", ctrName2, "id"}) + exec.WaitWithDefaultTimeout() + Expect(exec.ExitCode()).To(Equal(0)) + Expect(strings.Contains(exec.OutputToString(), fmt.Sprintf("%s(%s)", gid, groupName))).To(BeTrue()) + }) }) |
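
Usage sketch for the reworked volume bindings above (pkg/bindings/volumes): a minimal Go program mirroring the calls exercised in the new pkg/bindings/test/volumes_test.go. The socket URI and the "demo" volume name are assumptions for illustration only, not part of this change; point the connection at however the local podman API service was started.

package main

import (
	"context"
	"fmt"

	"github.com/containers/libpod/pkg/api/handlers"
	"github.com/containers/libpod/pkg/bindings"
	"github.com/containers/libpod/pkg/bindings/volumes"
)

func main() {
	// Assumption: a podman API service is listening here; adjust the URI to
	// match how the service was actually started.
	connText, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}

	// Create now returns the volume's full configuration rather than just its name.
	vol, err := volumes.Create(connText, handlers.VolumeCreateConfig{Name: "demo"})
	if err != nil {
		panic(err)
	}

	// List accepts the same filters as the new /libpod/volumes/json endpoint
	// (name, driver, label, opt).
	vols, err := volumes.List(connText, map[string][]string{"name": {vol.Name}})
	if err != nil {
		panic(err)
	}
	fmt.Println("matched", len(vols), "volume(s)")

	// Remove without force; a volume still in use is reported as 409 Conflict.
	if err := volumes.Remove(connText, vol.Name, nil); err != nil {
		panic(err)
	}
}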