author    | OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com> | 2020-08-24 18:35:59 -0400
committer | GitHub <noreply@github.com> | 2020-08-24 18:35:59 -0400
commit    | 024f47068a894907ee47b1b90c86090a00586940 (patch)
tree      | 4528b3566c3162ca2892b8c3d752d3957161e3d5
parent    | c78c6b44ce63430218e141415a10b2010d42f883 (diff)
parent    | 13d5b2d661478005a0b7eec5563d13580b6cd3f8 (diff)
Merge pull request #7402 from mheon/last_pr_before_205_really_this_time
Final v2.0.5 backports
-rw-r--r-- | .cirrus.yml                                              |  50
-rw-r--r-- | RELEASE_NOTES.md                                         |   2
-rw-r--r-- | changelog.txt                                            |  80
-rw-r--r-- | contrib/spec/podman.spec.in                              |   2
-rw-r--r-- | libpod/define/errors.go                                  |   4
-rw-r--r-- | libpod/options.go                                        |  20
-rw-r--r-- | libpod/pod.go                                            |  11
-rw-r--r-- | libpod/pod_api.go                                        | 134
-rw-r--r-- | libpod/runtime_pod_infra_linux.go                        |   6
-rw-r--r-- | pkg/api/handlers/libpod/pods.go                          |   9
-rw-r--r-- | pkg/bindings/test/pods_test.go                           |   6
-rw-r--r-- | pkg/specgen/container_validate.go                        |  18
-rw-r--r-- | pkg/specgen/generate/pod_create.go                       |  16
-rw-r--r-- | test/e2e/create_test.go                                  |  61
-rw-r--r-- | vendor/github.com/containers/storage/drivers/counter.go  |   4
-rw-r--r-- | version/version.go                                       |   2
16 files changed, 356 insertions, 69 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index 87c135b78..06ebb0c96 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -204,34 +204,34 @@ rpmbuild_task: # whether the git tree is clean. The reasoning for that is to make sure # that the vendor.conf, the code and the vendored packages in ./vendor are # in sync at all times. -vendor_task: +# vendor_task: - only_if: >- - $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && - $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' +# only_if: >- +# $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && +# $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*' - depends_on: - - "gating" +# depends_on: +# - "gating" - env: - CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/libpod" - GOPATH: "/var/tmp/go" - GOSRC: "$CIRRUS_WORKING_DIR" +# env: +# CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/libpod" +# GOPATH: "/var/tmp/go" +# GOSRC: "$CIRRUS_WORKING_DIR" - # Runs within Cirrus's "community cluster" - container: - image: docker.io/library/golang:1.13 - cpu: 4 - memory: 12 +# # Runs within Cirrus's "community cluster" +# container: +# image: docker.io/library/golang:1.13 +# cpu: 4 +# memory: 12 - timeout_in: 30m +# timeout_in: 30m - vendor_script: - - 'cd ${CIRRUS_WORKING_DIR} && make vendor' - - 'cd ${CIRRUS_WORKING_DIR} && ./hack/tree_status.sh' +# vendor_script: +# - 'cd ${CIRRUS_WORKING_DIR} && make vendor' +# - 'cd ${CIRRUS_WORKING_DIR} && ./hack/tree_status.sh' - on_failure: - failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh |& ${TIMESTAMP}' +# on_failure: +# failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh |& ${TIMESTAMP}' # This task runs `make varlink_api_generate` followed by ./hack/tree_status.sh to check @@ -305,7 +305,6 @@ build_without_cgo_task: depends_on: - "gating" - - "vendor" - "varlink_api" only_if: >- @@ -366,7 +365,6 @@ testing_task: alias: "testing" depends_on: - "gating" - - "vendor" - "varlink_api" - "build_without_cgo" - "container_image_build" @@ -431,7 +429,6 @@ special_testing_rootless_task: depends_on: - "gating" - "varlink_api" - - "vendor" - "build_without_cgo" only_if: >- @@ -466,7 +463,6 @@ special_testing_in_podman_task: depends_on: - "gating" - "varlink_api" - - "vendor" - "build_without_cgo" only_if: >- @@ -505,7 +501,6 @@ special_testing_cross_task: depends_on: - "gating" - "varlink_api" - - "vendor" only_if: >- $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && @@ -543,7 +538,6 @@ special_testing_bindings_task: depends_on: - "gating" - "varlink_api" - - "vendor" only_if: >- $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && @@ -570,7 +564,6 @@ special_testing_endpoint_task: depends_on: - "gating" - "varlink_api" - - "vendor" only_if: >- $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' && @@ -695,7 +688,6 @@ success_task: # ignores any dependent task conditions depends_on: - "gating" - - "vendor" - "varlink_api" - "build_without_cgo" - "container_image_build" diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index b5eea154e..f4ee06d1d 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -39,6 +39,8 @@ - Fixed a bug where the `podman generate systemd --new` command could incorrectly quote arguments to Podman that contained whitespace, leading to nonfunctional unit files ([#7285](https://github.com/containers/podman/issues/7285)). - Fixed a bug where the `podman version` command did not properly include build time and Git commit. 
- Fixed a bug where running systemd in a Podman container on a system that did not use the `systemd` cgroup manager would fail ([#6734](https://github.com/containers/podman/issues/6734)). +- Fixed a bug where capabilities from `--cap-add` were not properly added when a container was started as a non-root user via `--user`. +- Fixed a bug where Pod infra containers were not properly cleaned up when they stopped, causing networking issues ([#7103](https://github.com/containers/podman/issues/7103)). ### API - Fixed a bug where the libpod and compat Build endpoints did not accept the `application/tar` content type (instead only accepting `application/x-tar`) ([#7185](https://github.com/containers/podman/issues/7185)). diff --git a/changelog.txt b/changelog.txt index b44dfffcb..199baef65 100644 --- a/changelog.txt +++ b/changelog.txt @@ -1,3 +1,83 @@ +- Changelog for v2.0.5 (2020-08-24): + * HACK: Manually include c/storage #698 + * Final release notes update for v2.0.5. + * Clean up pods before returning from Pod Stop API call + * Ensure pod infra containers have an exit command + * error when adding container to pod with network information + * Vendor in containers/common v0.14.9 + * In podman 1.* regression on --cap-add + * fix pod creation with "new:" syntax followup + allow hostname + * Fix a Makefile issue + * Fix a system test failure + * Cleanup handling of podman mount/unmount + * Fix imports (podman -> libpod for v2.0 branch) + * Final set of updates to release notes + * Add support for --connection + * remove --latest for all remote commands + * Further release notes updates for v2.0.5 + * fix podman create/run UTS NS docs + * abi: fix detection for systemd + * fix podman version output to include git commit and builttime + * generate systemd: quote arguments with whitespace + * Unmount c/storage containers before removing them + * [WIP] Refactor podman system connection + * Fix `podman system connection` panic + * Revert "remove podman system connection" + * Bump github.com/containers/common to v0.14.7 + * Fix imports for runtime_img.go + * Fix one import path pointing to containers/podman + * HACK: Disable build-each-commit + * Ensure DefaultEnvVariables is used in Specgen + * Update release notes for v2.0.5 + * [CI:DOCS] BZ1860126 - Fix userns defaults in run man page + * Unconditionally retrieve pod names via API + * Default .Repository and .Tag values to <none> + * Error pass through for more accurate error reporting + * Fix handling of working dir + * Do not use image CMD if user gave ENTRYPOINT + * Ensure WORKDIR from images is created + * Allow specifying seccomp profiles for privileged containers + * Use set for systemd commands + * Enable systemd mode for /usr/local/sbin/init + * Replace deepcopy on history results + * Add parameter verification for api creation network + * add event for image build + * Change /sys/fs/cgroup/systemd mount to rprivate + * podman save use named pipe + * Fix hang when `path` doesn't exist + * podman.service: use sdnotiy + * podman support for IPv6 networks + * vendor c/image v5.5.2 + * Fix v2.0.x CI + * system tests: invoke with abs path to podman + * Make changes to /etc/passwd on disk for non-read only + * Add username to /etc/passwd inside of container if --userns keep-id + * Fix close fds of exec --preserve-fds + * fix pod creation with "new:" syntax + * Fix podman service --valink timeout + * Add versioned _ping endpoint + * Change recommended systemd unit path for root. 
+ * API returns 500 in case network is not found instead of 404 + * podman.service: drop install section + * Handle podman-remote run --rm + * correct go-binding key for volumes + * cherry-pick: Reenable remote system tests + * system tests: new tests for run, exec + * implement the exitcode when start a container with attach + * Do not set host IP on ports when 0.0.0.0 requested + * Missing return after early exit + * docker-compose uses application/tar + * rootless: system service joins immediately the namespaces + * fix bug podman sign storage path + * podman-remote send name and tag + * Ensure that exec errors write exit codes to the DB + * fix podman logs --tail when log is bigger than pagesize + * image list: speed up + * generate systemd: fix error handling + * Publish IP from YAML (podman play kube) + * Add containers.conf default file for windows and MAC Installs + * Bump Buildah to v1.15.1 on v2.0 branch + - Changelog for v2.0.4 (2020-07-31): * Update release notes for v2.0.4 * Disable a nonfunctional build test diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in index 787309f09..8414b70c9 100644 --- a/contrib/spec/podman.spec.in +++ b/contrib/spec/podman.spec.in @@ -42,7 +42,7 @@ Epoch: 99 %else Epoch: 0 %endif -Version: 2.0.5 +Version: 2.0.6 Release: #COMMITDATE#.git%{shortcommit0}%{?dist} Summary: Manage Pods, Containers and Container Images License: ASL 2.0 diff --git a/libpod/define/errors.go b/libpod/define/errors.go index 5cadce396..e27626fcc 100644 --- a/libpod/define/errors.go +++ b/libpod/define/errors.go @@ -160,4 +160,8 @@ var ( // ErrImageInUse indicates the requested operation failed because the image was in use ErrImageInUse = errors.New("image is being used") + + // ErrNetworkOnPodContainer indicates the user wishes to alter network attributes on a container + // in a pod. This cannot be done as the infra container has all the network information + ErrNetworkOnPodContainer = errors.New("network cannot be configured when it is shared with a pod") ) diff --git a/libpod/options.go b/libpod/options.go index a4e4b99e9..45775e73b 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -2036,3 +2036,23 @@ func WithPodHostNetwork() PodCreateOption { return nil } } + +// WithPodInfraExitCommand sets an exit command for the pod's infra container. +// Semantics are identical to WithExitCommand() above - the ID of the container +// will be appended to the end of the provided command (note that this will +// specifically be the ID of the infra container *and not the pod's id*. +func WithPodInfraExitCommand(exitCmd []string) PodCreateOption { + return func(pod *Pod) error { + if pod.valid { + return define.ErrPodFinalized + } + + if !pod.config.InfraContainer.HasInfraContainer { + return errors.Wrapf(define.ErrInvalidArg, "cannot configure pod infra container exit command as no infra container is being created") + } + + pod.config.InfraContainer.ExitCommand = exitCmd + + return nil + } +} diff --git a/libpod/pod.go b/libpod/pod.go index 00ba5d53c..4ce66b751 100644 --- a/libpod/pod.go +++ b/libpod/pod.go @@ -81,7 +81,15 @@ type podState struct { InfraContainerID string } -// InfraContainerConfig is the configuration for the pod's infra container +// InfraContainerConfig is the configuration for the pod's infra container. 
+// Generally speaking, these are equivalent to container configuration options +// you will find in container_config.go (and even named identically), save for +// HasInfraContainer (which determines if an infra container is even created - +// if it is false, no other options in this struct will be used) and HostNetwork +// (this involves the created OCI spec, and as such is not represented directly +// in container_config.go). +// Generally speaking, aside from those two exceptions, these options will set +// the equivalent field in the container's configuration. type InfraContainerConfig struct { ConmonPidFile string `json:"conmonPidFile"` HasInfraContainer bool `json:"makeInfraContainer"` @@ -96,6 +104,7 @@ type InfraContainerConfig struct { UseImageHosts bool `json:"useImageHosts,omitempty"` HostAdd []string `json:"hostsAdd,omitempty"` Networks []string `json:"networks,omitempty"` + ExitCommand []string `json:"exitCommand,omitempty"` } // ID retrieves the pod's ID diff --git a/libpod/pod_api.go b/libpod/pod_api.go index f2ef81bec..3316b3b31 100644 --- a/libpod/pod_api.go +++ b/libpod/pod_api.go @@ -11,20 +11,20 @@ import ( "github.com/sirupsen/logrus" ) -// Start starts all containers within a pod -// It combines the effects of Init() and Start() on a container +// Start starts all containers within a pod. +// It combines the effects of Init() and Start() on a container. // If a container has already been initialized it will be started, // otherwise it will be initialized then started. // Containers that are already running or have been paused are ignored // All containers are started independently, in order dictated by their // dependencies. -// An error and a map[string]error are returned +// An error and a map[string]error are returned. // If the error is not nil and the map is nil, an error was encountered before -// any containers were started +// any containers were started. // If map is not nil, an error was encountered when starting one or more // containers. The container ID is mapped to the error encountered. The error is -// set to ErrCtrExists -// If both error and the map are nil, all containers were started successfully +// set to ErrPodPartialFail. +// If both error and the map are nil, all containers were started successfully. func (p *Pod) Start(ctx context.Context) (map[string]error, error) { p.lock.Lock() defer p.lock.Unlock() @@ -72,20 +72,20 @@ func (p *Pod) Stop(ctx context.Context, cleanup bool) (map[string]error, error) } // StopWithTimeout stops all containers within a pod that are not already stopped -// Each container will use its own stop timeout +// Each container will use its own stop timeout. // Only running containers will be stopped. Paused, stopped, or created // containers will be ignored. // If cleanup is true, mounts and network namespaces will be cleaned up after // the container is stopped. // All containers are stopped independently. An error stopping one container // will not prevent other containers being stopped. -// An error and a map[string]error are returned +// An error and a map[string]error are returned. // If the error is not nil and the map is nil, an error was encountered before -// any containers were stopped +// any containers were stopped. // If map is not nil, an error was encountered when stopping one or more // containers. The container ID is mapped to the error encountered. The error is -// set to ErrCtrExists -// If both error and the map are nil, all containers were stopped without error +// set to ErrPodPartialFail. 
+// If both error and the map are nil, all containers were stopped without error. func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (map[string]error, error) { p.lock.Lock() defer p.lock.Unlock() @@ -138,10 +138,82 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m ctr.lock.Unlock() } + p.newPodEvent(events.Stop) + if len(ctrErrors) > 0 { return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers") } - defer p.newPodEvent(events.Stop) + return nil, nil +} + +// Cleanup cleans up all containers within a pod that have stopped. +// All containers are cleaned up independently. An error with one container will +// not prevent other containers being cleaned up. +// An error and a map[string]error are returned. +// If the error is not nil and the map is nil, an error was encountered before +// any containers were cleaned up. +// If map is not nil, an error was encountered when working on one or more +// containers. The container ID is mapped to the error encountered. The error is +// set to ErrPodPartialFail. +// If both error and the map are nil, all containers were paused without error +func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) { + p.lock.Lock() + defer p.lock.Unlock() + + if !p.valid { + return nil, define.ErrPodRemoved + } + + allCtrs, err := p.runtime.state.PodContainers(p) + if err != nil { + return nil, err + } + + ctrErrors := make(map[string]error) + + // Clean up all containers + for _, ctr := range allCtrs { + ctr.lock.Lock() + + if err := ctr.syncContainer(); err != nil { + ctr.lock.Unlock() + ctrErrors[ctr.ID()] = err + continue + } + + // Ignore containers that are running/paused + if !ctr.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) { + ctr.lock.Unlock() + continue + } + + // Check for running exec sessions, ignore containers with them. + sessions, err := ctr.getActiveExecSessions() + if err != nil { + ctr.lock.Unlock() + ctrErrors[ctr.ID()] = err + continue + } + if len(sessions) > 0 { + ctr.lock.Unlock() + continue + } + + // TODO: Should we handle restart policy here? + + ctr.newContainerEvent(events.Cleanup) + + if err := ctr.cleanup(ctx); err != nil { + ctrErrors[ctr.ID()] = err + } + + ctr.lock.Unlock() + } + + if len(ctrErrors) > 0 { + return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error cleaning up some containers") + } + return nil, nil } @@ -150,12 +222,12 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m // containers will be ignored. // All containers are paused independently. An error pausing one container // will not prevent other containers being paused. -// An error and a map[string]error are returned +// An error and a map[string]error are returned. // If the error is not nil and the map is nil, an error was encountered before -// any containers were paused +// any containers were paused. // If map is not nil, an error was encountered when pausing one or more // containers. The container ID is mapped to the error encountered. The error is -// set to ErrCtrExists +// set to ErrPodPartialFail. // If both error and the map are nil, all containers were paused without error func (p *Pod) Pause() (map[string]error, error) { p.lock.Lock() @@ -219,13 +291,13 @@ func (p *Pod) Pause() (map[string]error, error) { // containers will be ignored. // All containers are unpaused independently. 
An error unpausing one container // will not prevent other containers being unpaused. -// An error and a map[string]error are returned +// An error and a map[string]error are returned. // If the error is not nil and the map is nil, an error was encountered before -// any containers were unpaused +// any containers were unpaused. // If map is not nil, an error was encountered when unpausing one or more // containers. The container ID is mapped to the error encountered. The error is -// set to ErrCtrExists -// If both error and the map are nil, all containers were unpaused without error +// set to ErrPodPartialFail. +// If both error and the map are nil, all containers were unpaused without error. func (p *Pod) Unpause() (map[string]error, error) { p.lock.Lock() defer p.lock.Unlock() @@ -280,13 +352,13 @@ func (p *Pod) Unpause() (map[string]error, error) { // All containers are started independently, in order dictated by their // dependencies. An error restarting one container // will not prevent other containers being restarted. -// An error and a map[string]error are returned +// An error and a map[string]error are returned. // If the error is not nil and the map is nil, an error was encountered before -// any containers were restarted +// any containers were restarted. // If map is not nil, an error was encountered when restarting one or more // containers. The container ID is mapped to the error encountered. The error is -// set to ErrCtrExists -// If both error and the map are nil, all containers were restarted without error +// set to ErrPodPartialFail. +// If both error and the map are nil, all containers were restarted without error. func (p *Pod) Restart(ctx context.Context) (map[string]error, error) { p.lock.Lock() defer p.lock.Unlock() @@ -328,17 +400,17 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) { return nil, nil } -// Kill sends a signal to all running containers within a pod +// Kill sends a signal to all running containers within a pod. // Signals will only be sent to running containers. Containers that are not // running will be ignored. All signals are sent independently, and sending will // continue even if some containers encounter errors. -// An error and a map[string]error are returned +// An error and a map[string]error are returned. // If the error is not nil and the map is nil, an error was encountered before -// any containers were signalled +// any containers were signalled. // If map is not nil, an error was encountered when signalling one or more // containers. The container ID is mapped to the error encountered. The error is -// set to ErrCtrExists -// If both error and the map are nil, all containers were signalled successfully +// set to ErrPodPartialFail. +// If both error and the map are nil, all containers were signalled successfully. func (p *Pod) Kill(signal uint) (map[string]error, error) { p.lock.Lock() defer p.lock.Unlock() @@ -393,8 +465,8 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) { return nil, nil } -// Status gets the status of all containers in the pod -// Returns a map of Container ID to Container Status +// Status gets the status of all containers in the pod. +// Returns a map of Container ID to Container Status. 
func (p *Pod) Status() (map[string]define.ContainerStatus, error) { p.lock.Lock() defer p.lock.Unlock() @@ -430,7 +502,7 @@ func containerStatusFromContainers(allCtrs []*Container) (map[string]define.Cont return status, nil } -// Inspect returns a PodInspect struct to describe the pod +// Inspect returns a PodInspect struct to describe the pod. func (p *Pod) Inspect() (*define.InspectPodData, error) { p.lock.Lock() defer p.lock.Unlock() diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go index 24802f89e..faa627ff1 100644 --- a/libpod/runtime_pod_infra_linux.go +++ b/libpod/runtime_pod_infra_linux.go @@ -83,6 +83,9 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, rawIm return nil, errors.Wrapf(err, "error removing network namespace from pod %s infra container", p.ID()) } + // For each option in InfraContainerConfig - if set, pass into + // the infra container we're creating with the appropriate + // With... option. if p.config.InfraContainer.StaticIP != nil { options = append(options, WithStaticIP(p.config.InfraContainer.StaticIP)) } @@ -107,6 +110,9 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, rawIm if len(p.config.InfraContainer.HostAdd) > 0 { options = append(options, WithHosts(p.config.InfraContainer.HostAdd)) } + if len(p.config.InfraContainer.ExitCommand) > 0 { + options = append(options, WithExitCommand(p.config.InfraContainer.ExitCommand)) + } } g.SetRootReadonly(true) diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go index dee5a7d33..7e3f00472 100644 --- a/pkg/api/handlers/libpod/pods.go +++ b/pkg/api/handlers/libpod/pods.go @@ -123,10 +123,17 @@ func PodStop(w http.ResponseWriter, r *http.Request) { } else { responses, stopError = pod.Stop(r.Context(), false) } - if stopError != nil { + if stopError != nil && errors.Cause(stopError) != define.ErrPodPartialFail { utils.Error(w, "Something went wrong", http.StatusInternalServerError, err) return } + // Try to clean up the pod - but only warn on failure, it's nonfatal. + if cleanupCtrs, cleanupErr := pod.Cleanup(r.Context()); cleanupErr != nil { + logrus.Errorf("Error cleaning up pod %s: %v", pod.ID(), cleanupErr) + for id, err := range cleanupCtrs { + logrus.Errorf("Error cleaning up pod %s container %s: %v", pod.ID(), id, err) + } + } var errs []error //nolint for _, err := range responses { errs = append(errs, err) diff --git a/pkg/bindings/test/pods_test.go b/pkg/bindings/test/pods_test.go index 2ad6f38c1..65336f1f1 100644 --- a/pkg/bindings/test/pods_test.go +++ b/pkg/bindings/test/pods_test.go @@ -244,7 +244,7 @@ var _ = Describe("Podman pods", func() { Expect(response.State).To(Equal(define.PodStateExited)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). - To(Equal(define.ContainerStateStopped)) + To(Equal(define.ContainerStateExited)) } // Stop an already stopped pod @@ -309,7 +309,7 @@ var _ = Describe("Podman pods", func() { Expect(response.State).To(Equal(define.PodStateExited)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). - To(Equal(define.ContainerStateStopped)) + To(Equal(define.ContainerStateExited)) } _, err = pods.Stop(bt.conn, newpod2, nil) Expect(err).To(BeNil()) @@ -318,7 +318,7 @@ var _ = Describe("Podman pods", func() { Expect(response.State).To(Equal(define.PodStateExited)) for _, i := range response.Containers { Expect(define.StringToContainerStatus(i.State)). 
- To(Equal(define.ContainerStateStopped)) + To(Equal(define.ContainerStateExited)) } _, err = pods.Prune(bt.conn) Expect(err).To(BeNil()) diff --git a/pkg/specgen/container_validate.go b/pkg/specgen/container_validate.go index 4dd2ab0b3..c4449ba3a 100644 --- a/pkg/specgen/container_validate.go +++ b/pkg/specgen/container_validate.go @@ -3,6 +3,7 @@ package specgen import ( "strings" + "github.com/containers/libpod/v2/libpod/define" "github.com/containers/libpod/v2/pkg/rootless" "github.com/containers/libpod/v2/pkg/util" "github.com/pkg/errors" @@ -34,6 +35,23 @@ func (s *SpecGenerator) Validate() error { } } + // Containers being added to a pod cannot have certain network attributes + // associated with them because those should be on the infra container. + if len(s.Pod) > 0 && s.NetNS.NSMode == FromPod { + if s.StaticIP != nil || s.StaticIPv6 != nil { + return errors.Wrap(define.ErrNetworkOnPodContainer, "static ip addresses must be defined when the pod is created") + } + if s.StaticMAC != nil { + return errors.Wrap(define.ErrNetworkOnPodContainer, "MAC addresses must be defined when the pod is created") + } + if len(s.CNINetworks) > 0 { + return errors.Wrap(define.ErrNetworkOnPodContainer, "networks must be defined when the pod is created") + } + if len(s.PortMappings) > 0 || s.PublishExposedPorts { + return errors.Wrap(define.ErrNetworkOnPodContainer, "published or exposed ports must be defined when the pod is created") + } + } + // // ContainerBasicConfig // diff --git a/pkg/specgen/generate/pod_create.go b/pkg/specgen/generate/pod_create.go index 4fe1b6435..09972da4a 100644 --- a/pkg/specgen/generate/pod_create.go +++ b/pkg/specgen/generate/pod_create.go @@ -13,14 +13,14 @@ func MakePod(p *specgen.PodSpecGenerator, rt *libpod.Runtime) (*libpod.Pod, erro if err := p.Validate(); err != nil { return nil, err } - options, err := createPodOptions(p) + options, err := createPodOptions(p, rt) if err != nil { return nil, err } return rt.NewPod(context.Background(), options...) } -func createPodOptions(p *specgen.PodSpecGenerator) ([]libpod.PodCreateOption, error) { +func createPodOptions(p *specgen.PodSpecGenerator, rt *libpod.Runtime) ([]libpod.PodCreateOption, error) { var ( options []libpod.PodCreateOption ) @@ -31,6 +31,18 @@ func createPodOptions(p *specgen.PodSpecGenerator) ([]libpod.PodCreateOption, er return nil, err } options = append(options, nsOptions...) 
+ + // Make our exit command + storageConfig := rt.StorageConfig() + runtimeConfig, err := rt.GetConfig() + if err != nil { + return nil, err + } + exitCommand, err := CreateExitCommandArgs(storageConfig, runtimeConfig, logrus.IsLevelEnabled(logrus.DebugLevel), false, false) + if err != nil { + return nil, errors.Wrapf(err, "error creating infra container exit command") + } + options = append(options, libpod.WithPodInfraExitCommand(exitCommand)) } if len(p.CgroupParent) > 0 { options = append(options, libpod.WithPodCgroupParent(p.CgroupParent)) diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go index 5f96f96b1..9fce1aa67 100644 --- a/test/e2e/create_test.go +++ b/test/e2e/create_test.go @@ -471,4 +471,65 @@ var _ = Describe("Podman create", func() { Expect(len(data)).To(Equal(1)) Expect(data[0].Config.StopSignal).To(Equal(uint(15))) }) + + It("create container in pod with IP should fail", func() { + SkipIfRootless() + name := "createwithstaticip" + pod := podmanTest.RunTopContainerInPod("", "new:"+name) + pod.WaitWithDefaultTimeout() + Expect(pod.ExitCode()).To(BeZero()) + + session := podmanTest.Podman([]string{"create", "--pod", name, "--ip", "192.168.1.2", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).ToNot(BeZero()) + }) + + It("create container in pod with mac should fail", func() { + SkipIfRootless() + name := "createwithstaticmac" + pod := podmanTest.RunTopContainerInPod("", "new:"+name) + pod.WaitWithDefaultTimeout() + Expect(pod.ExitCode()).To(BeZero()) + + session := podmanTest.Podman([]string{"create", "--pod", name, "--mac-address", "52:54:00:6d:2f:82", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).ToNot(BeZero()) + }) + + It("create container in pod with network should fail", func() { + SkipIfRootless() + name := "createwithnetwork" + pod := podmanTest.RunTopContainerInPod("", "new:"+name) + pod.WaitWithDefaultTimeout() + Expect(pod.ExitCode()).To(BeZero()) + + session := podmanTest.Podman([]string{"create", "--pod", name, "--network", "foobar", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + //Expect(session.ExitCode()).ToNot(BeZero()) + Expect(session.ExitCode()).To(BeZero()) + }) + + It("create container in pod with ports should fail", func() { + SkipIfRootless() + name := "createwithports" + pod := podmanTest.RunTopContainerInPod("", "new:"+name) + pod.WaitWithDefaultTimeout() + Expect(pod.ExitCode()).To(BeZero()) + + session := podmanTest.Podman([]string{"create", "--pod", name, "-p", "80:80", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).ToNot(BeZero()) + }) + + It("create container in pod ppublish ports should fail", func() { + SkipIfRootless() + name := "createwithpublishports" + pod := podmanTest.RunTopContainerInPod("", "new:"+name) + pod.WaitWithDefaultTimeout() + Expect(pod.ExitCode()).To(BeZero()) + + session := podmanTest.Podman([]string{"create", "--pod", name, "-P", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).ToNot(BeZero()) + }) }) diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go index 72551a38d..3fc45495b 100644 --- a/vendor/github.com/containers/storage/drivers/counter.go +++ b/vendor/github.com/containers/storage/drivers/counter.go @@ -51,6 +51,10 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { if c.checker.IsMounted(path) { m.count++ } + } else if !c.checker.IsMounted(path) { + // if the unmount was 
performed outside of this process (e.g. conmon cleanup) + //the ref counter would lose track of it. Check if it is still mounted. + m.count = 0 } infoOp(m) count := m.count diff --git a/version/version.go b/version/version.go index 2a4816277..8dd18e100 100644 --- a/version/version.go +++ b/version/version.go @@ -4,7 +4,7 @@ package version // NOTE: remember to bump the version at the top // of the top-level README.md file when this is // bumped. -const Version = "2.0.5-dev" +const Version = "2.0.6-dev" // APIVersion is the version for the remote // client API. It is used to determine compatibility |
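
The corrected doc comments in libpod/pod_api.go and the new PodStop handler logic share one contract: pod-level operations return a per-container error map alongside an error wrapping define.ErrPodPartialFail when only some containers fail, and a nil map with a non-nil error when nothing was attempted. The sketch below is not part of this commit; it is a minimal caller-side illustration of that contract (the package and function names are made up for the example), mirroring the handler's choice to continue into Cleanup() and treat its failures as non-fatal.

```go
package podutil

import (
	"context"
	"fmt"

	"github.com/containers/libpod/v2/libpod"
	"github.com/containers/libpod/v2/libpod/define"
	"github.com/pkg/errors"
)

// stopAndCleanupPod stops every container in the pod, then cleans up the
// stopped containers, following the error-map semantics documented in
// pod_api.go and used by the libpod PodStop API handler.
func stopAndCleanupPod(ctx context.Context, pod *libpod.Pod) error {
	// A non-nil map means some containers failed; the wrapping error is
	// define.ErrPodPartialFail. Any other error is a hard failure that
	// happened before containers were stopped.
	ctrErrs, err := pod.Stop(ctx, false)
	if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
		return err
	}
	for id, ctrErr := range ctrErrs {
		fmt.Printf("container %s failed to stop: %v\n", id, ctrErr)
	}

	// Cleanup failures are logged but non-fatal, as in the PodStop handler.
	if cleanupErrs, cleanupErr := pod.Cleanup(ctx); cleanupErr != nil {
		fmt.Printf("error cleaning up pod %s: %v\n", pod.ID(), cleanupErr)
		for id, ctrErr := range cleanupErrs {
			fmt.Printf("container %s failed to clean up: %v\n", id, ctrErr)
		}
	}
	return nil
}
```

Distinguishing ErrPodPartialFail from other errors is what allows the handler to proceed to Cleanup() even when some containers failed to stop, which is the behavior change behind "Clean up pods before returning from Pod Stop API call" in this backport.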