-rw-r--r--  Makefile | 2
-rw-r--r--  RELEASE_NOTES.md | 54
-rw-r--r--  changelog.txt | 202
-rw-r--r--  cmd/podman/common.go | 2
-rw-r--r--  cmd/podman/shared/create.go | 37
-rw-r--r--  contrib/spec/podman.spec.in | 2
-rw-r--r--  docs/libpod.conf.5.md | 3
-rw-r--r--  docs/podman-create.1.md | 29
-rw-r--r--  docs/podman-events.1.md | 1
-rw-r--r--  docs/podman-run.1.md | 14
-rw-r--r--  libpod/container.go | 61
-rw-r--r--  libpod/container_api.go | 29
-rw-r--r--  libpod/container_inspect.go | 1
-rw-r--r--  libpod/container_internal.go | 126
-rw-r--r--  libpod/events/config.go | 2
-rw-r--r--  libpod/events/events.go | 2
-rw-r--r--  libpod/options.go | 35
-rw-r--r--  libpod/pod_api.go | 7
-rw-r--r--  pkg/inspect/inspect.go | 2
-rw-r--r--  pkg/spec/createconfig.go | 20
-rw-r--r--  test/e2e/run_test.go | 45
-rw-r--r--  vendor.conf | 2
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse.go | 30
-rw-r--r--  vendor/github.com/containers/buildah/vendor.conf | 2
-rw-r--r--  version/version.go | 2
26 files changed, 650 insertions, 64 deletions
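The headline change in this diff is the new `--restart` flag on `podman create` and `podman run`. A minimal usage sketch based on the flag values and the `--rm` conflict check added below (the image and container names are illustrative; the error text comes from the string added in `cmd/podman/shared/create.go`, though the exact CLI formatting may differ):

```
# Restart the container whenever it exits, regardless of exit code
$ podman run -d --name web --restart=always alpine top

# --rm and a restart policy are mutually exclusive and rejected at create time
$ podman run --rm --restart=always alpine top
Error: the --rm option conflicts with --restart
```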
@@ -1,6 +1,6 @@
 GO ?= go
 DESTDIR ?= /
-EPOCH_TEST_COMMIT ?= 7b7397481960c85379d8eb1ed21e76da2ce8a4fc
+EPOCH_TEST_COMMIT ?= a9fc570dd844bf1ebd1f106f1b8091882b4a2b29
 HEAD ?= HEAD
 CHANGELOG_BASE ?= HEAD~
 CHANGELOG_TARGET ?= HEAD
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 45e844cb8..499f46317 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,5 +1,59 @@
 # Release Notes
+## 1.3.0
+### Features
+- Podman now supports container restart policies! The `--restart` flag on `podman create` and `podman run` allows containers to be restarted after they exit. Please note that Podman cannot restart containers after a system reboot - for that, see our next feature
+- The `podman generate systemd` command was added to generate systemd unit files for managing Podman containers
+- The `podman runlabel` command now allows a `$GLOBAL_OPTS` variable, which will be populated by global options passed to the `podman runlabel` command, allowing custom storage configurations to be passed into containers run with `runlabel` ([#2399](https://github.com/containers/libpod/issues/2399))
+- The `podman play kube` command now allows `File` and `FileOrCreate` volumes
+- The `podman pod prune` command was added to prune unused pods
+- Added the `podman system migrate` command to migrate containers using older configurations to allow their use by newer Libpod versions ([#2935](https://github.com/containers/libpod/issues/2935))
+- Podman containers now forward proxy-related environment variables from the host into the container with the `--http-proxy` flag (enabled by default)
+- Read-only Podman containers can now create tmpfs filesystems on `/tmp`, `/var/tmp`, and `/run` with the `--read-only-tmpfs` flag (enabled by default)
+- The `podman init` command was added, performing all container pre-start tasks without starting the container to allow pre-run debugging
+
+### Bugfixes
+- Fixed a bug where `podman cp` would not copy folders ([#2836](https://github.com/containers/libpod/issues/2836))
+- Fixed a bug where Podman would panic when the Varlink API attempted to pull a non-existent image ([#2860](https://github.com/containers/libpod/issues/2860))
+- Fixed a bug where `podman rmi` sometimes did not produce an event when images were deleted
+- Fixed a bug where Podman would panic when the Varlink API passed improperly-formatted options when attempting to build ([#2869](https://github.com/containers/libpod/issues/2869))
+- Fixed a bug where `podman images` would not print a header if no images were present ([#2877](https://github.com/containers/libpod/pull/2877))
+- Fixed a bug where the `podman images` command with `--filter dangling=false` would incorrectly print dangling images instead of images which are not dangling ([#2884](https://github.com/containers/libpod/issues/2884))
+- Fixed a bug where rootless Podman would panic when any command was run after the system was rebooted ([#2894](https://github.com/containers/libpod/issues/2894))
+- Fixed a bug where Podman containers in user namespaces would include undesired directories from the host in `/sys/kernel`
+- Fixed a bug where `podman create` would panic when trying to create a container whose name already existed
+- Fixed a bug where `podman pull` would exit 0 on failing to pull an image ([#2785](https://github.com/containers/libpod/issues/2785))
+- Fixed a bug where `podman pull` would not properly print the cause of errors that occurred ([#2710](https://github.com/containers/libpod/issues/2710))
+- Fixed a bug where rootless Podman commands were not properly suspended via `ctrl-z` in a shell ([#2775](https://github.com/containers/libpod/issues/2775))
+- Fixed a bug where Podman would error when cleaning up containers when some container mountpoints in `/sys/` were cleaned up already by the closing of the mount namespace
+- Fixed a bug where `podman play kube` was not including environment variables from the image run ([#2930](https://github.com/containers/libpod/issues/2930))
+- Fixed a bug where `podman play kube` would not properly clean up partially-created pods when encountering an error
+- Fixed a bug where `podman commit` with the `--change` flag improperly set `CMD` when a multipart value was provided ([#2951](https://github.com/containers/libpod/issues/2951))
+- Fixed a bug where the `--mount` flag to `podman create` and `podman run` did not properly validate its arguments, causing Podman to panic
+- Fixed a bug where conflicts between mounts created by the `--mount`, `--volume`, and `--tmpfs` flags were not properly reported
+- Fixed a bug where the `--mount` flag could not be used with named volumes
+- Fixed a bug where the `--mount` flag did not properly set options for created tmpfs filesystems
+- Fixed a bug where rootless Podman could close too many file descriptors, causing Podman to panic ([#2964](https://github.com/containers/libpod/issues/2964))
+- Fixed a bug where `podman logout` would not print an error when the login was established by `docker login` ([#2735](https://github.com/containers/libpod/issues/2735))
+- Fixed a bug where `podman stop` would error when not all containers were running ([#2993](https://github.com/containers/libpod/issues/2993))
+- Fixed a bug where `podman pull` would fail to pull images by shortname if they were not present in the `docker.io` registry
+- Fixed a bug where `podman login` would error when credentials were not present if a credential helper was configured ([#1675](https://github.com/containers/libpod/issues/1675))
+- Fixed a bug where the `podman system renumber` command and Podman post-reboot state refreshes would not create events
+- Fixed a bug where the `podman top` command was not compatible with `docker top` syntax
+
+### Misc
+- Updated vendored Buildah to v1.8.2
+- Updated vendored containers/storage to v1.12.6
+- Updated vendored containers/psgo to v1.2.1
+- Updated to sysregistriesv2, including slight changes to the `registries.conf` config file
+- Rootless Podman now places all containers within a single user namespace. This change will not take effect for existing containers until containers are restarted, and containers that are not restarted may not be fully usable
+- The `podman run`, `podman create`, `podman start`, `podman restart`, `podman attach`, `podman stop`, `podman port`, `podman rm`, `podman top`, `podman image tree`, `podman generate kube`, `podman umount`, `podman container checkpoint`, and `podman container restore` commands are now available in the remote client
+- The Podman remote client now builds on Windows
+- A major refactor of volumes created using the `podman volume` command was performed. There should be no major user-facing changes, but downgrading from Podman 1.3 to previous versions may render some volumes unable to be removed.
+- The `podman events` command now logs events to journald by default.
The old behavior (log to file) can be configured in podman.conf via the `events_logger` option +- The `podman commit` command, in versions 1.2 and earlier, included all volumes mounted into the container as image volumes in the committed image. This behavior was incorrect and has been disabled by default; it can be reenabled with the `--include-volumes` flag + + ## 1.2.0 ### Features - Podman now supports image healthchecks! The `podman healthcheck run` command was added to manually run healthchecks, and the status of a running healthcheck can be viewed via `podman inspect` diff --git a/changelog.txt b/changelog.txt index 92a17f8d0..c72117d7f 100644 --- a/changelog.txt +++ b/changelog.txt @@ -1,3 +1,205 @@ +- Changelog for v1.3.0 (2019-05-06) + * Update release notes for 1.3.0 release + * Bump to Buildah v1.8.2 + * Document events logger options in libpod.conf manpage + * Try and fix restart-policy tests + * fix logout message if login only with docker + * Fix manpage typos + * Small code fix + * Fix 'restart' event in journald + * change from sysregistries to sysregistriesv2 + * Address review comments on restart policy + * Add a test for restart policy + * Add a restart event, and make one during restart policy + * Restart policy should not run if a container is running + * Restart policy conflicts with the --rm flag + * Move to using constants for valid restart policy types + * Add manpage information for restart policy + * Add support for retry count with --restart flag + * Sending signals to containers prevents restart policy + * Add container restart policy to Libpod & Podman + * Add a StoppedByUser field to the DB + * top: fallback to execing ps(1) + * clean up shared/parse/parse.go + * Generate systemd unit files for containers + * Fix podman-in-podman volume test + * Cirrus: Add pipefail confirmation check + * Cirrus: timestamp all output script output + * Update c/storage to v1.12.6 + * Fix typo in init manpage + * Add an InvalidState varlink error for Init + * Bump Buildah to v1.8.1, ImageBuilder to v1.1.0 + * Add variable for global flags to runlabel + * docs: Fix typo "healthcheck" pt2 + * cirrus lib.sh: refactor req_env_var() + * Remove two bits of dead code + * http-proxy: improve docs + * Small fixes for #2950 + * container: drop rootless check + * Add basic structure of podman init command + * Move handling of ReadOnlyTmpfs into new mounts code + * Begin adding volume tests + * Ensure that named volumes have their options parsed + * Add options parsing for tmpfs mounts + * Use EqualValues instead of reflect equality + * Hit a number of to-do comments in unified volumes code + * Fix options for non-bind and non-tmpfs volumes + * Migrate unit tests from cmd/podman into pkg/spec + * Migrate to unified volume handling code + * Always pass pod into MakeContainerConfig + * Remove non-config fields from CreateConfig + * Add a new function for converting a CreateConfig + * podman-remote port + * install.md contains hints for rootless setup on arch linux + * auto pass http_proxy into container + * enable podman-remote on windows + * Use 'sudo tee' in tutorial so install works as non-root + * Refactor container cleanup to use latest functions + * Move --mount in run man page + * Add details on rootless Podman to the readme + * podman-remote stop + * correct upstream vndr issues + * runtime: pass down the context + * system: add new subcommand "migrate" + * Vendor in latest buildah code + * remove manual install of libsystemd-dev + * Vendor in latest containers/storage + * Add 
--read-only-tmpfs options + * Fix remote-client testing reports + * podman-remote prune containers + * Do not hard fail on non-decodable events + * update psgo to v1.2.1 + * Add System event type and renumber, refresh events + * enable podman remote top + * fix login supports credHelpers config + * Cirrus: Collect audit log on success and failure + * Add a debug message indicating that a refresh occurred + * image: rework parent/child/history matching + * images: add context to GetParent/IsParent/Remove/Prune... + * build podman-remote with Dockerfile. + * point to 3rd party tools for `docker-compose` format + * Update vendor of container/storage + * journald event logging + * podman remote-client restart containers + * Cirrus: Use freshly built images + * Cirrus: Bump up runc commit + * Cirrus: fix obsolete Ubuntu package + * Cirrus: Install libsystemd-dev on Ubuntu + * pull: special case all-tags semantics + * Fix test compile + * Trim whitespace from ps -q before comparing + * Enhance tests for stop to check results + * Add extra CI tests for stopping all containers + * Fix podman stop --all attempting to stop created ctrs + * Cirrus: Temp. override container-selinux on F29 + * Refactor of 'podman prune' to better support remote + * bats - various small updates + * podman-remote pause|unpause + * Internal names do not match external names + * Add header to play kube output + * Clean up after play kube failure + * rootless: not close more FDs than needed + * Fix COPR builds to start working again + * Fix podman command --change CMD + * podman-remote start + * Vendor in latest Buildah + * Added remote pod prune + * Add podman pod prune + * podman-remote container commands + * Fix segfaults attribute to missing options + * Call the runtime with WithRenumber() when asked + * Add File mounts to play kube + * cmd, pkg: drop commented code + * pod: drop dead code + * rootless, mount: not create namespace + * Incorporate image and default environment variables in play kube + * Validate ENV/LABEL Change options in varlink + * oci: fix umount of /sys/kernel + * Revert "rootless: set controlling terminal for podman in the userns" + * Remove old crio reference from man pages + * create: fix segfault if container name already exists + * adding uidmap to install steps for ubuntu + * podman-remote generate kube + * rootless: do not block SIGTSTP + * rootless: set controlling terminal for podman in the userns + * Use GetContainer instead of LookupContainer for full ID + * pull: exit with error if the image is not found + * Use the same SELinux label for CRIU log files + * pull: remove cryptic error message + * new uidmap BATS test: fix + * adding additional update, needed for install + * Fix README.md -> rootless.md link + * Fixes for podman-remote run and attach + * remote-client checkpoint/restore + * Expand debugging for container cleanup errors + * spec: mask /sys/kernel when bind mounting /sys + * Add --include-volumes flag to 'podman commit' + * oci: add /sys/kernel to the masked paths + * userns: prevent /sys/kernel/* paths in the container + * imagefilter dangling handling corrected + * rootless: fix segfault on refresh if there are containers + * Add demo script and cast to images + * Initial remote flag clean up + * (minor): fix misspelled 'Healthcheck' + * BATS tests: start supporting podman-remote + * Add the ability to attach remotely to a container + * Print header for 'podman images' even with no images + * podman-remote ps + * Re-run (make vendor) to drop the now unnecessary collation 
code and tables + * Potentially breaking: Make hooks sort order locale-independent + * Implement podman-remote rm + * ps: now works with --size and nonroot + * Update invalid name errors to report the correct regex + * cirrus: enable remote tests for rootless + * test: fix remote tests for rootless + * test: enable userns e2e tests for rootless + * CI check for --help vs man pages: usability fix + * podman-remote create|run + * Correct varlink pull panic + * add image rmi event + * Revert "Switch to golangci-lint" + * Document shortcomings with rootless podman + * podman: enable kube for rootless + * kube: correctly set the default for MemorySwappiness + * rootless: enable healthcheck tests + * Respect image entrypoint in play kube + * Increase CI resources to help avoid hitting timeouts + * podman-remote image tree + * Added port forwarding and IP address hint. + * fix bug podman cp directory + * Fix E2E tests + * Drop LocalVolumes from our the database + * Major rework of --volumes-from flag + * Volume force-remove now removed dependent containers + * Add handling for new named volumes code in pkg/spec + * Create non-existing named volumes at container create + * Switch Libpod over to new explicit named volumes + * Add named volumes for each container to database + * Add varcheck linter + * Add deadcode linter + * Update lint to use golangci-lint + * Update registrar unit tests to match them of cri-o + * Update run tests to be skipped when not supported + * Fix Dockerfile dependencies for packer tests + * Update Dockerfile to use golang:1.12 image + * Fix a potential segfault in podman search + * Improve podman pod rm -a test + * Cirrus: Update F28 -> F29 container image + * --size does not work with rootless at present + * add remote-client diff + * Cirrus: Support special-case modes of testing + * rootless: use a single user namespace + * rootless: remove SkipStorageSetup() + * Update cri-o annotations + * Update README with current version + * docs/podman*.md: fix numerous option typos and spacing errors + * docs/podman-rm.1.md: delete "Not yet implemented" msg for volume removal + * docs/podman-inspect.1.md: add missing option hyphen for "-t" + * Bump gitvalidation epoch + * Bump to v1.3.0-dev + * Fix location of libpod.conf + * Capitalize global options help information + - Changelog for v1.2.0 (2019-03-30) * Update release notes for v1.2.0 * Remove wait event diff --git a/cmd/podman/common.go b/cmd/podman/common.go index 8aca08248..c0bcaa5c5 100644 --- a/cmd/podman/common.go +++ b/cmd/podman/common.go @@ -444,7 +444,7 @@ func getCreateFlags(c *cliconfig.PodmanCommand) { ) createFlags.String( "restart", "", - "Restart is not supported. Please use a systemd unit file for restart", + "Restart policy to apply when a container exits", ) createFlags.Bool( "rm", false, diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go index 81566326b..d1f704374 100644 --- a/cmd/podman/shared/create.go +++ b/cmd/podman/shared/create.go @@ -41,6 +41,9 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod. span, _ := opentracing.StartSpanFromContext(ctx, "createContainer") defer span.Finish() } + if c.Bool("rm") && c.String("restart") != "" && c.String("restart") != "no" { + return nil, nil, errors.Errorf("the --rm option conflicts with --restart") + } rtc, err := runtime.GetConfig() if err != nil { @@ -279,9 +282,6 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod. 
blkioWeight uint16 namespaces map[string]string ) - if c.IsSet("restart") { - return nil, errors.Errorf("--restart option is not supported.\nUse systemd unit files for restarting containers") - } idmappings, err := util.ParseIDMapping(c.StringSlice("uidmap"), c.StringSlice("gidmap"), c.String("subuidname"), c.String("subgidname")) if err != nil { @@ -676,21 +676,22 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod. PidsLimit: c.Int64("pids-limit"), Ulimit: c.StringSlice("ulimit"), }, - Rm: c.Bool("rm"), - StopSignal: stopSignal, - StopTimeout: c.Uint("stop-timeout"), - Sysctl: sysctl, - Systemd: systemd, - Tmpfs: c.StringSlice("tmpfs"), - Tty: tty, - User: user, - UsernsMode: usernsMode, - MountsFlag: c.StringArray("mount"), - Volumes: c.StringArray("volume"), - WorkDir: workDir, - Rootfs: rootfs, - VolumesFrom: c.StringSlice("volumes-from"), - Syslog: c.Bool("syslog"), + RestartPolicy: c.String("restart"), + Rm: c.Bool("rm"), + StopSignal: stopSignal, + StopTimeout: c.Uint("stop-timeout"), + Sysctl: sysctl, + Systemd: systemd, + Tmpfs: c.StringSlice("tmpfs"), + Tty: tty, + User: user, + UsernsMode: usernsMode, + MountsFlag: c.StringArray("mount"), + Volumes: c.StringArray("volume"), + WorkDir: workDir, + Rootfs: rootfs, + VolumesFrom: c.StringSlice("volumes-from"), + Syslog: c.Bool("syslog"), } if config.Privileged { diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in index f3ee01bca..d69b673e0 100644 --- a/contrib/spec/podman.spec.in +++ b/contrib/spec/podman.spec.in @@ -39,7 +39,7 @@ %global shortcommit_conmon %(c=%{commit_conmon}; echo ${c:0:7}) Name: podman -Version: 1.3.0 +Version: 1.3.1 Release: #COMMITDATE#.git%{shortcommit0}%{?dist} Summary: Manage Pods, Containers and Container Images License: ASL 2.0 diff --git a/docs/libpod.conf.5.md b/docs/libpod.conf.5.md index 4abbcd8b0..2f0b3f303 100644 --- a/docs/libpod.conf.5.md +++ b/docs/libpod.conf.5.md @@ -95,6 +95,9 @@ libpod to manage containers. Path to the command binary to use for setting up a network. It is currently only used for setting up a slirp4netns network. If "" is used then the binary is looked up using the $PATH environment variable. +**events_logger**="" + Default method to use when logging events. Valid values are "journald" and "file". + ## FILES `/usr/share/containers/libpod.conf`, default libpod configuration path diff --git a/docs/podman-create.1.md b/docs/podman-create.1.md index 6d7d983b6..851f5cf3d 100644 --- a/docs/podman-create.1.md +++ b/docs/podman-create.1.md @@ -567,11 +567,17 @@ If container is running in --read-only mode, then mount a read-write tmpfs on /r **--restart=""** -Not implemented. +Restart policy to follow when containers exit. +Restart policy will not take effect if a container is stopped via the `podman kill` or `podman stop` commands. +Valid values are: -Restart should be handled via a systemd unit files. Please add your podman -commands to a unit file and allow systemd or your init system to handle the -restarting of the container processes. See example below. +- `no` : Do not restart containers on exit +- `on-failure[:max_retries]` : Restart containers when they exit with a non-0 exit code, retrying indefinitely or until the optional max_retries count is hit +- `always` : Restart containers when they exit, regardless of status, retrying indefinitely + +Please note that restart will not restart containers after a system reboot. 
+If this functionality is required in your environment, you can invoke Podman from a systemd unit file, or create an init script for whichever init system is in use. +To generate systemd unit files, please see *podman generate systemd* **--rm**=*true*|*false* @@ -859,21 +865,6 @@ the uids and gids from the host. $ podman create --uidmap 0:30000:7000 --gidmap 0:30000:7000 fedora echo hello ``` -### Running a podman container to restart inside of a systemd unit file - - -``` -[Unit] -Description=My App -[Service] -Restart=always -ExecStart=/usr/bin/podman start -a my_app -ExecStop=/usr/bin/podman stop -t 10 my_app -KillMode=process -[Install] -WantedBy=multi-user.target -``` - ### Rootless Containers Podman runs as a non root user on most systems. This feature requires that a new enough version of shadow-utils diff --git a/docs/podman-events.1.md b/docs/podman-events.1.md index da142c0fb..3ccecac28 100644 --- a/docs/podman-events.1.md +++ b/docs/podman-events.1.md @@ -28,6 +28,7 @@ The *container* event type will report the follow statuses: * pause * prune * remove + * restart * restore * start * stop diff --git a/docs/podman-run.1.md b/docs/podman-run.1.md index 9efb7f51c..db90ce50e 100644 --- a/docs/podman-run.1.md +++ b/docs/podman-run.1.md @@ -589,11 +589,17 @@ If container is running in --read-only mode, then mount a read-write tmpfs on /r **--restart=""** -Not implemented. +Restart policy to follow when containers exit. +Restart policy will not take effect if a container is stopped via the `podman kill` or `podman stop` commands. +Valid values are: -Restart should be handled via a systemd unit files. Please add your podman -commands to a unit file and allow systemd or your init system to handle the -restarting of the container processes. See *podman generate systemd*. +- `no` : Do not restart containers on exit +- `on-failure[:max_retries]` : Restart containers when they exit with a non-0 exit code, retrying indefinitely or until the optional max_retries count is hit +- `always` : Restart containers when they exit, regardless of status, retrying indefinitely + +Please note that restart will not restart containers after a system reboot. +If this functionality is required in your environment, you can invoke Podman from a systemd unit file, or create an init script for whichever init system is in use. +To generate systemd unit files, please see *podman generate systemd* **--rm**=*true*|*false* diff --git a/libpod/container.go b/libpod/container.go index 4bf9a1ba9..c07f4c78d 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -102,6 +102,20 @@ func (ns LinuxNS) String() string { } } +// Valid restart policy types. +const ( + // RestartPolicyNone indicates that no restart policy has been requested + // by a container. + RestartPolicyNone = "" + // RestartPolicyNo is identical in function to RestartPolicyNone. + RestartPolicyNo = "no" + // RestartPolicyAlways unconditionally restarts the container. + RestartPolicyAlways = "always" + // RestartPolicyOnFailure restarts the container on non-0 exit code, + // with an optional maximum number of retries. + RestartPolicyOnFailure = "on-failure" +) + // Container is a single OCI container. // All operations on a Container that access state must begin with a call to // syncContainer(). 
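The constants above enumerate the accepted policy values. A quick sketch of the `on-failure` retry semantics they back (image, command, and timing are illustrative; the inspect template assumes the `RestartCount` field exposed in `pkg/inspect` further down):

```
# Restart on non-zero exit, at most 3 times
$ podman run -d --name flaky --restart=on-failure:3 alpine sh -c 'sleep 1; exit 1'
$ sleep 10
# RestartCount is incremented only by the restart policy, not by `podman restart`
$ podman inspect --format '{{ .RestartCount }}' flaky
3
```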
@@ -179,6 +193,16 @@ type ContainerState struct { // This maps the path the file will be mounted to in the container to // the path of the file on disk outside the container BindMounts map[string]string `json:"bindMounts,omitempty"` + // StoppedByUser indicates whether the container was stopped by an + // explicit call to the Stop() API. + StoppedByUser bool `json:"stoppedByUser,omitempty"` + // RestartPolicyMatch indicates whether the conditions for restart + // policy have been met. + RestartPolicyMatch bool `json:"restartPolicyMatch,omitempty"` + // RestartCount is how many times the container was restarted by its + // restart policy. This is NOT incremented by normal container restarts + // (only by restart policy). + RestartCount uint `json:"restartCount,omitempty"` // ExtensionStageHooks holds hooks which will be executed by libpod // and not delegated to the OCI runtime. @@ -346,6 +370,17 @@ type ContainerConfig struct { LogPath string `json:"logPath"` // File containing the conmon PID ConmonPidFile string `json:"conmonPidFile,omitempty"` + // RestartPolicy indicates what action the container will take upon + // exiting naturally. + // Allowed options are "no" (take no action), "on-failure" (restart on + // non-zero exit code, up an a maximum of RestartRetries times), + // and "always" (always restart the container on any exit code). + // The empty string is treated as the default ("no") + RestartPolicy string `json:"restart_policy,omitempty"` + // RestartRetries indicates the number of attempts that will be made to + // restart the container. Used only if RestartPolicy is set to + // "on-failure". + RestartRetries uint `json:"restart_retries,omitempty"` // TODO log options for log drivers PostConfigureNetNS bool `json:"postConfigureNetNS"` @@ -729,6 +764,17 @@ func (c *Container) LogPath() string { return c.config.LogPath } +// RestartPolicy returns the container's restart policy. +func (c *Container) RestartPolicy() string { + return c.config.RestartPolicy +} + +// RestartRetries returns the number of retries that will be attempted when +// using the "on-failure" restart policy +func (c *Container) RestartRetries() uint { + return c.config.RestartRetries +} + // RuntimeName returns the name of the runtime func (c *Container) RuntimeName() string { return c.runtime.ociRuntime.name @@ -1003,6 +1049,21 @@ func (c *Container) BindMounts() (map[string]string, error) { return newMap, nil } +// StoppedByUser returns whether the container was last stopped by an explicit +// call to the Stop() API, or whether it exited naturally. +func (c *Container) StoppedByUser() (bool, error) { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return false, err + } + } + + return c.state.StoppedByUser, nil +} + // Misc Accessors // Most will require locking diff --git a/libpod/container_api.go b/libpod/container_api.go index 5bfd869b3..5bb610aab 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -57,11 +57,11 @@ func (c *Container) Init(ctx context.Context) (err error) { if c.state.State == ContainerStateStopped { // Reinitialize the container - return c.reinit(ctx) + return c.reinit(ctx, false) } // Initialize the container for the first time - return c.init(ctx) + return c.init(ctx, false) } // Start starts a container. 
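The `Kill()` hunk below records an explicit kill in the new `StoppedByUser` field, which is what keeps the restart policy from fighting `podman kill` and `podman stop`. Roughly (image and container names are illustrative):

```
$ podman run -d --name web --restart=always alpine top
# Killing (or stopping) the container sets StoppedByUser, so the "always"
# policy does not bring it back up
$ podman kill web
```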
@@ -199,8 +199,15 @@ func (c *Container) Kill(signal uint) error { if c.state.State != ContainerStateRunning { return errors.Wrapf(ErrCtrStateInvalid, "can only kill running containers") } + defer c.newContainerEvent(events.Kill) - return c.runtime.ociRuntime.killContainer(c, signal) + if err := c.runtime.ociRuntime.killContainer(c, signal); err != nil { + return err + } + + c.state.StoppedByUser = true + + return c.save() } // Exec starts a new process inside the container @@ -583,6 +590,7 @@ func (c *Container) Cleanup(ctx context.Context) error { if !c.batched { c.lock.Lock() defer c.lock.Unlock() + if err := c.syncContainer(); err != nil { return err } @@ -593,6 +601,19 @@ func (c *Container) Cleanup(ctx context.Context) error { return errors.Wrapf(ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID()) } + // Handle restart policy. + // Returns a bool indicating whether we actually restarted. + // If we did, don't proceed to cleanup - just exit. + didRestart, err := c.handleRestartPolicy(ctx) + if err != nil { + return err + } + if didRestart { + return nil + } + + // If we didn't restart, we perform a normal cleanup + // Check if we have active exec sessions if len(c.state.ExecSessions) != 0 { return errors.Wrapf(ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID()) @@ -754,7 +775,7 @@ func (c *Container) Refresh(ctx context.Context) error { if err := c.prepare(); err != nil { return err } - if err := c.init(ctx); err != nil { + if err := c.init(ctx, false); err != nil { return err } } diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go index aa3a07888..a7369bfdd 100644 --- a/libpod/container_inspect.go +++ b/libpod/container_inspect.go @@ -95,6 +95,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data) LogPath: config.LogPath, ConmonPidFile: config.ConmonPidFile, Name: config.Name, + RestartCount: int32(runtimeInfo.RestartCount), Driver: driverData.Name, MountLabel: config.MountLabel, ProcessLabel: config.ProcessLabel, diff --git a/libpod/container_internal.go b/libpod/container_internal.go index a791df491..0b4e5763e 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -210,6 +210,90 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error { return nil } +// Handle container restart policy. +// This is called when a container has exited, and was not explicitly stopped by +// an API call to stop the container or pod it is in. +func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, err error) { + // If we did not get a restart policy match, exit immediately. + // Do the same if we're not a policy that restarts. + if !c.state.RestartPolicyMatch || + c.config.RestartPolicy == RestartPolicyNo || + c.config.RestartPolicy == RestartPolicyNone { + return false, nil + } + + // If we're RestartPolicyOnFailure, we need to check retries and exit + // code. 
+ if c.config.RestartPolicy == RestartPolicyOnFailure { + if c.state.ExitCode == 0 { + return false, nil + } + + // If we don't have a max retries set, continue + if c.config.RestartRetries > 0 { + if c.state.RestartCount < c.config.RestartRetries { + logrus.Debugf("Container %s restart policy trigger: on retry %d (of %d)", + c.ID(), c.state.RestartCount, c.config.RestartRetries) + } else { + logrus.Debugf("Container %s restart policy trigger: retries exhausted", c.ID()) + return false, nil + } + } + } + + logrus.Debugf("Restarting container %s due to restart policy %s", c.ID(), c.config.RestartPolicy) + + // Need to check if dependencies are alive. + if err = c.checkDependenciesAndHandleError(ctx); err != nil { + return false, err + } + + // Is the container running again? + // If so, we don't have to do anything + if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused { + return false, nil + } else if c.state.State == ContainerStateUnknown { + return false, errors.Wrapf(ErrInternal, "invalid container state encountered in restart attempt!") + } + + c.newContainerEvent(events.Restart) + + // Increment restart count + c.state.RestartCount = c.state.RestartCount + 1 + logrus.Debugf("Container %s now on retry %d", c.ID(), c.state.RestartCount) + if err := c.save(); err != nil { + return false, err + } + + defer func() { + if err != nil { + if err2 := c.cleanup(ctx); err2 != nil { + logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2) + } + } + }() + if err := c.prepare(); err != nil { + return false, err + } + + if c.state.State == ContainerStateStopped { + // Reinitialize the container if we need to + if err := c.reinit(ctx, true); err != nil { + return false, err + } + } else if c.state.State == ContainerStateConfigured || + c.state.State == ContainerStateExited { + // Initialize the container + if err := c.init(ctx, true); err != nil { + return false, err + } + } + if err := c.start(); err != nil { + return false, err + } + return true, nil +} + // Sync this container with on-disk state and runtime status // Should only be called with container lock held // This function should suffice to ensure a container's state is accurate and @@ -230,6 +314,14 @@ func (c *Container) syncContainer() error { } // Only save back to DB if state changed if c.state.State != oldState { + // Check for a restart policy match + if c.config.RestartPolicy != RestartPolicyNone && c.config.RestartPolicy != RestartPolicyNo && + (oldState == ContainerStateRunning || oldState == ContainerStatePaused) && + (c.state.State == ContainerStateStopped || c.state.State == ContainerStateExited) && + !c.state.StoppedByUser { + c.state.RestartPolicyMatch = true + } + if err := c.save(); err != nil { return err } @@ -376,6 +468,9 @@ func resetState(state *ContainerState) error { state.ExecSessions = make(map[string]*ExecSession) state.NetworkStatus = nil state.BindMounts = make(map[string]string) + state.StoppedByUser = false + state.RestartPolicyMatch = false + state.RestartCount = 0 return nil } @@ -569,13 +664,13 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err if c.state.State == ContainerStateStopped { // Reinitialize the container if we need to - if err := c.reinit(ctx); err != nil { + if err := c.reinit(ctx, false); err != nil { return err } } else if c.state.State == ContainerStateConfigured || c.state.State == ContainerStateExited { // Or initialize it if necessary - if err := c.init(ctx); err != nil { + if err := c.init(ctx, false); err != nil 
{ return err } } @@ -763,7 +858,7 @@ func (c *Container) completeNetworkSetup() error { } // Initialize a container, creating it in the runtime -func (c *Container) init(ctx context.Context) error { +func (c *Container) init(ctx context.Context, retainRetries bool) error { span, _ := opentracing.StartSpanFromContext(ctx, "init") span.SetTag("struct", "container") defer span.Finish() @@ -789,6 +884,12 @@ func (c *Container) init(ctx context.Context) error { c.state.ExitCode = 0 c.state.Exited = false c.state.State = ContainerStateCreated + c.state.StoppedByUser = false + c.state.RestartPolicyMatch = false + + if !retainRetries { + c.state.RestartCount = 0 + } if err := c.save(); err != nil { return err @@ -851,7 +952,7 @@ func (c *Container) cleanupRuntime(ctx context.Context) error { // Should only be done on ContainerStateStopped containers. // Not necessary for ContainerStateExited - the container has already been // removed from the runtime, so init() can proceed freely. -func (c *Container) reinit(ctx context.Context) error { +func (c *Container) reinit(ctx context.Context, retainRetries bool) error { span, _ := opentracing.StartSpanFromContext(ctx, "reinit") span.SetTag("struct", "container") defer span.Finish() @@ -863,7 +964,7 @@ func (c *Container) reinit(ctx context.Context) error { } // Initialize the container again - return c.init(ctx) + return c.init(ctx, retainRetries) } // Initialize (if necessary) and start a container @@ -901,12 +1002,12 @@ func (c *Container) initAndStart(ctx context.Context) (err error) { if c.state.State == ContainerStateStopped { logrus.Debugf("Recreating container %s in OCI runtime", c.ID()) - if err := c.reinit(ctx); err != nil { + if err := c.reinit(ctx, false); err != nil { return err } } else if c.state.State == ContainerStateConfigured || c.state.State == ContainerStateExited { - if err := c.init(ctx); err != nil { + if err := c.init(ctx, false); err != nil { return err } } @@ -950,6 +1051,11 @@ func (c *Container) stop(timeout uint) error { return err } + c.state.StoppedByUser = true + if err := c.save(); err != nil { + return errors.Wrapf(err, "error saving container %s state after stopping", c.ID()) + } + // Wait until we have an exit file, and sync once we do return c.waitForExitFileAndSync() } @@ -986,6 +1092,8 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err e return errors.Wrapf(ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state") } + c.newContainerEvent(events.Restart) + if c.state.State == ContainerStateRunning { if err := c.stop(timeout); err != nil { return err @@ -1004,13 +1112,13 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err e if c.state.State == ContainerStateStopped { // Reinitialize the container if we need to - if err := c.reinit(ctx); err != nil { + if err := c.reinit(ctx, false); err != nil { return err } } else if c.state.State == ContainerStateConfigured || c.state.State == ContainerStateExited { // Initialize the container - if err := c.init(ctx); err != nil { + if err := c.init(ctx, false); err != nil { return err } } diff --git a/libpod/events/config.go b/libpod/events/config.go index 36387e835..810988205 100644 --- a/libpod/events/config.go +++ b/libpod/events/config.go @@ -134,6 +134,8 @@ const ( // Renumber indicates that lock numbers were reallocated at user // request. Renumber Status = "renumber" + // Restart indicates the target was restarted via an API call. + Restart Status = "restart" // Restore ... 
Restore Status = "restore" // Save ... diff --git a/libpod/events/events.go b/libpod/events/events.go index 202c9db4e..650a47bfb 100644 --- a/libpod/events/events.go +++ b/libpod/events/events.go @@ -144,6 +144,8 @@ func StringToStatus(name string) (Status, error) { return Remove, nil case Renumber.String(): return Renumber, nil + case Restart.String(): + return Restart, nil case Restore.String(): return Restore, nil case Save.String(): diff --git a/libpod/options.go b/libpod/options.go index 86c04db09..7ec7dfe63 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -1239,6 +1239,41 @@ func WithUseImageHosts() CtrCreateOption { } } +// WithRestartPolicy sets the container's restart policy. Valid values are +// "no", "on-failure", and "always". The empty string is allowed, and will be +// equivalent to "no". +func WithRestartPolicy(policy string) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return ErrCtrFinalized + } + + switch policy { + case RestartPolicyNone, RestartPolicyNo, RestartPolicyOnFailure, RestartPolicyAlways: + ctr.config.RestartPolicy = policy + default: + return errors.Wrapf(ErrInvalidArg, "%q is not a valid restart policy", policy) + } + + return nil + } +} + +// WithRestartRetries sets the number of retries to use when restarting a +// container with the "on-failure" restart policy. +// 0 is an allowed value, and indicates infinite retries. +func WithRestartRetries(tries uint) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return ErrCtrFinalized + } + + ctr.config.RestartRetries = tries + + return nil + } +} + // withIsInfra sets the container to be an infra container. This means the container will be sometimes hidden // and expected to be the first container in the pod. 
func withIsInfra() CtrCreateOption { diff --git a/libpod/pod_api.go b/libpod/pod_api.go index 9a6baf23e..9ed5c88eb 100644 --- a/libpod/pod_api.go +++ b/libpod/pod_api.go @@ -364,6 +364,13 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) { } logrus.Debugf("Killed container %s with signal %d", ctr.ID(), signal) + + ctr.state.StoppedByUser = true + if err := ctr.save(); err != nil { + ctrErrors[ctr.ID()] = err + } + + ctr.lock.Unlock() } if len(ctrErrors) > 0 { diff --git a/pkg/inspect/inspect.go b/pkg/inspect/inspect.go index 6978370ef..693755aa8 100644 --- a/pkg/inspect/inspect.go +++ b/pkg/inspect/inspect.go @@ -161,7 +161,7 @@ type ContainerInspectData struct { LogPath string `json:"LogPath"` ConmonPidFile string `json:"ConmonPidFile"` Name string `json:"Name"` - RestartCount int32 `json:"RestartCount"` //TODO + RestartCount int32 `json:"RestartCount"` Driver string `json:"Driver"` MountLabel string `json:"MountLabel"` ProcessLabel string `json:"ProcessLabel"` diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go index 90e7accf3..9979e773c 100644 --- a/pkg/spec/createconfig.go +++ b/pkg/spec/createconfig.go @@ -108,6 +108,7 @@ type CreateConfig struct { ReadOnlyRootfs bool //read-only ReadOnlyTmpfs bool //read-only-tmpfs Resources CreateResourceConfig + RestartPolicy string Rm bool //rm StopSignal syscall.Signal // stop-signal StopTimeout uint // stop-timeout @@ -359,6 +360,25 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l options = append(options, libpod.WithCgroupParent(c.CgroupParent)) } + if c.RestartPolicy != "" { + if c.RestartPolicy == "unless-stopped" { + return nil, errors.Wrapf(libpod.ErrInvalidArg, "the unless-stopped restart policy is not supported") + } + + split := strings.Split(c.RestartPolicy, ":") + if len(split) > 1 { + numTries, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errors.Wrapf(err, "%s is not a valid number of retries for restart policy", split[1]) + } + if numTries < 0 { + return nil, errors.Wrapf(libpod.ErrInvalidArg, "restart policy requires a positive number of retries") + } + options = append(options, libpod.WithRestartRetries(uint(numTries))) + } + options = append(options, libpod.WithRestartPolicy(split[0])) + } + // Always use a cleanup process to clean up Podman after termination exitCmd, err := c.createExitCommand(runtime) if err != nil { diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go index fe95db016..030722b47 100644 --- a/test/e2e/run_test.go +++ b/test/e2e/run_test.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "strings" + "time" . 
"github.com/containers/libpod/test/utils" "github.com/mrunalp/fileutils" @@ -720,4 +721,48 @@ USER mail` Expect(session.ExitCode()).To(Equal(1)) os.Unsetenv("http_proxy") }) + + It("podman run with restart-policy always restarts containers", func() { + podmanTest.RestoreArtifact(fedoraMinimal) + + testDir := filepath.Join(podmanTest.RunRoot, "restart-test") + err := os.Mkdir(testDir, 0755) + Expect(err).To(BeNil()) + + aliveFile := filepath.Join(testDir, "running") + file, err := os.Create(aliveFile) + Expect(err).To(BeNil()) + file.Close() + + session := podmanTest.Podman([]string{"run", "-dt", "--restart", "always", "-v", fmt.Sprintf("%s:/tmp/runroot:Z", testDir), fedoraMinimal, "bash", "-c", "date +%N > /tmp/runroot/ran && while test -r /tmp/runroot/running; do sleep 0.1s; done"}) + + found := false + testFile := filepath.Join(testDir, "ran") + for i := 0; i < 10; i++ { + time.Sleep(1 * time.Second) + if _, err := os.Stat(testFile); err == nil { + found = true + err = os.Remove(testFile) + Expect(err).To(BeNil()) + break + } + } + Expect(found).To(BeTrue()) + + err = os.Remove(aliveFile) + Expect(err).To(BeNil()) + + session.WaitWithDefaultTimeout() + + // 10 seconds to restart the container + found = false + for i := 0; i < 10; i++ { + time.Sleep(1 * time.Second) + if _, err := os.Stat(testFile); err == nil { + found = true + break + } + } + Expect(found).To(BeTrue()) + }) }) diff --git a/vendor.conf b/vendor.conf index c99b2c1d7..d5e2b60bd 100644 --- a/vendor.conf +++ b/vendor.conf @@ -94,7 +94,7 @@ k8s.io/apimachinery kubernetes-1.10.13-beta.0 https://github.com/kubernetes/apim k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7 github.com/varlink/go 64e07fabffa33e385817b41971cf2674f692f391 -github.com/containers/buildah v1.8.1 +github.com/containers/buildah v1.8.2 # TODO: Gotty has not been updated since 2012. Can we find replacement? github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512 github.com/fsouza/go-dockerclient v1.3.0 diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 13526057c..16f1a64fe 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -26,7 +26,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.8.1" + Version = "1.8.2" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. 
// This should only be changed when we make incompatible changes to diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index e8517eafb..070f4d04e 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -287,8 +287,8 @@ func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) { ctx.SignaturePolicyPath = sigPolicy } authfile, err := c.Flags().GetString("authfile") - if err == nil && c.Flag("authfile").Changed { - ctx.AuthFilePath = authfile + if err == nil { + ctx.AuthFilePath = getAuthFile(authfile) } regConf, err := c.Flags().GetString("registries-conf") if err == nil && c.Flag("registries-conf").Changed { @@ -302,6 +302,13 @@ func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) { return ctx, nil } +func getAuthFile(authfile string) string { + if authfile != "" { + return authfile + } + return os.Getenv("REGISTRY_AUTH_FILE") +} + func parseCreds(creds string) (string, string) { if creds == "" { return "", "" @@ -576,3 +583,22 @@ func IsolationOption(c *cobra.Command) (buildah.Isolation, error) { } return defaultIsolation() } + +// ScrubServer removes 'http://' or 'https://' from the front of the +// server/registry string if either is there. This will be mostly used +// for user input from 'buildah login' and 'buildah logout'. +func ScrubServer(server string) string { + server = strings.TrimPrefix(server, "https://") + return strings.TrimPrefix(server, "http://") +} + +// RegistryFromFullName gets the registry from the input. If the input is of the form +// quay.io/myuser/myimage, it will parse it and just return quay.io +// It also returns true if a full image name was given +func RegistryFromFullName(input string) string { + split := strings.Split(input, "/") + if len(split) > 1 { + return split[0] + } + return split[0] +} diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf index bec681e5c..051f98ab8 100644 --- a/vendor/github.com/containers/buildah/vendor.conf +++ b/vendor/github.com/containers/buildah/vendor.conf @@ -8,7 +8,7 @@ github.com/vbauerster/mpb v3.3.4 github.com/mattn/go-isatty v0.0.4 github.com/VividCortex/ewma v1.1.1 github.com/boltdb/bolt v1.3.1 -github.com/containers/storage v1.12.3 +github.com/containers/storage v1.12.6 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83 github.com/docker/docker-credential-helpers v0.6.1 diff --git a/version/version.go b/version/version.go index 29a576317..c63f8b820 100644 --- a/version/version.go +++ b/version/version.go @@ -4,7 +4,7 @@ package version // NOTE: remember to bump the version at the top // of the top-level README.md file when this is // bumped. -const Version = "1.3.0-dev" +const Version = "1.3.1-dev" // RemoteAPIVersion is the version for the remote // client API. It is used to determine compatibility |
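Finally, since restart policies do not survive a reboot, the updated man pages point at `podman generate systemd`, and restart-policy activity is now visible through the events system. A rough sketch tying the pieces together (the unit path, `systemctl --user` usage, and the events filter syntax are assumptions, not taken from this diff):

```
# Generate a unit for an existing container and let systemd handle boot-time start
$ podman generate systemd my_app > ~/.config/systemd/user/container-my_app.service
$ systemctl --user daemon-reload
$ systemctl --user enable --now container-my_app.service

# Events now default to journald; the old file logger can be restored in libpod.conf:
#   events_logger = "file"
# Restarts triggered by the policy (or by `podman restart`) show up as "restart" events
$ podman events --filter event=restart
```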