76 files changed, 1929 insertions, 629 deletions
@@ -543,14 +543,18 @@ install.cni:
 	install ${SELINUXOPT} -m 644 cni/87-podman-bridge.conflist ${DESTDIR}${ETCDIR}/cni/net.d/87-podman-bridge.conflist
 
 .PHONY: install.docker
-install.docker: docker-docs
-	install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(BINDIR) $(DESTDIR)$(MANDIR)/man1
+install.docker:
 	install ${SELINUXOPT} -m 755 docker $(DESTDIR)$(BINDIR)/docker
-	install ${SELINUXOPT} -m 644 docs/build/man/docker*.1 -t $(DESTDIR)$(MANDIR)/man1
 	install ${SELINUXOPT} -m 755 -d ${DESTDIR}${SYSTEMDDIR} ${DESTDIR}${USERSYSTEMDDIR} ${DESTDIR}${TMPFILESDIR}
 	install ${SELINUXOPT} -m 644 contrib/systemd/system/podman-docker.conf -t ${DESTDIR}${TMPFILESDIR}
 
+.PHONY: install.docker-docs
+install.docker-docs: docker-docs
+	install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(BINDIR) $(DESTDIR)$(MANDIR)/man1
+	install ${SELINUXOPT} -m 644 docs/build/man/docker*.1 -t $(DESTDIR)$(MANDIR)/man1
+
 .PHONY: install.systemd
+ifneq (,$(findstring systemd,$(BUILDTAGS)))
 install.systemd:
 	install ${SELINUXOPT} -m 755 -d ${DESTDIR}${SYSTEMDDIR} ${DESTDIR}${USERSYSTEMDDIR}
 # User services
@@ -563,6 +567,9 @@ install.systemd:
 	install ${SELINUXOPT} -m 644 contrib/systemd/auto-update/podman-auto-update.timer ${DESTDIR}${SYSTEMDDIR}/podman-auto-update.timer
 	install ${SELINUXOPT} -m 644 contrib/systemd/system/podman.socket ${DESTDIR}${SYSTEMDDIR}/podman.socket
 	install ${SELINUXOPT} -m 644 contrib/systemd/system/podman.service ${DESTDIR}${SYSTEMDDIR}/podman.service
+else
+install.systemd:
+endif
 
 .PHONY: uninstall
 uninstall:
diff --git a/RELEASE_PROCESS.md b/RELEASE_PROCESS.md
new file mode 100644
index 000000000..52c08c3f2
--- /dev/null
+++ b/RELEASE_PROCESS.md
@@ -0,0 +1,269 @@
+# Podman Releases
+
+## Overview
+
+Podman (and podman-remote) versioning is mostly based on [semantic-versioning
+standards](https://semver.org).
+Significant versions
+are tagged, including *release candidates* (`rc`).
+All relevant **minor** releases (`vX.Y`) have their own branches. The **latest**
+development efforts occur on the *master* branch. Branches with a
+*rhel* suffix are used for long-term support of downstream RHEL releases.
+
+## Release workflow expectations
+
+* You have push access to the [upstream podman repository](https://github.com/containers/podman.git)
+* You understand all basic `git` operations and concepts, like creating commits,
+  local vs. remote branches, rebasing, and conflict resolution.
+* You have access to your public and private *GPG* keys.
+* You have reliable internet access (i.e. not the public WiFi link at McDonald's)
+* Other podman maintainers are online/available for assistance if needed.
+* For a **major** release, you have 4-8 hours of time available, most of which will
+  be dedicated to writing release notes.
+* For a **minor** or **patch** release, you have 2-4 hours of time available
+  (the minimum depends largely on the speed/reliability of automated testing)
+
+# Releases
+
+## Major (***X***.y.z) release
+
+These releases always begin from *master*, and are contained in a branch
+named with the **major** and **minor** version. **Major** release branches
+begin in a *release candidate* phase, with prospective release tags being
+created with an `-rc` suffix. There may be multiple *release candidate*
+tags before the final/official **major** version is tagged and released.
+
+## Significant minor (x.**Y**.z) and patch (x.y.**Z**) releases
+
+Significant **minor** and **patch** level releases are normally
+branched from *master*, but there are occasional exceptions.
+Additionally, these branches may be named with a `-rhel` (or another)
+suffix to signify a specialized purpose. For example, `-rhel` indicates
+a release intended for downstream *RHEL* consumption.
+
+## Unreleased Milestones
+
+Non-release versions may occasionally appear tagged on a branch, without
+the typical (major) release media postings or artifact distribution. For
+example, as required for the (separate) RHEL release process. Otherwise
+these tags are simply milestones for reference purposes and may
+generally be safely ignored.
+
+## Process
+
+***Note:*** This is intended as a guideline and generalized process.
+Not all steps are applicable in all situations. Not all steps are
+spelled out in complete minutiae.
+
+1. Make a `[CI:DOCS]` release notes pull request.
+
+   1. Ensure any/all intended PRs are completed and merged prior to any
+      processing of release notes. Ensure your local clone is fully up to
+      date with the remote upstream (`git remote update`).
+   1. Check out (create) a local working branch for a release-notes PR,
+      based on the latest `upstream/master` or pre-existing version-named
+      branch - for example, if this is an additional *release-candidate*
+      you might use `vX.Y.Z-rc2`; **Note** this is a local branch name;
+      an upstream branch would never contain the `-rc?` suffix.
+   1. Find all merged PRs since the last release, which were performed by
+      the merge-robot. For example, given the commit range `1234...5678`
+      you would run `git log --oneline --author=openshift-merge-robot 1234...5678`.
+      Keep this list open/available for reference as you edit.
+   1. Edit `RELEASE_NOTES.md`
+
+      * If operating on a *release-candidate*, be sure to remove any
+        not-applicable items/sections. For example, those brought in
+        because of backports.
+      * Add/update the version-section with sub-sections for *Features*
+        (new functionality), *Changes* (altered podman behaviors),
+        *Bugfixes* (self-explanatory), *API* (all related features,
+        changes, and bugfixes), and *Misc* (include any **major**
+        library bumps, e.g. `c/buildah`, `c/storage`, `c/common`, etc).
+      * Use your merge-bot reference PR-listing to examine each PR in turn,
+        adding an entry for it into the appropriate section.
+
+        * Be sure to link any issue the PR fixed.
+        * Do not include any PRs that are only documentation or test/automation
+          changes.
+        * Do not include any PRs that fix bugs which we introduced due to
+          new features/enhancements. In other words, if it was working, broke, then
+          got fixed, there's no need to mention those items.
+
+   1. Commit and **sign** the `RELEASE_NOTES.md` changes, using the description
+      `Create release notes for vX.Y.Z` (where `X`, `Y`, and `Z` are the
+      actual version numbers).
+   1. Push your working branch to your github fork and create a new pull request.
+
+      * ***Ensure*** you properly select the base branch if not *master*.
+        For example, `vX.y.Z`.
+      * ***Before submitting*** the new PR, update the title with the
+        prefix `[CI:DOCS]` to avoid triggering lengthy automated testing.
+
+   1. If this is a release on a pre-existing version-named branch
+      (e.g. *release-candidate* or `-rhel`), open another PR against
+      the upstream *master* branch. This is needed to ensure the new
+      notes are present for future releases.
+
+
+1. Create a new upstream release branch (if none already exists).
+
+   1. After the release-notes pull requests have merged, a release branch is
+      needed.
+      Branching ensures all changes are curated before inclusion in the
+      release, and no new features land after the *release-candidate* phases
+      are complete.
+   1. Ensure your local clone is fully up to date with the remote upstream
+      (`git remote update`). Switch to the *master* branch (`git checkout upstream/master`).
+   1. Make a new local branch for the release based on *master*. For example,
+      `git checkout -b vX.Y`, where `X.Y` represents the complete release
+      version-name, including any suffix (if any) like `-rhel`. ***DO NOT***
+      include any `-rc` suffix in the branch name.
+   1. Edit the `.cirrus.yml` file, changing the `DEST_BRANCH` value (under the
+      `env` section) to the new, complete branch name (e.g. `vX.Y`).
+      Commit and sign, using the description
+      `Cirrus: Update operating branch`.
+   1. Push the new branch otherwise unmodified (`git push upstream vX.Y`).
+   1. Automation will begin executing on the branch immediately. Because
+      the repository allows out-of-sequence PR merging, it is possible that
+      merge order introduced bugs/defects. To establish a clean
+      baseline, observe the initial CI run on the branch for any unexpected
+      failures. This can be done by going directly to
+      `https://cirrus-ci.com/github/containers/podman/vX.Y`
+   1. If there are CI test or automation problems that need fixing on the branch,
+      attend to them using the normal PR process (to *master* first, then backport
+      changes to the new branch). Ideally, CI should be "green" on the new
+      branch before proceeding.
+
+1. Create a new branch-verification Cirrus-Cron entry.
+
+   1. This is to ensure CI's VM image timestamps are refreshed. Without this,
+      the VM images ***will*** be permanently pruned after 60 days of inactivity
+      and are hard/impossible to re-create accurately.
+   1. Go to
+      [https://cirrus-ci.com/github/containers/podman](https://cirrus-ci.com/github/containers/podman)
+      and press the "gear" (Repository Settings) button on the top-right.
+   1. At the bottom of the settings page is a table of cron-job names, branches,
+      schedule, and recent status. Below that is an editable new-entry line.
+   1. Set the new job's `name` and `branch` to the name of the new release branch.
+   1. Set the `expression` using the form `X X X ? * 1-6` where 'X' is a number
+      between 0-23 and not already taken by another job in the table. The 1-hour
+      interval is used because it takes about that long for the job to run.
+   1. Add the new job by pressing the `+` button on the right-side of the
+      new-entry line.
+
+1. Update version numbers and push tag
+
+   **TODO:** This process can be simplified by updating the script for the
+   "Optional Release Test" such that it tests the first commit, not the second.
+   In this way, pushing twice to the same PR won't be required.
+
+   1. Assuming CI tests and automation ran clean on the release branch,
+      update your local repo to be fully up to date with the remote upstream
+      (`git remote update`). Check out a local copy of the upstream
+      release branch (`git checkout upstream/vX.Y`).
+   1. Create a new local working-branch to develop the release PR,
+      `git checkout -b bump_vX.Y.Z`.
+   1. Look up the *COMMIT ID* of the last release,
+      `git log -1 $(git tag | sort -V | tail -1)`.
+   1. Run `make changelog CHANGELOG_BASE=`*COMMIT ID*. This will modify the
+      `changelog.txt` file. Manually edit it to change the first line
+      (“Changelog for …”) to include the current (new) release version number.
+      For example, `- Changelog for v2.1.0 (2020-09-22):`.
+   1. Edit `version/version.go` and bump the `Version` value to the new
+      release version. If there were API changes, also bump the `APIVersion` value.
+   1. Commit this and sign the commit (`git commit -a -s -S`). The commit message
+      should be `Bump to vX.Y.Z` (using the actual version numbers).
+   1. Push this single change to your github fork, and make a new PR,
+      **being careful** to select the proper release branch as its base.
+   1. Wait for all automated tests to pass (including on an RC-branch PR), re-running
+      and/or updating code as needed.
+   1. In the PR, under the *Checks* tab, locate and click on the Cirrus-CI
+      task `Optional Release Test`. In the right-hand window pane, click
+      the `trigger` button and wait for the test to go green. *This is a
+      critical step* which confirms the commit is worthy of becoming a release.
+   1. Tag the `Bump to vX.Y.Z` commit as a release by running
+      `git tag -s -m 'vX.Y.Z' vX.Y.Z $HASH` where `$HASH` is specified explicitly
+      and carefully, to avoid (basically) unfixable accidents (if they are pushed).
+   1. Change `version/version.go` again. This time, bump the **patch** version and
+      re-add the `-dev` suffix to indicate this is a non-released version of Podman.
+   1. Change `contrib/spec/podman.spec.in`, bumping the **patch** number of `Version`.
+   1. Commit these changes with the message `Bump to X.Y.Z-dev`.
+   1. Push your local branch to your github fork (and the PR) again.
+   1. The PR should now have two commits that look very similar to
+      https://github.com/containers/podman/pull/7787
+   1. Wait for at least all the "Build" and "Verify" (or similar) CI Testing
+      steps to complete successfully. No need to wait for complete integration
+      and system-testing (it was already done on substantially the same code, above).
+   1. Merge the PR (or ask someone else to review and merge, to be safer).
+   1. **Note:** This is the last point where any test-failures can be addressed
+      by code changes. After pushing the new version-tag upstream, no further
+      changes can be made to the code without lots of unpleasant effort. Please
+      seek assistance if needed, before proceeding.
+
+   1. Assuming the "Bump to ..." PR merged successfully, and you're **really**
+      confident the correct commit has been tagged, push it with
+      `git push upstream vX.Y.Z`
+
+1. Locate and verify release testing is proceeding
+
+   1. When the tag was pushed, an automated build was created. Locate this
+      by starting from
+      `https://github.com/containers/podman/tags` and finding the recent entry
+      for the pushed tag. Under the tag name will be a timestamp and abbreviated
+      commit hash, for example `<> 5b2585f`. Click the commit-hash link.
+   1. In the upper-left-most corner, just to the left of the "Bump to vX.Y"
+      text, will be a small status icon (yellow circle, red "X", or green check).
+      Click this to open a small pop-up/overlay window listing all the status
+      checks.
+   1. In the small pop-up/overlay window, press the "Details" link on one of the
+      Cirrus-CI status check entries (doesn't matter which one).
+   1. On the following page, in the lower-right pane, will be a "View more details
+      on Cirrus CI" link, click this.
+   1. A Cirrus-CI task details page will open, click the button labeled
+      "View All Tasks".
+   1. Keep this page open to monitor its progress and for use in future steps.
+
+1. Bump master `-dev` version
+
+   1. If you made a release branch and bumped the **major** or **minor** version,
+      complete the "Update version numbers and push tag" steps above on the
+      *master* branch.
+      Bump the **minor** version and set the **patch**
+      version to 0. For example, after pushing the v2.2.0 release, *master*
+      should be set to v2.3.0-dev.
+   1. Create a "Bump to vX.Y.Z-dev" commit with these changes.
+   1. Bump the version number in `README.md` (still on *master*)
+      to reflect the new release. Commit these changes.
+   1. Create a PR with the above commits, and oversee its merging.
+
+1. Create a GitHub Release entry and upload assets
+
+   1. Return to the Cirrus-CI Build page for the new release tag, confirm
+      (or wait for) it to complete, re-running any failed tasks as appropriate.
+   1. For anything other than an RC, download the new release artifacts
+      (the binaries which were actually tested). Visit each of the
+      "Build for ...", "Static Build", and "... Cross" tasks.
+   1. Under the "Artifacts" section of each task, click the "gosrc" item,
+      find and download the release archive (`zip`, `tar.gz` or `.msi`).
+      Save the archive with a meaningful name, for example
+      `podman-v3.0.0.msi`.
+   1. For the "Static Build" task, find the compiled `podman` and `podman-remote`
+      binaries under the "binary", "bin" links. Tar these files as
+      `podman-static.tar.gz`.
+   1. In the directory where you downloaded the archives, run
+      `sha256sum *.tar.gz *.zip *.msi > shasums` to generate SHA sums.
+   1. Go to `https://github.com/containers/podman/releases/tag/vX.Y.Z` and
+      press the "Edit Release" button. Change the name to the form `vX.Y.Z`.
+   1. If this is a release candidate, be certain to click the pre-release
+      checkbox at the bottom of the page.
+   1. Copy and paste the release notes for the release into the body of
+      the release.
+   1. Near the bottom of the page there is a box with the message
+      “Add binaries by dropping them here or selecting them”. Use
+      that to upload the artifacts you previously downloaded, including
+      the `shasums` file.
+
+      * podman-remote-release-darwin.zip
+      * podman-remote-release-windows.zip
+      * podman-remote-static.tar.gz
+      * podman-vX.Y.Z.msi
+      * shasums
+   1. Save the release.
diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go
index 78611371d..f945c9c54 100644
--- a/cmd/podman/common/create_opts.go
+++ b/cmd/podman/common/create_opts.go
@@ -311,6 +311,15 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup
 		netInfo.CNINetworks = []string{string(cc.HostConfig.NetworkMode)}
 	}
 
+	parsedTmp := make([]string, 0, len(cc.HostConfig.Tmpfs))
+	for path, options := range cc.HostConfig.Tmpfs {
+		finalString := path
+		if options != "" {
+			finalString += ":" + options
+		}
+		parsedTmp = append(parsedTmp, finalString)
+	}
+
 	// Note: several options here are marked as "don't need". this is based
 	// on speculation by Matt and I. We think that these come into play later
 	// like with start.
We believe this is just a difference in podman/compat @@ -367,7 +376,7 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup StorageOpt: stringMaptoArray(cc.HostConfig.StorageOpt), Sysctl: stringMaptoArray(cc.HostConfig.Sysctls), Systemd: "true", // podman default - TmpFS: stringMaptoArray(cc.HostConfig.Tmpfs), + TmpFS: parsedTmp, TTY: cc.Config.Tty, User: cc.Config.User, UserNS: string(cc.HostConfig.UsernsMode), diff --git a/cmd/podman/common/netflags.go b/cmd/podman/common/netflags.go index bc4d54de0..4d0a554a6 100644 --- a/cmd/podman/common/netflags.go +++ b/cmd/podman/common/netflags.go @@ -80,7 +80,7 @@ func DefineNetFlags(cmd *cobra.Command) { _ = cmd.RegisterFlagCompletionFunc(publishFlagName, completion.AutocompleteNone) netFlags.Bool( - "no-hosts", false, + "no-hosts", containerConfig.Containers.NoHosts, "Do not create /etc/hosts within the container, instead use the version from the image", ) } diff --git a/cmd/podman/containers/create.go b/cmd/podman/containers/create.go index d7507775f..af9278ce1 100644 --- a/cmd/podman/containers/create.go +++ b/cmd/podman/containers/create.go @@ -166,7 +166,11 @@ func createInit(c *cobra.Command) error { return errors.Errorf("--cpu-quota and --cpus cannot be set together") } - if c.Flag("no-hosts").Changed && c.Flag("add-host").Changed { + noHosts, err := c.Flags().GetBool("no-hosts") + if err != nil { + return err + } + if noHosts && c.Flag("add-host").Changed { return errors.Errorf("--no-hosts and --add-host cannot be set together") } cliVals.UserNS = c.Flag("userns").Value.String() diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go index d6bf761db..de532ed78 100644 --- a/cmd/podman/images/build.go +++ b/cmd/podman/images/build.go @@ -265,6 +265,9 @@ func build(cmd *cobra.Command, args []string) error { } report, err := registry.ImageEngine().Build(registry.GetContext(), containerFiles, *apiBuildOpts) + if err != nil { + return err + } if cmd.Flag("iidfile").Changed { f, err := os.Create(buildOpts.Iidfile) @@ -276,7 +279,7 @@ func build(cmd *cobra.Command, args []string) error { } } - return err + return nil } // buildFlagsWrapperToOptions converts the local build flags to the build options used @@ -509,6 +512,11 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil TransientMounts: flags.Volumes, } + if c.Flag("timestamp").Changed { + timestamp := time.Unix(flags.Timestamp, 0).UTC() + opts.Timestamp = ×tamp + } + return &entities.BuildOptions{BuildOptions: opts}, nil } diff --git a/docs/source/markdown/podman-image-sign.1.md b/docs/source/markdown/podman-image-sign.1.md index 3e52bde30..fc0f55e44 100644 --- a/docs/source/markdown/podman-image-sign.1.md +++ b/docs/source/markdown/podman-image-sign.1.md @@ -37,7 +37,7 @@ Store the signatures in the specified directory. Default: /var/lib/containers/s Override the default identity of the signature. ## EXAMPLES -Sign the busybox image with the identify of foo@bar.com with a user's keyring and save the signature in /tmp/signatures/. +Sign the busybox image with the identity of foo@bar.com with a user's keyring and save the signature in /tmp/signatures/. 
sudo podman image sign --sign-by foo@bar.com --directory /tmp/signatures docker://privateregistry.example.com/foobar @@ -6,6 +6,7 @@ require ( github.com/BurntSushi/toml v0.3.1 github.com/blang/semver v3.5.1+incompatible github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37 + github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7 github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect github.com/containernetworking/cni v0.8.1 @@ -18,7 +19,7 @@ require ( github.com/containers/psgo v1.5.2 github.com/containers/storage v1.25.0 github.com/coreos/go-systemd/v22 v22.1.0 - github.com/cri-o/ocicni v0.2.1-0.20201204103948-b6cbe99b9756 + github.com/cri-o/ocicni v0.2.1-0.20210301205850-541cf7c703cf github.com/cyphar/filepath-securejoin v0.2.2 github.com/davecgh/go-spew v1.1.1 github.com/docker/distribution v2.7.1+incompatible @@ -51,7 +52,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 - github.com/rootless-containers/rootlesskit v0.13.2 + github.com/rootless-containers/rootlesskit v0.14.0-beta.0 github.com/sirupsen/logrus v1.8.0 github.com/spf13/cobra v1.1.3 github.com/spf13/pflag v1.0.5 @@ -57,6 +57,8 @@ github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37/go.mod h1:u9UyCz2eTrS github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7 h1:ZmSAEFFtv3mepC4/Ze6E/hi6vGZlhRvywqp1l+w+qqw= +github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7/go.mod h1:Kp3ezoDVdhfYxZUtgs4OL8sVvgOLz3txk0sbQD0opvw= github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM= github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= @@ -89,7 +91,6 @@ github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containernetworking/cni v0.8.0 h1:BT9lpgGoH4jw3lFC7Odz2prU5ruiYKcgAjMCbgybcKI= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI= github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= @@ -103,18 +104,17 @@ github.com/containers/common v0.35.0 h1:1OLZ2v+Tj/CN9BTQkKZ5VOriOiArJedinMMqfJRU github.com/containers/common v0.35.0/go.mod h1:gs1th7XFTOvVUl4LDPdQjOfOeNiVRDbQ7CNrZ0wS6F8= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod 
h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= -github.com/containers/image/v5 v5.10.1 h1:tHhGQ8RCMxJfJLD/PEW1qrOKX8nndledW9qz6UiAxns= github.com/containers/image/v5 v5.10.1/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs= github.com/containers/image/v5 v5.10.2 h1:STD9GYR9p/X0qTLmBYsyx8dEM7zQW+qZ8KHoL/64fkg= github.com/containers/image/v5 v5.10.2/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= -github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c= github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g= github.com/containers/ocicrypt v1.1.0 h1:A6UzSUFMla92uxO43O6lm86i7evMGjTY7wTKB2DyGPY= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/psgo v1.5.2 h1:3aoozst/GIwsrr/5jnFy3FrJay98uujPCu9lTuSZ/Cw= github.com/containers/psgo v1.5.2/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU= +github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM= github.com/containers/storage v1.24.5 h1:BusfdU0rCS2/Daa/DPw+0iLfGRlYA7UVF7D0el3N7Vk= github.com/containers/storage v1.24.5/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ= github.com/containers/storage v1.24.6/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ= @@ -134,12 +134,11 @@ github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cri-o/ocicni v0.2.1-0.20201204103948-b6cbe99b9756 h1:4T3rzrCSvMgVTR+fm526d+Ed0BurAHGjOaaNFOVoK6E= -github.com/cri-o/ocicni v0.2.1-0.20201204103948-b6cbe99b9756/go.mod h1:vingr1ztOAzP2WyTgGbpMov9dFhbjNxdLtDv0+PhAvY= +github.com/cri-o/ocicni v0.2.1-0.20210301205850-541cf7c703cf h1:k2wrxBiBseRfOD7h+9fABEuesABBQuUuW5fWwpARbeI= +github.com/cri-o/ocicni v0.2.1-0.20210301205850-541cf7c703cf/go.mod h1:vingr1ztOAzP2WyTgGbpMov9dFhbjNxdLtDv0+PhAvY= github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= @@ -156,7 +155,6 @@ github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BU github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v17.12.0-ce-rc1.0.20200505174321-1655290016ac+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker 
v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible h1:+0LETFJcCLdIqdtEbVWF1JIxATqM15Y4sLiMcWOYq2U= github.com/docker/docker v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.0-beta1.0.20201113105859-b6bfff2a628f+incompatible h1:lwpV3629md5omgAKjxPWX17shI7vMRpE3nyb9WHn8pA= github.com/docker/docker v20.10.0-beta1.0.20201113105859-b6bfff2a628f+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= @@ -174,12 +172,10 @@ github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehP github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -224,7 +220,6 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -342,6 +337,7 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.5 h1:xNCE0uE6yvTPRS+0wGNMHPo3NIpwnk6aluQZ6R6kRcc= github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= @@ 
-353,7 +349,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= @@ -374,10 +369,10 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw= github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw= github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= @@ -414,7 +409,7 @@ github.com/moby/term v0.0.0-20200429084858-129dac9f73f6/go.mod h1:or9wGItza1sRcM github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ= github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf h1:Un6PNx5oMK6CCwO3QTUyPiK2mtZnPrpDl5UnZ64eCkw= github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= -github.com/moby/vpnkit v0.4.0/go.mod h1:KyjUrL9cb6ZSNNAUwZfqRjhwwgJ3BJN+kXh0t43WTUQ= +github.com/moby/vpnkit v0.5.0/go.mod h1:KyjUrL9cb6ZSNNAUwZfqRjhwwgJ3BJN+kXh0t43WTUQ= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -435,6 +430,7 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -442,7 +438,6 @@ 
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= @@ -454,7 +449,6 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U= github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= @@ -480,6 +474,7 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20200817204227-f9c09b4ea1df/go.m github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU= github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0 h1:+77ba4ar4jsCbL1GLbFL8fFM57w6suPfSS9PDLDY7KM= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/openshift/imagebuilder v1.1.8 h1:gjiIl8pbNj0eC4XWvFJHATdDvYm64p9/pLDLQWoLZPA= @@ -526,8 +521,8 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rootless-containers/rootlesskit v0.13.2 h1:NoSyGw0+0Js0L6nI/rfm8laV0QBI+sUxjFSGWfQgtr0= -github.com/rootless-containers/rootlesskit v0.13.2/go.mod h1:P+T/zWEzrIidEJIsYkuVWFLPebBvdehdIem7s36glh8= +github.com/rootless-containers/rootlesskit v0.14.0-beta.0 h1:S0VzvU7sEvqCTkxPAxzJ1OZpG9a8oG9FSwkVhk0b8PM= +github.com/rootless-containers/rootlesskit v0.14.0-beta.0/go.mod h1:5UDnrX52Dyoyz2lK66mjHftWpK9YSp1ghO+fY1ZkxFc= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -545,7 +540,6 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= 
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.0 h1:nfhvjKcUMhBMVqbKHJlk5RPrrfYr/NMo3692g0dwfWU= github.com/sirupsen/logrus v1.8.0/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A= @@ -556,7 +550,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= @@ -610,6 +603,7 @@ github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmF github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -699,7 +693,6 @@ golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -767,7 +760,6 @@ golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -843,9 +835,7 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= @@ -863,7 +853,6 @@ gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -887,7 +876,6 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -920,7 +908,6 @@ k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index 6f2eaeab2..122dd080f 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -1681,6 +1681,104 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf return err } +// SafeRewriteContainerConfig rewrites a 
container's configuration in a more +// limited fashion than RewriteContainerConfig. It is marked as safe to use +// under most circumstances, unlike RewriteContainerConfig. +// DO NOT USE TO: Change container dependencies, change pod membership, change +// locks, change container ID. +func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error { + if !s.valid { + return define.ErrDBClosed + } + + if !ctr.valid { + return define.ErrCtrRemoved + } + + if newName != "" && newCfg.Name != newName { + return errors.Wrapf(define.ErrInvalidArg, "new name %s for container %s must match name in given container config", newName, ctr.ID()) + } + if newName != "" && oldName == "" { + return errors.Wrapf(define.ErrInvalidArg, "must provide old name for container if a new name is given") + } + + newCfgJSON, err := json.Marshal(newCfg) + if err != nil { + return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID()) + } + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + err = db.Update(func(tx *bolt.Tx) error { + if newName != "" { + idBkt, err := getIDBucket(tx) + if err != nil { + return err + } + namesBkt, err := getNamesBucket(tx) + if err != nil { + return err + } + allCtrsBkt, err := getAllCtrsBucket(tx) + if err != nil { + return err + } + + needsRename := true + if exists := namesBkt.Get([]byte(newName)); exists != nil { + if string(exists) == ctr.ID() { + // Name already associated with the ID + // of this container. No need for a + // rename. + needsRename = false + } else { + return errors.Wrapf(define.ErrCtrExists, "name %s already in use, cannot rename container %s", newName, ctr.ID()) + } + } + + if needsRename { + // We do have to remove the old name. The other + // buckets are ID-indexed so we just need to + // overwrite the values there. + if err := namesBkt.Delete([]byte(oldName)); err != nil { + return errors.Wrapf(err, "error deleting container %s old name from DB for rename", ctr.ID()) + } + if err := idBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil { + return errors.Wrapf(err, "error renaming container %s in ID bucket in DB", ctr.ID()) + } + if err := namesBkt.Put([]byte(newName), []byte(ctr.ID())); err != nil { + return errors.Wrapf(err, "error adding new name %s for container %s in DB", newName, ctr.ID()) + } + if err := allCtrsBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil { + return errors.Wrapf(err, "error renaming container %s in all containers bucket in DB", ctr.ID()) + } + } + } + + ctrBkt, err := getCtrBucket(tx) + if err != nil { + return err + } + + ctrDB := ctrBkt.Bucket([]byte(ctr.ID())) + if ctrDB == nil { + ctr.valid = false + return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID()) + } + + if err := ctrDB.Put(configKey, newCfgJSON); err != nil { + return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID()) + } + + return nil + }) + return err +} + // RewritePodConfig rewrites a pod's configuration. // WARNING: This function is DANGEROUS. Do not use without reading the full // comment on this function in state.go. 
diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 2e0c24579..7e8226de4 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -13,6 +13,7 @@ import ( "strings" "time" + metadata "github.com/checkpoint-restore/checkpointctl/lib" "github.com/containers/buildah/copier" "github.com/containers/common/pkg/secrets" "github.com/containers/podman/v3/libpod/define" @@ -135,7 +136,7 @@ func (c *Container) ControlSocketPath() string { // CheckpointPath returns the path to the directory containing the checkpoint func (c *Container) CheckpointPath() string { - return filepath.Join(c.bundlePath(), "checkpoint") + return filepath.Join(c.bundlePath(), metadata.CheckpointDirectory) } // PreCheckpointPath returns the path to the directory containing the pre-checkpoint-images @@ -2141,26 +2142,11 @@ func (c *Container) canWithPrevious() error { return err } -// writeJSONFile marshalls and writes the given data to a JSON file -// in the bundle path -func (c *Container) writeJSONFile(v interface{}, file string) error { - fileJSON, err := json.MarshalIndent(v, "", " ") - if err != nil { - return errors.Wrapf(err, "error writing JSON to %s for container %s", file, c.ID()) - } - file = filepath.Join(c.bundlePath(), file) - if err := ioutil.WriteFile(file, fileJSON, 0644); err != nil { - return err - } - - return nil -} - // prepareCheckpointExport writes the config and spec to // JSON files for later export func (c *Container) prepareCheckpointExport() error { // save live config - if err := c.writeJSONFile(c.Config(), "config.dump"); err != nil { + if _, err := metadata.WriteJSONFile(c.Config(), c.bundlePath(), metadata.ConfigDumpFile); err != nil { return err } @@ -2171,7 +2157,7 @@ func (c *Container) prepareCheckpointExport() error { logrus.Debugf("generating spec for container %q failed with %v", c.ID(), err) return err } - if err := c.writeJSONFile(g.Config, "spec.dump"); err != nil { + if _, err := metadata.WriteJSONFile(g.Config, c.bundlePath(), metadata.SpecDumpFile); err != nil { return err } diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index dc0418148..2684c2845 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -19,6 +19,7 @@ import ( "syscall" "time" + metadata "github.com/checkpoint-restore/checkpointctl/lib" cnitypes "github.com/containernetworking/cni/pkg/types/current" "github.com/containernetworking/plugins/pkg/ns" "github.com/containers/buildah/pkg/chrootuser" @@ -33,6 +34,7 @@ import ( "github.com/containers/podman/v3/libpod/events" "github.com/containers/podman/v3/pkg/annotations" "github.com/containers/podman/v3/pkg/cgroups" + "github.com/containers/podman/v3/pkg/checkpoint/crutils" "github.com/containers/podman/v3/pkg/criu" "github.com/containers/podman/v3/pkg/lookup" "github.com/containers/podman/v3/pkg/resolvconf" @@ -884,80 +886,32 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error { logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), options.TargetFile) includeFiles := []string{ - "checkpoint", "artifacts", "ctr.log", - "config.dump", - "spec.dump", - "network.status"} + metadata.CheckpointDirectory, + metadata.ConfigDumpFile, + metadata.SpecDumpFile, + metadata.NetworkStatusFile, + } if options.PreCheckPoint { includeFiles[0] = "pre-checkpoint" } // Get root file-system changes included in the checkpoint archive - rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar") - deleteFilesList := 
filepath.Join(c.bundlePath(), "deleted.files") + var addToTarFiles []string if !options.IgnoreRootfs { // To correctly track deleted files, let's go through the output of 'podman diff' - tarFiles, err := c.runtime.GetDiff("", c.ID()) + rootFsChanges, err := c.runtime.GetDiff("", c.ID()) if err != nil { - return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath) + return errors.Wrapf(err, "error exporting root file-system diff for %q", c.ID()) } - var rootfsIncludeFiles []string - var deletedFiles []string - - for _, file := range tarFiles { - if file.Kind == archive.ChangeAdd { - rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path) - continue - } - if file.Kind == archive.ChangeDelete { - deletedFiles = append(deletedFiles, file.Path) - continue - } - fileName, err := os.Stat(file.Path) - if err != nil { - continue - } - if !fileName.IsDir() && file.Kind == archive.ChangeModify { - rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path) - continue - } - } - - if len(rootfsIncludeFiles) > 0 { - rootfsTar, err := archive.TarWithOptions(c.state.Mountpoint, &archive.TarOptions{ - Compression: archive.Uncompressed, - IncludeSourceDir: true, - IncludeFiles: rootfsIncludeFiles, - }) - if err != nil { - return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath) - } - rootfsDiffFile, err := os.Create(rootfsDiffPath) - if err != nil { - return errors.Wrapf(err, "error creating root file-system diff file %q", rootfsDiffPath) - } - defer rootfsDiffFile.Close() - _, err = io.Copy(rootfsDiffFile, rootfsTar) - if err != nil { - return err - } - includeFiles = append(includeFiles, "rootfs-diff.tar") + addToTarFiles, err := crutils.CRCreateRootFsDiffTar(&rootFsChanges, c.state.Mountpoint, c.bundlePath()) + if err != nil { + return err } - if len(deletedFiles) > 0 { - formatJSON, err := json.MarshalIndent(deletedFiles, "", " ") - if err != nil { - return errors.Wrapf(err, "error creating delete files list file %q", deleteFilesList) - } - if err := ioutil.WriteFile(deleteFilesList, formatJSON, 0600); err != nil { - return errors.Wrap(err, "error creating delete files list file") - } - - includeFiles = append(includeFiles, "deleted.files") - } + includeFiles = append(includeFiles, addToTarFiles...) 
} // Folder containing archived volumes that will be included in the export @@ -1034,8 +988,9 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error { return err } - os.Remove(rootfsDiffPath) - os.Remove(deleteFilesList) + for _, file := range addToTarFiles { + os.Remove(filepath.Join(c.bundlePath(), file)) + } if !options.IgnoreVolumes { os.RemoveAll(expVolDir) @@ -1054,23 +1009,6 @@ func (c *Container) checkpointRestoreSupported() error { return nil } -func (c *Container) checkpointRestoreLabelLog(fileName string) error { - // Create the CRIU log file and label it - dumpLog := filepath.Join(c.bundlePath(), fileName) - - logFile, err := os.OpenFile(dumpLog, os.O_CREATE, 0600) - if err != nil { - return errors.Wrap(err, "failed to create CRIU log file") - } - if err := logFile.Close(); err != nil { - logrus.Error(err) - } - if err = label.SetFileLabel(dumpLog, c.MountLabel()); err != nil { - return err - } - return nil -} - func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) error { if err := c.checkpointRestoreSupported(); err != nil { return err @@ -1084,7 +1022,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO return errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used") } - if err := c.checkpointRestoreLabelLog("dump.log"); err != nil { + if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "dump.log", c.MountLabel()); err != nil { return err } @@ -1095,11 +1033,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO // Save network.status. This is needed to restore the container with // the same IP. Currently limited to one IP address in a container // with one interface. 
- formatJSON, err := json.MarshalIndent(c.state.NetworkStatus, "", " ") - if err != nil { - return err - } - if err := ioutil.WriteFile(filepath.Join(c.bundlePath(), "network.status"), formatJSON, 0644); err != nil { + if _, err := metadata.WriteJSONFile(c.state.NetworkStatus, c.bundlePath(), metadata.NetworkStatusFile); err != nil { return err } @@ -1115,7 +1049,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO } if options.TargetFile != "" { - if err = c.exportCheckpoint(options); err != nil { + if err := c.exportCheckpoint(options); err != nil { return err } } @@ -1135,8 +1069,8 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO cleanup := []string{ "dump.log", "stats-dump", - "config.dump", - "spec.dump", + metadata.ConfigDumpFile, + metadata.SpecDumpFile, } for _, del := range cleanup { file := filepath.Join(c.bundlePath(), del) @@ -1151,28 +1085,13 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO } func (c *Container) importCheckpoint(input string) error { - archiveFile, err := os.Open(input) - if err != nil { - return errors.Wrap(err, "failed to open checkpoint archive for import") - } - - defer archiveFile.Close() - options := &archive.TarOptions{ - ExcludePatterns: []string{ - // config.dump and spec.dump are only required - // container creation - "config.dump", - "spec.dump", - }, - } - err = archive.Untar(archiveFile, c.bundlePath(), options) - if err != nil { - return errors.Wrapf(err, "unpacking of checkpoint archive %s failed", input) + if err := crutils.CRImportCheckpointWithoutConfig(c.bundlePath(), input); err != nil { + return err } // Make sure the newly created config.json exists on disk g := generate.Generator{Config: c.config.Spec} - if err = c.saveSpec(g.Config); err != nil { + if err := c.saveSpec(g.Config); err != nil { return errors.Wrap(err, "saving imported container specification for restore failed") } @@ -1221,7 +1140,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti return errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore") } - if err := c.checkpointRestoreLabelLog("restore.log"); err != nil { + if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "restore.log", c.MountLabel()); err != nil { return err } @@ -1244,7 +1163,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti // Read network configuration from checkpoint // Currently only one interface with one IP is supported. - networkStatusFile, err := os.Open(filepath.Join(c.bundlePath(), "network.status")) + networkStatus, _, err := metadata.ReadContainerCheckpointNetworkStatus(c.bundlePath()) // If the restored container should get a new name, the IP address of // the container will not be restored. This assumes that if a new name is // specified, the container is restored multiple times. @@ -1254,43 +1173,14 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti if err == nil && options.Name == "" && (!options.IgnoreStaticIP || !options.IgnoreStaticMAC) { // The file with the network.status does exist. Let's restore the // container with the same IP address / MAC address as during checkpointing. 
- defer networkStatusFile.Close() - var networkStatus []*cnitypes.Result - networkJSON, err := ioutil.ReadAll(networkStatusFile) - if err != nil { - return err - } - if err := json.Unmarshal(networkJSON, &networkStatus); err != nil { - return err - } if !options.IgnoreStaticIP { - // Take the first IP address - var IP net.IP - if len(networkStatus) > 0 { - if len(networkStatus[0].IPs) > 0 { - IP = networkStatus[0].IPs[0].Address.IP - } - } - if IP != nil { + if IP := metadata.GetIPFromNetworkStatus(networkStatus); IP != nil { // Tell CNI which IP address we want. c.requestedIP = IP } } if !options.IgnoreStaticMAC { - // Take the first device with a defined sandbox. - var MAC net.HardwareAddr - if len(networkStatus) > 0 { - for _, n := range networkStatus[0].Interfaces { - if n.Sandbox != "" { - MAC, err = net.ParseMAC(n.Mac) - if err != nil { - return err - } - break - } - } - } - if MAC != nil { + if MAC := metadata.GetMACFromNetworkStatus(networkStatus); MAC != nil { // Tell CNI which MAC address we want. c.requestedMAC = MAC } @@ -1398,36 +1288,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti // Before actually restarting the container, apply the root file-system changes if !options.IgnoreRootfs { - rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar") - if _, err := os.Stat(rootfsDiffPath); err == nil { - // Only do this if a rootfs-diff.tar actually exists - rootfsDiffFile, err := os.Open(rootfsDiffPath) - if err != nil { - return errors.Wrap(err, "failed to open root file-system diff file") - } - defer rootfsDiffFile.Close() - if err := c.runtime.ApplyDiffTarStream(c.ID(), rootfsDiffFile); err != nil { - return errors.Wrapf(err, "failed to apply root file-system diff file %s", rootfsDiffPath) - } + if err := crutils.CRApplyRootFsDiffTar(c.bundlePath(), c.state.Mountpoint); err != nil { + return err } - deletedFilesPath := filepath.Join(c.bundlePath(), "deleted.files") - if _, err := os.Stat(deletedFilesPath); err == nil { - var deletedFiles []string - deletedFilesJSON, err := ioutil.ReadFile(deletedFilesPath) - if err != nil { - return errors.Wrapf(err, "failed to read deleted files file") - } - if err := json.Unmarshal(deletedFilesJSON, &deletedFiles); err != nil { - return errors.Wrapf(err, "failed to unmarshal deleted files file %s", deletedFilesPath) - } - for _, deleteFile := range deletedFiles { - // Using RemoveAll as deletedFiles, which is generated from 'podman diff' - // lists completely deleted directories as a single entry: 'D /root'. 
- err = os.RemoveAll(filepath.Join(c.state.Mountpoint, deleteFile)) - if err != nil { - return errors.Wrapf(err, "failed to delete files from container %s during restore", c.ID()) - } - } + + if err := crutils.CRRemoveDeletedFiles(c.ID(), c.bundlePath(), c.state.Mountpoint); err != nil { + return err } } @@ -1452,7 +1318,15 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti if err != nil { logrus.Debugf("Non-fatal: removal of pre-checkpoint directory (%s) failed: %v", c.PreCheckPointPath(), err) } - cleanup := [...]string{"restore.log", "dump.log", "stats-dump", "stats-restore", "network.status", "rootfs-diff.tar", "deleted.files"} + cleanup := [...]string{ + "restore.log", + "dump.log", + "stats-dump", + "stats-restore", + metadata.NetworkStatusFile, + metadata.RootFsDiffTar, + metadata.DeletedFilesFile, + } for _, del := range cleanup { file := filepath.Join(c.bundlePath(), del) err = os.Remove(file) diff --git a/libpod/container_log.go b/libpod/container_log.go index a3b700004..c207df819 100644 --- a/libpod/container_log.go +++ b/libpod/container_log.go @@ -29,7 +29,6 @@ func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logCh case define.NoLogging: return errors.Wrapf(define.ErrNoLogs, "this container is using the 'none' log driver, cannot read logs") case define.JournaldLogging: - // TODO Skip sending logs until journald logs can be read return c.readFromJournal(ctx, options, logChannel) case define.JSONLogging: // TODO provide a separate implementation of this when Conmon diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go index 5792633b0..4a541b6e7 100644 --- a/libpod/container_log_linux.go +++ b/libpod/container_log_linux.go @@ -52,6 +52,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption if time.Now().Before(options.Since) { return nil } + // coreos/go-systemd/sdjournal expects a negative time.Duration for times in the past config.Since = -time.Since(options.Since) } config.Matches = append(config.Matches, journal.Match{ diff --git a/libpod/image/image.go b/libpod/image/image.go index 7c760a79a..265178ad5 100644 --- a/libpod/image/image.go +++ b/libpod/image/image.go @@ -143,7 +143,7 @@ func (ir *Runtime) NewFromLocal(name string) (*Image, error) { // New creates a new image object where the image could be local // or remote -func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, label *string, pullType util.PullType) (*Image, error) { +func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, label *string, pullType util.PullType, progress chan types.ProgressProperties) (*Image, error) { span, _ := opentracing.StartSpanFromContext(ctx, "newImage") span.SetTag("type", "runtime") defer span.Finish() @@ -162,7 +162,7 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile if signaturePolicyPath == "" { signaturePolicyPath = ir.SignaturePolicyPath } - imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions, &retry.RetryOptions{MaxRetry: maxRetry}, label) + imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions, &retry.RetryOptions{MaxRetry: maxRetry}, label, 
progress) if err != nil { return nil, err } @@ -323,7 +323,7 @@ func (ir *Runtime) LoadAllImagesFromDockerArchive(ctx context.Context, fileName } defer goal.cleanUp() - imageNames, err := ir.doPullImage(ctx, sc, goal, writer, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{}, nil) + imageNames, err := ir.doPullImage(ctx, sc, goal, writer, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{}, nil, nil) if err != nil { return nil, err } diff --git a/libpod/image/image_test.go b/libpod/image/image_test.go index 1ea4f6c11..3e6e7b9db 100644 --- a/libpod/image/image_test.go +++ b/libpod/image/image_test.go @@ -94,9 +94,9 @@ func TestImage_NewFromLocal(t *testing.T) { ir, err := NewImageRuntimeFromOptions(so) assert.NoError(t, err) ir.Eventer = events.NewNullEventer() - bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing) + bb, err := ir.New(context.Background(), "docker.io/library/busybox:latest", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil) assert.NoError(t, err) - bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing) + bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil) assert.NoError(t, err) tm := makeLocalMatrix(bb, bbglibc) @@ -140,7 +140,7 @@ func TestImage_New(t *testing.T) { // Iterate over the names and delete the image // after the pull for _, img := range names { - newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing) + newImage, err := ir.New(context.Background(), img, "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing, nil) assert.NoError(t, err) assert.NotEqual(t, newImage.ID(), "") err = newImage.Remove(context.Background(), false) @@ -169,7 +169,7 @@ func TestImage_MatchRepoTag(t *testing.T) { ir, err := NewImageRuntimeFromOptions(so) assert.NoError(t, err) ir.Eventer = events.NewNullEventer() - newImage, err := ir.New(context.Background(), "busybox", "", "", os.Stdout, nil, SigningOptions{}, nil, util.PullImageMissing) + newImage, err := ir.New(context.Background(), "busybox", "", "", os.Stdout, nil, SigningOptions{}, nil, util.PullImageMissing, nil) assert.NoError(t, err) err = newImage.TagImage("foo:latest") assert.NoError(t, err) diff --git a/libpod/image/layer_tree.go b/libpod/image/layer_tree.go index dde39dba1..aa3084449 100644 --- a/libpod/image/layer_tree.go +++ b/libpod/image/layer_tree.go @@ -4,7 +4,6 @@ import ( "context" ociv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -188,7 +187,12 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) { node, exists := t.nodes[child.TopLayer()] if !exists { - return nil, errors.Errorf("layer not found in layer tree: %q", child.TopLayer()) + // Note: erroring out in this case has turned out having been a + // mistake. Users may not be able to recover, so we're now + // throwing a warning to guide them to resolve the issue and + // turn the errors non-fatal. + logrus.Warnf("Layer %s not found in layer. 
The storage may be corrupted, consider running `podman system reset`.", child.TopLayer()) + return nil, nil } childOCI, err := t.toOCI(ctx, child) diff --git a/libpod/image/pull.go b/libpod/image/pull.go index 3cb1e57c7..c5fafc25d 100644 --- a/libpod/image/pull.go +++ b/libpod/image/pull.go @@ -6,6 +6,7 @@ import ( "io" "path/filepath" "strings" + "time" "github.com/containers/common/pkg/retry" cp "github.com/containers/image/v5/copy" @@ -241,7 +242,7 @@ func toLocalImageName(imageName string) string { // pullImageFromHeuristicSource pulls an image based on inputName, which is heuristically parsed and may involve configured registries. // Use pullImageFromReference if the source is known precisely. -func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string) ([]string, error) { +func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string, progress chan types.ProgressProperties) ([]string, error) { span, _ := opentracing.StartSpanFromContext(ctx, "pullImageFromHeuristicSource") defer span.Finish() @@ -275,7 +276,7 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s } } defer goal.cleanUp() - return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, label) + return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, label, progress) } // pullImageFromReference pulls an image from a types.imageReference. @@ -294,7 +295,7 @@ func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.Imag return nil, errors.Wrapf(err, "error determining pull goal for image %q", transports.ImageName(srcRef)) } defer goal.cleanUp() - return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, nil) + return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, nil, nil) } func cleanErrorMessage(err error) string { @@ -304,7 +305,7 @@ func cleanErrorMessage(err error) string { } // doPullImage is an internal helper interpreting pullGoal. Almost everyone should call one of the callers of doPullImage instead. -func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string) ([]string, error) { +func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string, progress chan types.ProgressProperties) ([]string, error) { span, _ := opentracing.StartSpanFromContext(ctx, "doPullImage") defer span.Finish() @@ -328,6 +329,10 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa for _, imageInfo := range goal.refPairs { copyOptions := getCopyOptions(sc, writer, dockerOptions, nil, signingOptions, "", nil) copyOptions.SourceCtx.SystemRegistriesConfPath = systemRegistriesConfPath // FIXME: Set this more globally. 
Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place. + if progress != nil { + copyOptions.Progress = progress + copyOptions.ProgressInterval = time.Second + } // Print the following statement only when pulling from a docker or atomic registry if writer != nil && (imageInfo.srcRef.Transport().Name() == DockerTransport || imageInfo.srcRef.Transport().Name() == AtomicTransport) { if _, err := io.WriteString(writer, fmt.Sprintf("Trying to pull %s...\n", imageInfo.image)); err != nil { diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go index 26f15d9c8..3875878ed 100644 --- a/libpod/in_memory_state.go +++ b/libpod/in_memory_state.go @@ -822,6 +822,46 @@ func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *Container return nil } +// SafeRewriteContainerConfig rewrites a container's configuration. +// It's safer than RewriteContainerConfig, but still has limitations. Please +// read the comment in state.go before using. +func (s *InMemoryState) SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error { + if !ctr.valid { + return define.ErrCtrRemoved + } + + if _, err := s.nameIndex.Get(newName); err == nil { + return errors.Wrapf(define.ErrCtrExists, "name %s is in use", newName) + } + + // If the container does not exist, return error + stateCtr, ok := s.containers[ctr.ID()] + if !ok { + ctr.valid = false + return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID()) + } + + // Change name in registry. + if s.namespace != "" { + nsIndex, ok := s.namespaceIndexes[s.namespace] + if !ok { + return define.ErrInternal + } + nsIndex.nameIndex.Release(oldName) + if err := nsIndex.nameIndex.Reserve(newName, ctr.ID()); err != nil { + return errors.Wrapf(err, "error registering name %s", newName) + } + } + s.nameIndex.Release(oldName) + if err := s.nameIndex.Reserve(newName, ctr.ID()); err != nil { + return errors.Wrapf(err, "error registering name %s", newName) + } + + stateCtr.config = newCfg + + return nil +} + // RewritePodConfig rewrites a pod's configuration. // This function is DANGEROUS, even with in-memory state. // Please read the full comment on it in state.go before using it. diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go index de7630c06..492bc807a 100644 --- a/libpod/oci_conmon_linux.go +++ b/libpod/oci_conmon_linux.go @@ -28,6 +28,7 @@ import ( "github.com/containers/podman/v3/libpod/define" "github.com/containers/podman/v3/libpod/logs" "github.com/containers/podman/v3/pkg/cgroups" + "github.com/containers/podman/v3/pkg/checkpoint/crutils" "github.com/containers/podman/v3/pkg/errorhandling" "github.com/containers/podman/v3/pkg/lookup" "github.com/containers/podman/v3/pkg/rootless" @@ -837,16 +838,7 @@ func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) { // SupportsCheckpoint checks if the OCI runtime supports checkpointing // containers. func (r *ConmonOCIRuntime) SupportsCheckpoint() bool { - // Check if the runtime implements checkpointing. Currently only - // runc's checkpoint/restore implementation is supported. 
- cmd := exec.Command(r.path, "checkpoint", "--help") - if err := cmd.Start(); err != nil { - return false - } - if err := cmd.Wait(); err == nil { - return true - } - return false + return crutils.CRRuntimeSupportsCheckpointRestore(r.path) } // SupportsJSONErrors checks if the OCI runtime supports JSON-formatted error diff --git a/libpod/rootless_cni_linux.go b/libpod/rootless_cni_linux.go index e97985180..df690e914 100644 --- a/libpod/rootless_cni_linux.go +++ b/libpod/rootless_cni_linux.go @@ -265,7 +265,7 @@ func startRootlessCNIInfraContainer(ctx context.Context, r *Runtime) (*Container } logrus.Debugf("rootless CNI: ensuring image %q to exist", imageName) newImage, err := r.ImageRuntime().New(ctx, imageName, "", "", nil, nil, - image.SigningOptions{}, nil, util.PullImageMissing) + image.SigningOptions{}, nil, util.PullImageMissing, nil) if err != nil { return nil, err } diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index 8bf862bf2..301c4627d 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -74,8 +74,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config } // RenameContainer renames the given container. -// The given container object will be rendered unusable, and a new, renamed -// Container will be returned. +// Returns a copy of the container that has been renamed if successful. func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName string) (*Container, error) { ctr.lock.Lock() defer ctr.lock.Unlock() @@ -88,26 +87,6 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s return nil, define.RegexError } - // Check if the name is available. - // This is *100% NOT ATOMIC* so any failures in-flight will do - // *VERY BAD THINGS* to the state. So we have to try and catch all we - // can before starting. - if _, err := r.state.LookupContainerID(newName); err == nil { - return nil, errors.Wrapf(define.ErrCtrExists, "name %s is already in use by another container", newName) - } - if _, err := r.state.LookupPod(newName); err == nil { - return nil, errors.Wrapf(define.ErrPodExists, "name %s is already in use by another pod", newName) - } - - // TODO: Investigate if it is possible to remove this limitation. - depCtrs, err := r.state.ContainerInUse(ctr) - if err != nil { - return nil, err - } - if len(depCtrs) > 0 { - return nil, errors.Wrapf(define.ErrCtrExists, "cannot rename container %s as it is in use by other containers: %v", ctr.ID(), strings.Join(depCtrs, ",")) - } - // We need to pull an updated config, in case another rename fired and // the config was re-written. newConf, err := r.state.GetContainerConfig(ctr.ID()) @@ -116,95 +95,33 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s } ctr.config = newConf - // TODO: This is going to fail if we have active exec sessions, too. - // Investigate fixing that at a later date. 
- - var pod *Pod - if ctr.config.Pod != "" { - tmpPod, err := r.state.Pod(ctr.config.Pod) - if err != nil { - return nil, errors.Wrapf(err, "error retrieving container %s pod", ctr.ID()) - } - pod = tmpPod - // Lock pod to ensure it's not removed while we're working - pod.lock.Lock() - defer pod.lock.Unlock() - } - - // Lock all volumes to ensure they are not removed while we're working - volsLocked := make(map[string]bool) - for _, namedVol := range ctr.config.NamedVolumes { - if volsLocked[namedVol.Name] { - continue - } - vol, err := r.state.Volume(namedVol.Name) - if err != nil { - return nil, errors.Wrapf(err, "error retrieving volume used by container %s", ctr.ID()) - } - - volsLocked[vol.Name()] = true - vol.lock.Lock() - defer vol.lock.Unlock() - } - logrus.Infof("Going to rename container %s from %q to %q", ctr.ID(), ctr.Name(), newName) - // Step 1: remove the old container. - if pod != nil { - if err := r.state.RemoveContainerFromPod(pod, ctr); err != nil { - return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID()) - } - } else { - if err := r.state.RemoveContainer(ctr); err != nil { - return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID()) - } - } - - // Step 2: Make a new container based on the old one. - // TODO: Should we deep-copy the container config and state, to be safe? - newCtr := new(Container) - newCtr.config = ctr.config - newCtr.state = ctr.state - newCtr.lock = ctr.lock - newCtr.ociRuntime = ctr.ociRuntime - newCtr.runtime = r - newCtr.rootlessSlirpSyncR = ctr.rootlessSlirpSyncR - newCtr.rootlessSlirpSyncW = ctr.rootlessSlirpSyncW - newCtr.rootlessPortSyncR = ctr.rootlessPortSyncR - newCtr.rootlessPortSyncW = ctr.rootlessPortSyncW - - newCtr.valid = true - newCtr.config.Name = newName - - // Step 3: Add that new container to the DB - if pod != nil { - if err := r.state.AddContainerToPod(pod, newCtr); err != nil { - return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID()) - } - } else { - if err := r.state.AddContainer(newCtr); err != nil { - return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID()) - } - } + // Step 1: Alter the config. Save the old name, we need it to rewrite + // the config. + oldName := ctr.config.Name + ctr.config.Name = newName - // Step 4: Save the new container, to force the state to be written to - // the DB. This may not be necessary, depending on DB implementation, - // but let's do it to be safe. - if err := newCtr.save(); err != nil { - return nil, err + // Step 2: rewrite the old container's config in the DB. + if err := r.state.SafeRewriteContainerConfig(ctr, oldName, ctr.config.Name, ctr.config); err != nil { + // Assume the rename failed. + // Set config back to the old name so reflect what is actually + // present in the DB. + ctr.config.Name = oldName + return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID()) } - // Step 5: rename the container in c/storage. + // Step 3: rename the container in c/storage. // This can fail if the name is already in use by a non-Podman // container. This puts us in a bad spot - we've already renamed the // container in Podman. We can swap the order, but then we have the // opposite problem. Atomicity is a real problem here, with no easy // solution. 
- if err := r.store.SetNames(newCtr.ID(), []string{newCtr.Name()}); err != nil { + if err := r.store.SetNames(ctr.ID(), []string{ctr.Name()}); err != nil { return nil, err } - return newCtr, nil + return ctr, nil } func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) { diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go index 000029fa4..0a09e40ea 100644 --- a/libpod/runtime_pod_infra_linux.go +++ b/libpod/runtime_pod_infra_linux.go @@ -216,7 +216,7 @@ func (r *Runtime) createInfraContainer(ctx context.Context, p *Pod) (*Container, if img == "" { img = r.config.Engine.InfraImage } - newImage, err := r.ImageRuntime().New(ctx, img, "", "", nil, nil, image.SigningOptions{}, nil, util.PullImageMissing) + newImage, err := r.ImageRuntime().New(ctx, img, "", "", nil, nil, image.SigningOptions{}, nil, util.PullImageMissing, nil) if err != nil { return nil, err } diff --git a/libpod/state.go b/libpod/state.go index 074d21740..4b711bae9 100644 --- a/libpod/state.go +++ b/libpod/state.go @@ -155,6 +155,19 @@ type State interface { // answer is this: use this only very sparingly, and only if you really // know what you're doing. RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error + // This is a more limited version of RewriteContainerConfig, though it + // comes with the added ability to alter a container's name. In exchange + // it loses the ability to manipulate the container's locks. + // It is not intended to be as restrictive as RewriteContainerConfig, in + // that we allow it to be run while other Podman processes are running, + // and without holding the alive lock. + // Container ID and pod membership still *ABSOLUTELY CANNOT* be altered. + // Also, you cannot change a container's dependencies - shared namespace + // containers or generic dependencies - at present. This is + // theoretically possible but not yet implemented. + // If newName is not "" the container will be renamed to the new name. + // The oldName parameter is only required if newName is given. + SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error // PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING. // This function is identical to RewriteContainerConfig, save for the // fact that it is used with pods instead. 
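(A minimal usage sketch of the new SafeRewriteContainerConfig state call documented above, pieced together from the runtime_ctr.go hunk earlier in this diff. The helper name renameSketch, the omitted locking, and the abbreviated error handling are illustrative assumptions, not part of the patch.)

    func renameSketch(r *Runtime, ctr *Container, newName string) error {
        oldName := ctr.config.Name
        ctr.config.Name = newName

        // One state call rewrites the config under the new name instead of
        // removing and re-adding the container record.
        if err := r.state.SafeRewriteContainerConfig(ctr, oldName, newName, ctr.config); err != nil {
            // Roll back the in-memory name so it matches what is in the DB.
            ctr.config.Name = oldName
            return err
        }

        // Keep c/storage in sync with the new name; as the comments above note,
        // this step is not atomic with the DB update.
        return r.store.SetNames(ctr.ID(), []string{newName})
    }

As the hunk's own comments point out, the DB rename and the c/storage rename cannot be made atomic, so the caller simply performs them in sequence.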
diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go index 971b6aa50..d26bb50f4 100644 --- a/pkg/api/handlers/compat/containers.go +++ b/pkg/api/handlers/compat/containers.go @@ -307,6 +307,34 @@ func LibpodToContainer(l *libpod.Container, sz bool) (*handlers.Container, error } } + portMappings, err := l.PortMappings() + if err != nil { + return nil, err + } + + ports := make([]types.Port, len(portMappings)) + for idx, portMapping := range portMappings { + ports[idx] = types.Port{ + IP: portMapping.HostIP, + PrivatePort: uint16(portMapping.ContainerPort), + PublicPort: uint16(portMapping.HostPort), + Type: portMapping.Protocol, + } + } + inspect, err := l.Inspect(false) + if err != nil { + return nil, err + } + + n, err := json.Marshal(inspect.NetworkSettings) + if err != nil { + return nil, err + } + networkSettings := types.SummaryNetworkSettings{} + if err := json.Unmarshal(n, &networkSettings); err != nil { + return nil, err + } + return &handlers.Container{Container: types.Container{ ID: l.ID(), Names: []string{fmt.Sprintf("/%s", l.Name())}, @@ -314,7 +342,7 @@ func LibpodToContainer(l *libpod.Container, sz bool) (*handlers.Container, error ImageID: imageID, Command: strings.Join(l.Command(), " "), Created: l.CreatedTime().Unix(), - Ports: nil, + Ports: ports, SizeRw: sizeRW, SizeRootFs: sizeRootFs, Labels: l.Labels(), @@ -324,7 +352,7 @@ func LibpodToContainer(l *libpod.Container, sz bool) (*handlers.Container, error NetworkMode string `json:",omitempty"` }{ "host"}, - NetworkSettings: nil, + NetworkSettings: &networkSettings, Mounts: nil, }, ContainerCreateConfig: types.ContainerCreateConfig{}, diff --git a/pkg/api/handlers/compat/images.go b/pkg/api/handlers/compat/images.go index 1a4dd939e..e5caa9ea5 100644 --- a/pkg/api/handlers/compat/images.go +++ b/pkg/api/handlers/compat/images.go @@ -1,6 +1,7 @@ package compat import ( + "context" "encoding/json" "fmt" "io" @@ -11,11 +12,13 @@ import ( "github.com/containers/buildah" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" "github.com/containers/podman/v3/libpod" image2 "github.com/containers/podman/v3/libpod/image" "github.com/containers/podman/v3/pkg/api/handlers" "github.com/containers/podman/v3/pkg/api/handlers/utils" "github.com/containers/podman/v3/pkg/auth" + "github.com/containers/podman/v3/pkg/channel" "github.com/containers/podman/v3/pkg/domain/entities" "github.com/containers/podman/v3/pkg/util" "github.com/gorilla/schema" @@ -236,33 +239,103 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) { if sys := runtime.SystemContext(); sys != nil { registryOpts.DockerCertPath = sys.DockerCertPath } - img, err := runtime.ImageRuntime().New(r.Context(), - fromImage, - "", // signature policy - authfile, - nil, // writer - ®istryOpts, - image2.SigningOptions{}, - nil, // label - util.PullImageAlways, - ) - if err != nil { - utils.Error(w, "Something went wrong.", http.StatusInternalServerError, err) - return + + stderr := channel.NewWriter(make(chan []byte)) + defer stderr.Close() + + progress := make(chan types.ProgressProperties) + + var img string + runCtx, cancel := context.WithCancel(context.Background()) + go func() { + defer cancel() + + newImage, err := runtime.ImageRuntime().New( + runCtx, + fromImage, + "", // signature policy + authfile, + nil, // writer + ®istryOpts, + image2.SigningOptions{}, + nil, // label + util.PullImageAlways, + progress) + if err != nil { + stderr.Write([]byte(err.Error() + "\n")) + } else { + img = 
newImage.ID() + } + }() + + flush := func() { + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } } - // Success - utils.WriteResponse(w, http.StatusOK, struct { - Status string `json:"status"` - Error string `json:"error,omitempty"` - Progress string `json:"progress"` - ProgressDetail map[string]string `json:"progressDetail"` - Id string `json:"id"` // nolint - }{ - Status: fmt.Sprintf("pulling image (%s) from %s (Download complete)", img.Tag, strings.Join(img.Names(), ", ")), - ProgressDetail: map[string]string{}, - Id: img.ID(), - }) + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + flush() + + enc := json.NewEncoder(w) + enc.SetEscapeHTML(true) + var failed bool + +loop: // break out of for/select infinite loop + for { + var report struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress struct { + Current uint64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + } `json:"progressDetail,omitempty"` + Error string `json:"error,omitempty"` + Id string `json:"id,omitempty"` // nolint + } + + select { + case e := <-progress: + switch e.Event { + case types.ProgressEventNewArtifact: + report.Status = "Pulling fs layer" + case types.ProgressEventRead: + report.Status = "Downloading" + report.Progress.Current = e.Offset + report.Progress.Total = e.Artifact.Size + case types.ProgressEventSkipped: + report.Status = "Already exists" + case types.ProgressEventDone: + report.Status = "Download complete" + } + report.Id = e.Artifact.Digest.Encoded()[0:12] + if err := enc.Encode(report); err != nil { + stderr.Write([]byte(err.Error())) + } + flush() + case e := <-stderr.Chan(): + failed = true + report.Error = string(e) + if err := enc.Encode(report); err != nil { + logrus.Warnf("Failed to json encode error %q", err.Error()) + } + flush() + case <-runCtx.Done(): + if !failed { + report.Status = "Pull complete" + report.Id = img[0:12] + if err := enc.Encode(report); err != nil { + logrus.Warnf("Failed to json encode error %q", err.Error()) + } + flush() + } + break loop // break out of for/select infinite loop + case <-r.Context().Done(): + // Client has closed connection + break loop // break out of for/select infinite loop + } + } } func GetImage(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go index d79b100e8..009fcf7e8 100644 --- a/pkg/api/handlers/compat/images_build.go +++ b/pkg/api/handlers/compat/images_build.go @@ -104,6 +104,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { Squash bool `schema:"squash"` Tag []string `schema:"t"` Target string `schema:"target"` + Timestamp int64 `schema:"timestamp"` }{ Dockerfile: "Dockerfile", Registry: "docker.io", @@ -318,6 +319,11 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { Target: query.Target, } + if _, found := r.URL.Query()["timestamp"]; found { + ts := time.Unix(query.Timestamp, 0) + buildOptions.Timestamp = &ts + } + runCtx, cancel := context.WithCancel(context.Background()) var imageID string go func() { diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go index 1a04b4289..28e90ac28 100644 --- a/pkg/api/handlers/compat/networks.go +++ b/pkg/api/handlers/compat/networks.go @@ -180,16 +180,18 @@ func findPluginByName(plugins []*libcni.NetworkConfig, pluginType string) ([]byt func ListNetworks(w http.ResponseWriter, r *http.Request) { runtime := 
r.Context().Value("runtime").(*libpod.Runtime) - decoder := r.Context().Value("decoder").(*schema.Decoder) - query := struct { - Filters map[string][]string `schema:"filters"` - }{ - // override any golang type defaults - } - if err := decoder.Decode(&query, r.URL.Query()); err != nil { + filters, err := filtersFromRequest(r) + if err != nil { utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String())) return } + filterMap := map[string][]string{} + for _, filter := range filters { + split := strings.SplitN(filter, "=", 2) + if len(split) > 1 { + filterMap[split[0]] = append(filterMap[split[0]], split[1]) + } + } config, err := runtime.GetConfig() if err != nil { utils.InternalServerError(w, err) @@ -205,7 +207,7 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) { reports := []*types.NetworkResource{} logrus.Debugf("netNames: %q", strings.Join(netNames, ", ")) for _, name := range netNames { - report, err := getNetworkResourceByNameOrID(name, runtime, query.Filters) + report, err := getNetworkResourceByNameOrID(name, runtime, filterMap) if err != nil { utils.InternalServerError(w, err) return diff --git a/pkg/api/handlers/compat/secrets.go b/pkg/api/handlers/compat/secrets.go index c5ee8c324..86e3887a4 100644 --- a/pkg/api/handlers/compat/secrets.go +++ b/pkg/api/handlers/compat/secrets.go @@ -40,7 +40,21 @@ func ListSecrets(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } - utils.WriteResponse(w, http.StatusOK, reports) + if utils.IsLibpodRequest(r) { + utils.WriteResponse(w, http.StatusOK, reports) + return + } + // Docker compat expects a version field that increments when the secret is updated + // We currently can't update a secret, so we default the version to 1 + compatReports := make([]entities.SecretInfoReportCompat, 0, len(reports)) + for _, report := range reports { + compatRep := entities.SecretInfoReportCompat{ + SecretInfoReport: *report, + Version: entities.SecretVersion{Index: 1}, + } + compatReports = append(compatReports, compatRep) + } + utils.WriteResponse(w, http.StatusOK, compatReports) } func InspectSecret(w http.ResponseWriter, r *http.Request) { @@ -59,7 +73,21 @@ func InspectSecret(w http.ResponseWriter, r *http.Request) { utils.SecretNotFound(w, name, errs[0]) return } - utils.WriteResponse(w, http.StatusOK, reports[0]) + if len(reports) < 1 { + utils.InternalServerError(w, err) + return + } + if utils.IsLibpodRequest(r) { + utils.WriteResponse(w, http.StatusOK, reports[0]) + return + } + // Docker compat expects a version field that increments when the secret is updated + // We currently can't update a secret, so we default the version to 1 + compatReport := entities.SecretInfoReportCompat{ + SecretInfoReport: *reports[0], + Version: entities.SecretVersion{Index: 1}, + } + utils.WriteResponse(w, http.StatusOK, compatReport) } func RemoveSecret(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/api/handlers/libpod/images_pull.go b/pkg/api/handlers/libpod/images_pull.go index c8b777be4..e2e4b53b4 100644 --- a/pkg/api/handlers/libpod/images_pull.go +++ b/pkg/api/handlers/libpod/images_pull.go @@ -136,7 +136,8 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) { &dockerRegistryOptions, image.SigningOptions{}, nil, - util.PullImageAlways) + util.PullImageAlways, + nil) if err != nil { stderr.Write([]byte(err.Error() + "\n")) } else { diff --git a/pkg/api/server/register_archive.go b/pkg/api/server/register_archive.go index 
2a5cfba0b..2ac126644 100644 --- a/pkg/api/server/register_archive.go +++ b/pkg/api/server/register_archive.go @@ -91,7 +91,7 @@ func (s *APIServer) registerArchiveHandlers(r *mux.Router) error { Libpod */ - // swagger:operation POST /libpod/containers/{name}/archive libpod libpodPutArchive + // swagger:operation PUT /libpod/containers/{name}/archive libpod libpodPutArchive // --- // summary: Copy files into a container // description: Copy a tar archive of files into a container diff --git a/pkg/api/server/register_secrets.go b/pkg/api/server/register_secrets.go index 1c5f5954b..531623845 100644 --- a/pkg/api/server/register_secrets.go +++ b/pkg/api/server/register_secrets.go @@ -115,7 +115,7 @@ func (s *APIServer) registerSecretHandlers(r *mux.Router) error { // parameters: // responses: // '200': - // "$ref": "#/responses/SecretListResponse" + // "$ref": "#/responses/SecretListCompatResponse" // '500': // "$ref": "#/responses/InternalError" r.Handle(VersionedPath("/secrets"), s.APIHandler(compat.ListSecrets)).Methods(http.MethodGet) @@ -158,7 +158,7 @@ func (s *APIServer) registerSecretHandlers(r *mux.Router) error { // - application/json // responses: // '200': - // "$ref": "#/responses/SecretInspectResponse" + // "$ref": "#/responses/SecretInspectCompatResponse" // '404': // "$ref": "#/responses/NoSuchSecret" // '500': diff --git a/pkg/autoupdate/autoupdate.go b/pkg/autoupdate/autoupdate.go index 0cf51e5a6..53095c295 100644 --- a/pkg/autoupdate/autoupdate.go +++ b/pkg/autoupdate/autoupdate.go @@ -304,6 +304,7 @@ func updateImage(runtime *libpod.Runtime, name string, options Options) (*image. image.SigningOptions{}, nil, util.PullImageAlways, + nil, ) if err != nil { return nil, err diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go index 6e16461e5..27706fd2c 100644 --- a/pkg/bindings/images/build.go +++ b/pkg/bindings/images/build.go @@ -185,6 +185,12 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO if options.Squash { params.Set("squash", "1") } + + if options.Timestamp != nil { + t := *options.Timestamp + params.Set("timestamp", strconv.FormatInt(t.Unix(), 10)) + } + var ( headers map[string]string err error diff --git a/pkg/checkpoint/checkpoint_restore.go b/pkg/checkpoint/checkpoint_restore.go index a608762b5..77a993128 100644 --- a/pkg/checkpoint/checkpoint_restore.go +++ b/pkg/checkpoint/checkpoint_restore.go @@ -4,15 +4,14 @@ import ( "context" "io/ioutil" "os" - "path/filepath" + metadata "github.com/checkpoint-restore/checkpointctl/lib" "github.com/containers/podman/v3/libpod" "github.com/containers/podman/v3/libpod/image" "github.com/containers/podman/v3/pkg/domain/entities" "github.com/containers/podman/v3/pkg/errorhandling" "github.com/containers/podman/v3/pkg/util" "github.com/containers/storage/pkg/archive" - jsoniter "github.com/json-iterator/go" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -20,21 +19,6 @@ import ( // Prefixing the checkpoint/restore related functions with 'cr' -// crImportFromJSON imports the JSON files stored in the exported -// checkpoint tarball -func crImportFromJSON(filePath string, v interface{}) error { - content, err := ioutil.ReadFile(filePath) - if err != nil { - return errors.Wrap(err, "failed to read container definition for restore") - } - json := jsoniter.ConfigCompatibleWithStandardLibrary - if err = json.Unmarshal(content, v); err != nil { - return errors.Wrapf(err, "failed to unmarshal container definition %s for 
restore", filePath) - } - - return nil -} - // CRImportCheckpoint it the function which imports the information // from checkpoint tarball and re-creates the container from that information func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOptions entities.RestoreOptions) ([]*libpod.Container, error) { @@ -48,13 +32,13 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt options := &archive.TarOptions{ // Here we only need the files config.dump and spec.dump ExcludePatterns: []string{ - "checkpoint", - "artifacts", - "ctr.log", - "rootfs-diff.tar", - "network.status", - "deleted.files", "volumes", + "ctr.log", + "artifacts", + metadata.RootFsDiffTar, + metadata.DeletedFilesFile, + metadata.NetworkStatusFile, + metadata.CheckpointDirectory, }, } dir, err := ioutil.TempDir("", "checkpoint") @@ -73,13 +57,13 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt // Load spec.dump from temporary directory dumpSpec := new(spec.Spec) - if err := crImportFromJSON(filepath.Join(dir, "spec.dump"), dumpSpec); err != nil { + if _, err := metadata.ReadJSONFile(dumpSpec, dir, metadata.SpecDumpFile); err != nil { return nil, err } // Load config.dump from temporary directory config := new(libpod.ContainerConfig) - if err = crImportFromJSON(filepath.Join(dir, "config.dump"), config); err != nil { + if _, err = metadata.ReadJSONFile(config, dir, metadata.ConfigDumpFile); err != nil { return nil, err } @@ -121,7 +105,7 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt return nil, err } - _, err = runtime.ImageRuntime().New(ctx, config.RootfsImageName, rtc.Engine.SignaturePolicyPath, "", writer, nil, image.SigningOptions{}, nil, util.PullImageMissing) + _, err = runtime.ImageRuntime().New(ctx, config.RootfsImageName, rtc.Engine.SignaturePolicyPath, "", writer, nil, image.SigningOptions{}, nil, util.PullImageMissing, nil) if err != nil { return nil, err } diff --git a/pkg/checkpoint/crutils/checkpoint_restore_utils.go b/pkg/checkpoint/crutils/checkpoint_restore_utils.go new file mode 100644 index 000000000..53ff55865 --- /dev/null +++ b/pkg/checkpoint/crutils/checkpoint_restore_utils.go @@ -0,0 +1,191 @@ +package crutils + +import ( + "io" + "os" + "os/exec" + "path/filepath" + + metadata "github.com/checkpoint-restore/checkpointctl/lib" + "github.com/containers/storage/pkg/archive" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" +) + +// This file mainly exist to make the checkpoint/restore functions +// available for other users. One possible candidate would be CRI-O. + +// CRImportCheckpointWithoutConfig imports the checkpoint archive (input) +// into the directory destination without "config.dump" and "spec.dump" +func CRImportCheckpointWithoutConfig(destination, input string) error { + archiveFile, err := os.Open(input) + if err != nil { + return errors.Wrapf(err, "Failed to open checkpoint archive %s for import", input) + } + + defer archiveFile.Close() + options := &archive.TarOptions{ + ExcludePatterns: []string{ + // Import everything else besides the container config + metadata.ConfigDumpFile, + metadata.SpecDumpFile, + }, + } + if err = archive.Untar(archiveFile, destination, options); err != nil { + return errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input) + } + + return nil +} + +// CRRemoveDeletedFiles loads the list of deleted files and if +// it exists deletes all files listed. 
+func CRRemoveDeletedFiles(id, baseDirectory, containerRootDirectory string) error { + deletedFiles, _, err := metadata.ReadContainerCheckpointDeletedFiles(baseDirectory) + if os.IsNotExist(errors.Unwrap(errors.Unwrap(err))) { + // No files to delete. Just return + return nil + } + + if err != nil { + return errors.Wrapf(err, "failed to read deleted files file") + } + + for _, deleteFile := range deletedFiles { + // Using RemoveAll as deletedFiles, which is generated from 'podman diff' + // lists completely deleted directories as a single entry: 'D /root'. + if err := os.RemoveAll(filepath.Join(containerRootDirectory, deleteFile)); err != nil { + return errors.Wrapf(err, "failed to delete files from container %s during restore", id) + } + } + + return nil +} + +// CRApplyRootFsDiffTar applies the tar archive found in baseDirectory with the +// root file system changes on top of containerRootDirectory +func CRApplyRootFsDiffTar(baseDirectory, containerRootDirectory string) error { + rootfsDiffPath := filepath.Join(baseDirectory, metadata.RootFsDiffTar) + if _, err := os.Stat(rootfsDiffPath); err != nil { + // Only do this if a rootfs-diff.tar actually exists + return nil + } + + rootfsDiffFile, err := os.Open(rootfsDiffPath) + if err != nil { + return errors.Wrap(err, "failed to open root file-system diff file") + } + defer rootfsDiffFile.Close() + + if err := archive.Untar(rootfsDiffFile, containerRootDirectory, nil); err != nil { + return errors.Wrapf(err, "failed to apply root file-system diff file %s", rootfsDiffPath) + } + + return nil +} + +// CRCreateRootFsDiffTar goes through the 'changes' and can create two files: +// * metadata.RootFsDiffTar will contain all new and changed files +// * metadata.DeletedFilesFile will contain a list of deleted files +// With these two files it is possible to restore the container file system to the same +// state it was during checkpointing. +// Changes to directories (owner, mode) are not handled. 
+func CRCreateRootFsDiffTar(changes *[]archive.Change, mountPoint, destination string) (includeFiles []string, err error) {
+ if len(*changes) == 0 {
+ return includeFiles, nil
+ }
+
+ var rootfsIncludeFiles []string
+ var deletedFiles []string
+
+ rootfsDiffPath := filepath.Join(destination, metadata.RootFsDiffTar)
+
+ for _, file := range *changes {
+ if file.Kind == archive.ChangeAdd {
+ rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
+ continue
+ }
+ if file.Kind == archive.ChangeDelete {
+ deletedFiles = append(deletedFiles, file.Path)
+ continue
+ }
+ fileName, err := os.Stat(file.Path)
+ if err != nil {
+ continue
+ }
+ if !fileName.IsDir() && file.Kind == archive.ChangeModify {
+ rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
+ continue
+ }
+ }
+
+ if len(rootfsIncludeFiles) > 0 {
+ rootfsTar, err := archive.TarWithOptions(mountPoint, &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ IncludeSourceDir: true,
+ IncludeFiles: rootfsIncludeFiles,
+ })
+ if err != nil {
+ return includeFiles, errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
+ }
+ rootfsDiffFile, err := os.Create(rootfsDiffPath)
+ if err != nil {
+ return includeFiles, errors.Wrapf(err, "error creating root file-system diff file %q", rootfsDiffPath)
+ }
+ defer rootfsDiffFile.Close()
+ if _, err = io.Copy(rootfsDiffFile, rootfsTar); err != nil {
+ return includeFiles, err
+ }
+
+ includeFiles = append(includeFiles, metadata.RootFsDiffTar)
+ }
+
+ if len(deletedFiles) == 0 {
+ return includeFiles, nil
+ }
+
+ if _, err := metadata.WriteJSONFile(deletedFiles, destination, metadata.DeletedFilesFile); err != nil {
+ return includeFiles, nil
+ }
+
+ includeFiles = append(includeFiles, metadata.DeletedFilesFile)
+
+ return includeFiles, nil
+}
+
+// CRCreateFileWithLabel creates an empty file and sets the corresponding ('fileLabel')
+// SELinux label on the file.
+// This is necessary for CRIU log files because CRIU infects the processes in
+// the container with a 'parasite' and this will also try to write to the log files
+// from the context of the container processes.
+func CRCreateFileWithLabel(directory, fileName, fileLabel string) error {
+ logFileName := filepath.Join(directory, fileName)
+
+ logFile, err := os.OpenFile(logFileName, os.O_CREATE, 0o600)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create file %q", logFileName)
+ }
+ defer logFile.Close()
+ if err = label.SetFileLabel(logFileName, fileLabel); err != nil {
+ return errors.Wrapf(err, "failed to label file %q", logFileName)
+ }
+
+ return nil
+}
+
+// CRRuntimeSupportsCheckpointRestore tests if the given runtime at 'runtimePath'
+// supports checkpointing. The checkpoint/restore interface has no formal definition,
+// but crun implements all commands just as runc does. What runc does is the
+// de facto definition of the checkpoint/restore interface.
+func CRRuntimeSupportsCheckpointRestore(runtimePath string) bool {
+ // Check if the runtime implements checkpointing. Currently only
+ // runc's and crun's checkpoint/restore implementation is supported. 
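+ // Note that this only verifies the runtime binary understands a
+ // 'checkpoint' subcommand; it does not confirm that CRIU itself is
+ // installed or functional.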
+ cmd := exec.Command(runtimePath, "checkpoint", "--help") + if err := cmd.Start(); err != nil { + return false + } + if err := cmd.Wait(); err == nil { + return true + } + return false +} diff --git a/pkg/domain/entities/secrets.go b/pkg/domain/entities/secrets.go index 3481cbe05..8ede981da 100644 --- a/pkg/domain/entities/secrets.go +++ b/pkg/domain/entities/secrets.go @@ -42,6 +42,15 @@ type SecretInfoReport struct { Spec SecretSpec } +type SecretInfoReportCompat struct { + SecretInfoReport + Version SecretVersion +} + +type SecretVersion struct { + Index int +} + type SecretSpec struct { Name string Driver SecretDriverSpec @@ -78,6 +87,13 @@ type SwagSecretListResponse struct { Body []*SecretInfoReport } +// Secret list response +// swagger:response SecretListCompatResponse +type SwagSecretListCompatResponse struct { + // in:body + Body []*SecretInfoReportCompat +} + // Secret inspect response // swagger:response SecretInspectResponse type SwagSecretInspectResponse struct { @@ -85,6 +101,13 @@ type SwagSecretInspectResponse struct { Body SecretInfoReport } +// Secret inspect compat +// swagger:response SecretInspectCompatResponse +type SwagSecretInspectCompatResponse struct { + // in:body + Body SecretInfoReportCompat +} + // No such secret // swagger:response NoSuchSecret type SwagErrNoSuchSecret struct { diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go index 46d967789..562653403 100644 --- a/pkg/domain/infra/abi/images.go +++ b/pkg/domain/infra/abi/images.go @@ -247,7 +247,7 @@ func pull(ctx context.Context, runtime *image.Runtime, rawImage string, options } if !options.AllTags { - newImage, err := runtime.New(ctx, rawImage, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, label, options.PullPolicy) + newImage, err := runtime.New(ctx, rawImage, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, label, options.PullPolicy, nil) if err != nil { return nil, err } @@ -280,7 +280,7 @@ func pull(ctx context.Context, runtime *image.Runtime, rawImage string, options foundIDs := []string{} for _, tag := range tags { name := rawImage + ":" + tag - newImage, err := runtime.New(ctx, name, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, util.PullImageAlways) + newImage, err := runtime.New(ctx, name, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, util.PullImageAlways, nil) if err != nil { logrus.Errorf("error pulling image %q", name) continue diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go index c5e20a607..b7ca69281 100644 --- a/pkg/domain/infra/abi/play.go +++ b/pkg/domain/infra/abi/play.go @@ -221,7 +221,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY } // This ensures the image is the image store - newImage, err := ic.Libpod.ImageRuntime().New(ctx, container.Image, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullPolicy) + newImage, err := ic.Libpod.ImageRuntime().New(ctx, container.Image, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullPolicy, nil) if err != nil { return nil, err } diff --git a/test/apiv2/10-images.at b/test/apiv2/10-images.at index a650cf958..f866422e2 100644 --- a/test/apiv2/10-images.at +++ b/test/apiv2/10-images.at @@ -41,7 +41,7 @@ t GET images/$iid/json 200 \ 
.Id=sha256:$iid \ .RepoTags[0]=$IMAGE -t POST "images/create?fromImage=alpine" '' 200 .error=null .status~".*Download complete.*" +t POST "images/create?fromImage=alpine" '' 200 .error~null .status~".*Download complete.*" t POST "images/create?fromImage=alpine&tag=latest" '' 200 @@ -49,7 +49,7 @@ t POST "images/create?fromImage=alpine&tag=latest" '' 200 old_iid=$(podman image inspect --format "{{.ID}}" docker.io/library/alpine:latest) podman rmi -f docker.io/library/alpine:latest podman tag $IMAGE docker.io/library/alpine:latest -t POST "images/create?fromImage=alpine" '' 200 .error=null .status~".*$old_iid.*" +t POST "images/create?fromImage=alpine" '' 200 .error~null .status~".*$old_iid.*" podman untag $IMAGE docker.io/library/alpine:latest t POST "images/create?fromImage=quay.io/libpod/alpine&tag=sha256:fa93b01658e3a5a1686dc3ae55f170d8de487006fb53a28efcd12ab0710a2e5f" '' 200 diff --git a/test/apiv2/20-containers.at b/test/apiv2/20-containers.at index a99e9a184..18364a47d 100644 --- a/test/apiv2/20-containers.at +++ b/test/apiv2/20-containers.at @@ -31,6 +31,13 @@ t GET libpod/containers/json?all=true 200 \ .[0].ExitCode=0 \ .[0].IsInfra=false +# Test compat API for Network Settings +t GET /containers/json?all=true 200 \ + length=1 \ + .[0].Id~[0-9a-f]\\{64\\} \ + .[0].Image=$IMAGE \ + .[0].NetworkSettings.Networks.podman.NetworkID=podman + # Make sure `limit` works. t GET libpod/containers/json?limit=1 200 \ length=1 \ @@ -206,9 +213,9 @@ t GET 'containers/json?limit=0&all=1' 200 \ t GET containers/json?limit=2 200 length=2 # Filter with two ids should return both container -t GET "containers/json?filters=%7B%22id%22%3A%5B%22${cid}%22%2C%22${cid_top}%22%5D%7D&all=1" 200 length=2 +t GET containers/json?filters='{"id":["'${cid}'","'${cid_top}'"]}&all=1' 200 length=2 # Filter with two ids and status running should return only 1 container -t GET "containers/json?filters=%7B%22id%22%3A%5B%22${cid}%22%2C%22${cid_top}%22%5D%2C%22status%22%3A%5B%22running%22%5D%7D&all=1" 200 \ +t GET containers/json?filters='{"id":["'${cid}'","'${cid_top}'"],"status":["running"]}&all=1' 200 \ length=1 \ .[0].Id=${cid_top} @@ -246,3 +253,13 @@ t GET containers/$cid/json 200 \ .Mounts[0].Destination="/test" t DELETE containers/$cid?v=true 204 + +# test port mapping +podman run -d --rm --name bar -p 8080:9090 $IMAGE top + +t GET containers/json 200 \ + .[0].Ports[0].PrivatePort=9090 \ + .[0].Ports[0].PublicPort=8080 \ + .[0].Ports[0].Type="tcp" + +podman stop bar diff --git a/test/apiv2/30-volumes.at b/test/apiv2/30-volumes.at index b38810039..cf4b3d3ea 100644 --- a/test/apiv2/30-volumes.at +++ b/test/apiv2/30-volumes.at @@ -45,18 +45,17 @@ t GET libpod/volumes/json 200 \ .[0].Name~.* \ .[0].Mountpoint~.* \ .[0].CreatedAt~[0-9]\\{4\\}-[0-9]\\{2\\}-[0-9]\\{2\\}.* -# -G --data-urlencode 'filters={"name":["foo1"]}' -t GET libpod/volumes/json?filters=%7B%22name%22%3A%5B%22foo1%22%5D%7D 200 length=1 .[0].Name=foo1 -# -G --data-urlencode 'filters={"name":["foo1","foo2"]}' -t GET libpod/volumes/json?filters=%7B%22name%22%3A%20%5B%22foo1%22%2C%20%22foo2%22%5D%7D 200 length=2 .[0].Name=foo1 .[1].Name=foo2 -# -G --data-urlencode 'filters={"name":["nonexistent"]}' -t GET libpod/volumes/json?filters=%7B%22name%22%3A%5B%22nonexistent%22%5D%7D 200 length=0 -# -G --data-urlencode 'filters={"label":["testlabel"]}' -t GET libpod/volumes/json?filters=%7B%22label%22:%5B%22testlabel%22%5D%7D 200 length=2 -# -G --data-urlencode 'filters={"label":["testlabel=testonly"]}' -t GET 
libpod/volumes/json?filters=%7B%22label%22:%5B%22testlabel=testonly%22%5D%7D 200 length=1 -# -G --data-urlencode 'filters={"label":["testlabel1=testonly"]}' -t GET libpod/volumes/json?filters=%7B%22label%22:%5B%22testlabel1=testonly%22%5D%7D 200 length=1 +t GET libpod/volumes/json?filters='{"name":["foo1"]}' 200 \ + length=1 \ + .[0].Name=foo1 +t GET libpod/volumes/json?filters='{"name":%20["foo1",%20"foo2"]}' 200 \ + length=2 \ + .[0].Name=foo1 \ + .[1].Name=foo2 +t GET libpod/volumes/json?filters='{"name":["nonexistent"]}' 200 length=0 +t GET libpod/volumes/json?filters='{"label":["testlabel"]}' 200 length=2 +t GET libpod/volumes/json?filters='{"label":["testlabel=testonly"]}' 200 length=1 +t GET libpod/volumes/json?filters='{"label":["testlabel1=testonly"]}' 200 length=1 ## inspect volume t GET libpod/volumes/foo1/json 200 \ @@ -79,16 +78,12 @@ t DELETE libpod/volumes/foo1 404 \ .response=404 ## Prune volumes with label matching 'testlabel1=testonly' -# -G --data-urlencode 'filters={"label":["testlabel1=testonly"]}' -t POST libpod/volumes/prune?filters=%7B%22label%22:%5B%22testlabel1=testonly%22%5D%7D "" 200 -# -G --data-urlencode 'filters={"label":["testlabel1=testonly"]}' -t GET libpod/volumes/json?filters=%7B%22label%22:%5B%22testlabel1=testonly%22%5D%7D 200 length=0 +t POST libpod/volumes/prune?filters='{"label":["testlabel1=testonly"]}' "" 200 +t GET libpod/volumes/json?filters='{"label":["testlabel1=testonly"]}' 200 length=0 ## Prune volumes with label matching 'testlabel' -# -G --data-urlencode 'filters={"label":["testlabel"]}' -t POST libpod/volumes/prune?filters=%7B%22label%22:%5B%22testlabel%22%5D%7D "" 200 -# -G --data-urlencode 'filters={"label":["testlabel"]}' -t GET libpod/volumes/json?filters=%7B%22label%22:%5B%22testlabel%22%5D%7D 200 length=0 +t POST libpod/volumes/prune?filters='{"label":["testlabel"]}' "" 200 +t GET libpod/volumes/json?filters='{"label":["testlabel"]}' 200 length=0 ## Prune volumes t POST libpod/volumes/prune "" 200 diff --git a/test/apiv2/35-networks.at b/test/apiv2/35-networks.at index 7ce109913..d3bbaf32b 100644 --- a/test/apiv2/35-networks.at +++ b/test/apiv2/35-networks.at @@ -7,54 +7,52 @@ t GET networks/non-existing-network 404 \ .cause='network not found' t POST libpod/networks/create?name=network1 '' 200 \ -.Filename~.*/network1\\.conflist + .Filename~.*/network1\\.conflist # --data '{"Subnet":{"IP":"10.10.254.0","Mask":[255,255,255,0]},"Labels":{"abc":"val"}}' t POST libpod/networks/create?name=network2 '"Subnet":{"IP":"10.10.254.0","Mask":[255,255,255,0]},"Labels":{"abc":"val"}' 200 \ -.Filename~.*/network2\\.conflist + .Filename~.*/network2\\.conflist # test for empty mask t POST libpod/networks/create '"Subnet":{"IP":"10.10.1.0","Mask":[]}' 500 \ -.cause~'.*cannot be empty' + .cause~'.*cannot be empty' # test for invalid mask t POST libpod/networks/create '"Subnet":{"IP":"10.10.1.0","Mask":[0,255,255,0]}' 500 \ -.cause~'.*mask is invalid' + .cause~'.*mask is invalid' # network list t GET libpod/networks/json 200 -# filters={"name":["network1"]} -t GET libpod/networks/json?filters=%7B%22name%22%3A%5B%22network1%22%5D%7D 200 \ -length=1 \ -.[0].Name=network1 +t GET libpod/networks/json?filters='{"name":["network1"]}' 200 \ + length=1 \ + .[0].Name=network1 t GET networks 200 #network list docker endpoint -#filters={"name":["network1","network2"]} -t GET networks?filters=%7B%22name%22%3A%5B%22network1%22%2C%22network2%22%5D%7D 200 \ -length=2 -#filters={"name":["network"]} -t GET networks?filters=%7B%22name%22%3A%5B%22network%22%5D%7D 200 \ 
-length=2 -# filters={"label":["abc"]} -t GET networks?filters=%7B%22label%22%3A%5B%22abc%22%5D%7D 200 \ -length=1 -# id filter filters={"id":["a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1"]} -t GET networks?filters=%7B%22id%22%3A%5B%22a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1%22%5D%7D 200 \ -length=1 \ -.[0].Name=network1 \ -.[0].Id=a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1 -# invalid filter filters={"dangling":["1"]} -t GET networks?filters=%7B%22dangling%22%3A%5B%221%22%5D%7D 500 \ -.cause='invalid filter "dangling"' +t GET networks?filters='{"name":["network1","network2"]}' 200 \ + length=2 +t GET networks?filters='{"name":["network"]}' 200 \ + length=2 +t GET networks?filters='{"label":["abc"]}' 200 \ + length=1 +# old docker filter type see #9526 +t GET networks?filters='{"label":{"abc":true}}' 200 \ + length=1 +t GET networks?filters='{"id":["a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1"]}' 200 \ + length=1 \ + .[0].Name=network1 \ + .[0].Id=a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1 +# invalid filter +t GET networks?filters='{"dangling":["1"]}' 500 \ + .cause='invalid filter "dangling"' # (#9293 with no networks the endpoint should return empty array instead of null) -t GET networks?filters=%7B%22name%22%3A%5B%22doesnotexists%22%5D%7D 200 \ -"[]" +t GET networks?filters='{"name":["doesnotexists"]}' 200 \ + "[]" # network inspect docker t GET networks/a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1 200 \ -.Name=network1 \ -.Id=a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1 \ -.Scope=local + .Name=network1 \ + .Id=a7662f44d65029fd4635c91feea3d720a57cef52e2a9fcc7772b69072cc1ccd1 \ + .Scope=local # network create docker t POST networks/create '"Name":"net3","IPAM":{"Config":[]}' 201 @@ -63,11 +61,11 @@ t DELETE networks/net3 204 # clean the network t DELETE libpod/networks/network1 200 \ -.[0].Name~network1 \ -.[0].Err=null + .[0].Name~network1 \ + .[0].Err=null t DELETE libpod/networks/network2 200 \ -.[0].Name~network2 \ -.[0].Err=null + .[0].Name~network2 \ + .[0].Err=null # vim: filetype=sh diff --git a/test/apiv2/44-mounts.at b/test/apiv2/44-mounts.at new file mode 100644 index 000000000..fe202576d --- /dev/null +++ b/test/apiv2/44-mounts.at @@ -0,0 +1,21 @@ +# -*- sh -*- + +podman pull $IMAGE &>/dev/null + +# Test various HostConfig options +tmpfs_name="/mytmpfs" +t POST containers/create?name=hostconfig_test '"Image":"'$IMAGE'","Cmd":["df"],"HostConfig":{"TmpFs":{"'$tmpfs_name'":"rw"}}' 201 \ + .Id~[0-9a-f]\\{64\\} +cid=$(jq -r '.Id' <<<"$output") + +# Prior to #9512, the tmpfs would be called '/mytmpfs=rw', with the '=rw' +t GET containers/${cid}/json 200 \ + .HostConfig.Tmpfs[\"${tmpfs_name}\"]~rw, + +# Run the container, verify output +t POST containers/${cid}/start '' 204 +t POST containers/${cid}/wait '' 200 +t GET containers/${cid}/logs?stdout=true 200 + +like "$(<$WORKDIR/curl.result.out)" ".* ${tmpfs_name}" \ + "'df' output includes tmpfs name" diff --git a/test/apiv2/45-system.at b/test/apiv2/45-system.at index 985d86e56..ad4bdf4f7 100644 --- a/test/apiv2/45-system.at +++ b/test/apiv2/45-system.at @@ -49,18 +49,16 @@ t GET libpod/system/df 200 '.Volumes | length=3' # Prune volumes -# -G --data-urlencode 'volumes=true&filters={"label":["testlabel1=idontmatch"]}' -t POST 'libpod/system/prune?volumes=true&filters=%7B%22label%22:%5B%22testlabel1=idontmatch%22%5D%7D' params='' 200 +t POST 
'libpod/system/prune?volumes=true&filters={"label":["testlabel1=idontmatch"]}' params='' 200 # nothing should have been pruned t GET system/df 200 '.Volumes | length=3' t GET libpod/system/df 200 '.Volumes | length=3' -# -G --data-urlencode 'volumes=true&filters={"label":["testlabel1=testonly"]}' # only foo3 should be pruned because of filter -t POST 'libpod/system/prune?volumes=true&filters=%7B%22label%22:%5B%22testlabel1=testonly%22%5D%7D' params='' 200 .VolumePruneReports[0].Id=foo3 +t POST 'libpod/system/prune?volumes=true&filters={"label":["testlabel1=testonly"]}' params='' 200 .VolumePruneReports[0].Id=foo3 # only foo2 should be pruned because of filter -t POST 'libpod/system/prune?volumes=true&filters=%7B%22label%22:%5B%22testlabel1%22%5D%7D' params='' 200 .VolumePruneReports[0].Id=foo2 +t POST 'libpod/system/prune?volumes=true&filters={"label":["testlabel1"]}' params='' 200 .VolumePruneReports[0].Id=foo2 # foo1, the last remaining volume should be pruned without any filters applied t POST 'libpod/system/prune?volumes=true' params='' 200 .VolumePruneReports[0].Id=foo1 diff --git a/test/apiv2/50-secrets.at b/test/apiv2/50-secrets.at index 1ef43381a..c4ffb5883 100644 --- a/test/apiv2/50-secrets.at +++ b/test/apiv2/50-secrets.at @@ -14,18 +14,21 @@ t POST secrets/create '"Name":"mysecret","Data":"c2VjcmV0","Labels":{"fail":"fai t POST secrets/create '"Name":"mysecret","Data":"c2VjcmV0"' 409 # secret inspect -t GET secrets/mysecret 200\ - .Spec.Name=mysecret +t GET secrets/mysecret 200 \ + .Spec.Name=mysecret \ + .Version.Index=1 # secret inspect non-existent secret t GET secrets/bogus 404 # secret list -t GET secrets 200\ - length=1 +t GET secrets 200 \ + length=1 \ + .[0].Spec.Name=mysecret \ + .[0].Version.Index=1 # secret list unsupported filters -t GET secrets?filters=%7B%22name%22%3A%5B%22foo1%22%5D%7D 400 +t GET secrets?filters='{"name":["foo1"]}' 400 # secret rm t DELETE secrets/mysecret 204 diff --git a/test/apiv2/rest_api/test_rest_v2_0_0.py b/test/apiv2/rest_api/test_rest_v2_0_0.py index 05c24f2ea..8a78f5185 100644 --- a/test/apiv2/rest_api/test_rest_v2_0_0.py +++ b/test/apiv2/rest_api/test_rest_v2_0_0.py @@ -64,7 +64,9 @@ class TestApi(unittest.TestCase): super().setUpClass() TestApi.podman = Podman() - TestApi.service = TestApi.podman.open("system", "service", "tcp:localhost:8080", "--time=0") + TestApi.service = TestApi.podman.open( + "system", "service", "tcp:localhost:8080", "--time=0" + ) # give the service some time to be ready... 
time.sleep(2) @@ -241,7 +243,9 @@ class TestApi(unittest.TestCase): def test_post_create_compat(self): """Create network and connect container during create""" - net = requests.post(PODMAN_URL + "/v1.40/networks/create", json={"Name": "TestNetwork"}) + net = requests.post( + PODMAN_URL + "/v1.40/networks/create", json={"Name": "TestNetwork"} + ) self.assertEqual(net.status_code, 201, net.text) create = requests.post( @@ -450,11 +454,15 @@ class TestApi(unittest.TestCase): self.assertIn(k, o) def test_network_compat(self): - name = "Network_" + "".join(random.choice(string.ascii_letters) for i in range(10)) + name = "Network_" + "".join( + random.choice(string.ascii_letters) for i in range(10) + ) # Cannot test for 0 existing networks because default "podman" network always exists - create = requests.post(PODMAN_URL + "/v1.40/networks/create", json={"Name": name}) + create = requests.post( + PODMAN_URL + "/v1.40/networks/create", json={"Name": name} + ) self.assertEqual(create.status_code, 201, create.content) obj = json.loads(create.content) self.assertIn(type(obj), (dict,)) @@ -484,8 +492,12 @@ class TestApi(unittest.TestCase): self.assertEqual(inspect.status_code, 404, inspect.content) # network prune - prune_name = "Network_" + "".join(random.choice(string.ascii_letters) for i in range(10)) - prune_create = requests.post(PODMAN_URL + "/v1.40/networks/create", json={"Name": prune_name}) + prune_name = "Network_" + "".join( + random.choice(string.ascii_letters) for i in range(10) + ) + prune_create = requests.post( + PODMAN_URL + "/v1.40/networks/create", json={"Name": prune_name} + ) self.assertEqual(create.status_code, 201, prune_create.content) prune = requests.post(PODMAN_URL + "/v1.40/networks/prune") @@ -493,9 +505,10 @@ class TestApi(unittest.TestCase): obj = json.loads(prune.content) self.assertTrue(prune_name in obj["NetworksDeleted"]) - def test_volumes_compat(self): - name = "Volume_" + "".join(random.choice(string.ascii_letters) for i in range(10)) + name = "Volume_" + "".join( + random.choice(string.ascii_letters) for i in range(10) + ) ls = requests.get(PODMAN_URL + "/v1.40/volumes") self.assertEqual(ls.status_code, 200, ls.content) @@ -511,7 +524,9 @@ class TestApi(unittest.TestCase): for k in required_keys: self.assertIn(k, obj) - create = requests.post(PODMAN_URL + "/v1.40/volumes/create", json={"Name": name}) + create = requests.post( + PODMAN_URL + "/v1.40/volumes/create", json={"Name": name} + ) self.assertEqual(create.status_code, 201, create.content) # See https://docs.docker.com/engine/api/v1.40/#operation/VolumeCreate @@ -688,15 +703,21 @@ class TestApi(unittest.TestCase): """Verify issue #8865""" pod_name = list() - pod_name.append("Pod_" + "".join(random.choice(string.ascii_letters) for i in range(10))) - pod_name.append("Pod_" + "".join(random.choice(string.ascii_letters) for i in range(10))) + pod_name.append( + "Pod_" + "".join(random.choice(string.ascii_letters) for i in range(10)) + ) + pod_name.append( + "Pod_" + "".join(random.choice(string.ascii_letters) for i in range(10)) + ) r = requests.post( _url("/pods/create"), json={ "name": pod_name[0], "no_infra": False, - "portmappings": [{"host_ip": "127.0.0.1", "host_port": 8889, "container_port": 89}], + "portmappings": [ + {"host_ip": "127.0.0.1", "host_port": 8889, "container_port": 89} + ], }, ) self.assertEqual(r.status_code, 201, r.text) @@ -715,7 +736,9 @@ class TestApi(unittest.TestCase): json={ "name": pod_name[1], "no_infra": False, - "portmappings": [{"host_ip": "127.0.0.1", "host_port": 8889, 
"container_port": 89}], + "portmappings": [ + {"host_ip": "127.0.0.1", "host_port": 8889, "container_port": 89} + ], }, ) self.assertEqual(r.status_code, 201, r.text) diff --git a/test/apiv2/test-apiv2 b/test/apiv2/test-apiv2 index 5b1e2ef80..d545df245 100755 --- a/test/apiv2/test-apiv2 +++ b/test/apiv2/test-apiv2 @@ -188,6 +188,13 @@ function t() { # entrypoint path can include a descriptive comment; strip it off path=${path%% *} + # path may include JSONish params that curl will barf on; url-encode them + path="${path//'['/%5B}" + path="${path//']'/%5D}" + path="${path//'{'/%7B}" + path="${path//'}'/%7D}" + path="${path//':'/%3A}" + # curl -X HEAD but without --head seems to wait for output anyway if [[ $method == "HEAD" ]]; then curl_args="--head" diff --git a/test/e2e/build_test.go b/test/e2e/build_test.go index c733db61c..4839d66ec 100644 --- a/test/e2e/build_test.go +++ b/test/e2e/build_test.go @@ -532,4 +532,20 @@ RUN grep CapEff /proc/self/status` // Then Expect(session.ExitCode()).To(Equal(125)) }) + + It("podman build --timestamp flag", func() { + containerfile := `FROM quay.io/libpod/alpine:latest +RUN echo hello` + + containerfilePath := filepath.Join(podmanTest.TempDir, "Containerfile") + err := ioutil.WriteFile(containerfilePath, []byte(containerfile), 0755) + Expect(err).To(BeNil()) + session := podmanTest.Podman([]string{"build", "-t", "test", "--timestamp", "0", "--file", containerfilePath, podmanTest.TempDir}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + inspect := podmanTest.Podman([]string{"image", "inspect", "--format", "{{ .Created }}", "test"}) + inspect.WaitWithDefaultTimeout() + Expect(inspect.OutputToString()).To(Equal("1970-01-01 00:00:00 +0000 UTC")) + }) }) diff --git a/test/e2e/config/containers.conf b/test/e2e/config/containers.conf index fdf679664..bbd712254 100644 --- a/test/e2e/config/containers.conf +++ b/test/e2e/config/containers.conf @@ -55,6 +55,7 @@ umask = "0002" annotations=["run.oci.keep_original_groups=1",] +no_hosts=true [engine] network_cmd_options=["allow_host_loopback=true"] diff --git a/test/e2e/containers_conf_test.go b/test/e2e/containers_conf_test.go index 9c2260c5f..6b1a0d16e 100644 --- a/test/e2e/containers_conf_test.go +++ b/test/e2e/containers_conf_test.go @@ -331,4 +331,26 @@ var _ = Describe("Podman run", func() { Expect(inspect.OutputToString()).To(ContainSubstring("run.oci.keep_original_groups:1")) }) + It("podman run with --add-host and no-hosts=true fails", func() { + session := podmanTest.Podman([]string{"run", "-dt", "--add-host", "test1:127.0.0.1", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session).To(ExitWithError()) + Expect(session.ErrorToString()).To(ContainSubstring("--no-hosts and --add-host cannot be set together")) + + session = podmanTest.Podman([]string{"run", "-dt", "--add-host", "test1:127.0.0.1", "--no-hosts=false", ALPINE, "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + }) + + It("podman run with no-hosts=true /etc/hosts does not include hostname", func() { + session := podmanTest.Podman([]string{"run", "--rm", "--name", "test", ALPINE, "cat", "/etc/hosts"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(Not(ContainSubstring("test"))) + + session = podmanTest.Podman([]string{"run", "--rm", "--name", "test", "--no-hosts=false", ALPINE, "cat", "/etc/hosts"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + 
Expect(session.OutputToString()).To(ContainSubstring("test")) + }) }) diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go index 8f695279a..3051031a5 100644 --- a/test/e2e/logs_test.go +++ b/test/e2e/logs_test.go @@ -37,16 +37,18 @@ var _ = Describe("Podman logs", func() { }) for _, log := range []string{"k8s-file", "journald", "json-file"} { + It("all lines: "+log, func() { logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) logc.WaitWithDefaultTimeout() Expect(logc).To(Exit(0)) - cid := logc.OutputToString() + results := podmanTest.Podman([]string{"logs", cid}) results.WaitWithDefaultTimeout() Expect(results).To(Exit(0)) Expect(len(results.OutputToStringArray())).To(Equal(3)) + Expect(results.OutputToString()).To(Equal("podman podman podman")) }) It("tail two lines: "+log, func() { @@ -73,6 +75,18 @@ var _ = Describe("Podman logs", func() { Expect(len(results.OutputToStringArray())).To(Equal(0)) }) + It("tail 99 lines: "+log, func() { + logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) + logc.WaitWithDefaultTimeout() + Expect(logc).To(Exit(0)) + cid := logc.OutputToString() + + results := podmanTest.Podman([]string{"logs", "--tail", "99", cid}) + results.WaitWithDefaultTimeout() + Expect(results).To(Exit(0)) + Expect(len(results.OutputToStringArray())).To(Equal(3)) + }) + It("tail 800 lines: "+log, func() { logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "i=1; while [ \"$i\" -ne 1000 ]; do echo \"line $i\"; i=$((i + 1)); done"}) logc.WaitWithDefaultTimeout() @@ -158,78 +172,6 @@ var _ = Describe("Podman logs", func() { Expect(results).To(Exit(0)) }) - It("for container: "+log, func() { - logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) - logc.WaitWithDefaultTimeout() - Expect(logc).To(Exit(0)) - cid := logc.OutputToString() - - results := podmanTest.Podman([]string{"logs", cid}) - results.WaitWithDefaultTimeout() - Expect(results).To(Exit(0)) - Expect(len(results.OutputToStringArray())).To(Equal(3)) - Expect(results.OutputToString()).To(Equal("podman podman podman")) - }) - - It("tail two lines: "+log, func() { - logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) - logc.WaitWithDefaultTimeout() - Expect(logc).To(Exit(0)) - cid := logc.OutputToString() - results := podmanTest.Podman([]string{"logs", "--tail", "2", cid}) - results.WaitWithDefaultTimeout() - Expect(results).To(Exit(0)) - Expect(len(results.OutputToStringArray())).To(Equal(2)) - }) - - It("tail 99 lines: "+log, func() { - logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) - logc.WaitWithDefaultTimeout() - Expect(logc).To(Exit(0)) - cid := logc.OutputToString() - - results := podmanTest.Podman([]string{"logs", "--tail", "99", cid}) - results.WaitWithDefaultTimeout() - Expect(results).To(Exit(0)) - Expect(len(results.OutputToStringArray())).To(Equal(3)) - }) - - It("tail 2 lines with timestamps: "+log, func() { - logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) - logc.WaitWithDefaultTimeout() - Expect(logc).To(Exit(0)) - cid := logc.OutputToString() - - results := podmanTest.Podman([]string{"logs", 
"--tail", "2", "-t", cid}) - results.WaitWithDefaultTimeout() - Expect(results).To(Exit(0)) - Expect(len(results.OutputToStringArray())).To(Equal(2)) - }) - - It("since time 2017-08-07: "+log, func() { - logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) - logc.WaitWithDefaultTimeout() - Expect(logc).To(Exit(0)) - cid := logc.OutputToString() - - results := podmanTest.Podman([]string{"logs", "--since", "2017-08-07T10:10:09.056611202-04:00", cid}) - results.WaitWithDefaultTimeout() - Expect(results).To(Exit(0)) - Expect(len(results.OutputToStringArray())).To(Equal(3)) - }) - - It("with duration 10m: "+log, func() { - logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"}) - logc.WaitWithDefaultTimeout() - Expect(logc).To(Exit(0)) - cid := logc.OutputToString() - - results := podmanTest.Podman([]string{"logs", "--since", "10m", cid}) - results.WaitWithDefaultTimeout() - Expect(results).To(Exit(0)) - Expect(len(results.OutputToStringArray())).To(Equal(3)) - }) - It("streaming output: "+log, func() { containerName := "logs-f-rm" @@ -259,17 +201,6 @@ var _ = Describe("Podman logs", func() { } }) - It("podman logs with log-driver=none errors: "+log, func() { - ctrName := "logsctr" - logc := podmanTest.Podman([]string{"run", "--log-driver", log, "--name", ctrName, "-d", "--log-driver", "none", ALPINE, "top"}) - logc.WaitWithDefaultTimeout() - Expect(logc).To(Exit(0)) - - logs := podmanTest.Podman([]string{"logs", "-f", ctrName}) - logs.WaitWithDefaultTimeout() - Expect(logs).To(Not(Exit(0))) - }) - It("follow output stopped container: "+log, func() { containerName := "logs-f" @@ -373,4 +304,15 @@ var _ = Describe("Podman logs", func() { Expect(err).To(BeNil()) Expect(string(out)).To(ContainSubstring(containerName)) }) + + It("podman logs with log-driver=none errors", func() { + ctrName := "logsctr" + logc := podmanTest.Podman([]string{"run", "--name", ctrName, "-d", "--log-driver", "none", ALPINE, "top"}) + logc.WaitWithDefaultTimeout() + Expect(logc).To(Exit(0)) + + logs := podmanTest.Podman([]string{"logs", "-f", ctrName}) + logs.WaitWithDefaultTimeout() + Expect(logs).To(Not(Exit(0))) + }) }) diff --git a/test/e2e/rename_test.go b/test/e2e/rename_test.go index f19413221..14696c0f6 100644 --- a/test/e2e/rename_test.go +++ b/test/e2e/rename_test.go @@ -89,4 +89,25 @@ var _ = Describe("podman rename", func() { Expect(ps.ExitCode()).To(Equal(0)) Expect(ps.OutputToString()).To(ContainSubstring(newName)) }) + + It("Rename a running container with exec sessions", func() { + ctrName := "testCtr" + ctr := podmanTest.Podman([]string{"run", "-d", "--name", ctrName, ALPINE, "top"}) + ctr.WaitWithDefaultTimeout() + Expect(ctr.ExitCode()).To(Equal(0)) + + exec := podmanTest.Podman([]string{"exec", "-d", ctrName, "top"}) + exec.WaitWithDefaultTimeout() + Expect(exec.ExitCode()).To(Equal(0)) + + newName := "aNewName" + rename := podmanTest.Podman([]string{"rename", ctrName, newName}) + rename.WaitWithDefaultTimeout() + Expect(rename.ExitCode()).To(Equal(0)) + + ps := podmanTest.Podman([]string{"ps", "-aq", "--filter", fmt.Sprintf("name=%s", newName), "--format", "{{ .Names }}"}) + ps.WaitWithDefaultTimeout() + Expect(ps.ExitCode()).To(Equal(0)) + Expect(ps.OutputToString()).To(ContainSubstring(newName)) + }) }) diff --git a/test/python/__init__.py b/test/python/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ 
b/test/python/__init__.py diff --git a/test/python/docker/__init__.py b/test/python/docker/__init__.py index 351834316..da5630eac 100644 --- a/test/python/docker/__init__.py +++ b/test/python/docker/__init__.py @@ -8,7 +8,7 @@ import tempfile from docker import DockerClient -from test.python.docker import constant +from .compat import constant class Podman(object): @@ -39,7 +39,9 @@ class Podman(object): self.cmd.append("--root=" + os.path.join(self.anchor_directory, "crio")) self.cmd.append("--runroot=" + os.path.join(self.anchor_directory, "crio-run")) - os.environ["REGISTRIES_CONFIG_PATH"] = os.path.join(self.anchor_directory, "registry.conf") + os.environ["REGISTRIES_CONFIG_PATH"] = os.path.join( + self.anchor_directory, "registry.conf" + ) p = configparser.ConfigParser() p.read_dict( { @@ -51,10 +53,14 @@ class Podman(object): with open(os.environ["REGISTRIES_CONFIG_PATH"], "w") as w: p.write(w) - os.environ["CNI_CONFIG_PATH"] = os.path.join(self.anchor_directory, "cni", "net.d") + os.environ["CNI_CONFIG_PATH"] = os.path.join( + self.anchor_directory, "cni", "net.d" + ) os.makedirs(os.environ["CNI_CONFIG_PATH"], exist_ok=True) self.cmd.append("--cni-config-dir=" + os.environ["CNI_CONFIG_PATH"]) - cni_cfg = os.path.join(os.environ["CNI_CONFIG_PATH"], "87-podman-bridge.conflist") + cni_cfg = os.path.join( + os.environ["CNI_CONFIG_PATH"], "87-podman-bridge.conflist" + ) # json decoded and encoded to ensure legal json buf = json.loads( """ diff --git a/test/python/docker/README.md b/test/python/docker/compat/README.md index c10fd636d..50796d66b 100644 --- a/test/python/docker/README.md +++ b/test/python/docker/compat/README.md @@ -13,26 +13,26 @@ To run the tests locally in your sandbox (Fedora 32,33): ### Run the entire test suite +All commands are run from the root of the repository. 
+ ```shell -# python3 -m unittest discover test/python/docker +# python3 -m unittest discover -s test/python/docker ``` Passing the -v option to your test script will instruct unittest.main() to enable a higher level of verbosity, and produce detailed output: ```shell -# python3 -m unittest -v discover test/python/docker +# python3 -m unittest -v discover -s test/python/docker ``` ### Run a specific test class ```shell -# cd test/python/docker -# python3 -m unittest -v tests.test_images +# python3 -m unittest -v test.python.docker.compat.test_images.TestImages ``` ### Run a specific test within the test class ```shell -# cd test/python/docker -# python3 -m unittest tests.test_images.TestImages.test_import_image +# python3 -m unittest test.python.docker.compat.test_images.TestImages.test_tag_valid_image ``` diff --git a/test/python/docker/compat/__init__.py b/test/python/docker/compat/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/test/python/docker/compat/__init__.py diff --git a/test/python/docker/common.py b/test/python/docker/compat/common.py index 11f512495..bdc67c287 100644 --- a/test/python/docker/common.py +++ b/test/python/docker/compat/common.py @@ -1,10 +1,12 @@ from docker import DockerClient -from test.python.docker import constant +from test.python.docker.compat import constant def run_top_container(client: DockerClient): - c = client.containers.create(constant.ALPINE, command="top", detach=True, tty=True, name="top") + c = client.containers.create( + constant.ALPINE, command="top", detach=True, tty=True, name="top" + ) c.start() return c.id diff --git a/test/python/docker/constant.py b/test/python/docker/compat/constant.py index 892293c97..892293c97 100644 --- a/test/python/docker/constant.py +++ b/test/python/docker/compat/constant.py diff --git a/test/python/docker/test_containers.py b/test/python/docker/compat/test_containers.py index 337cacd5c..be70efa67 100644 --- a/test/python/docker/test_containers.py +++ b/test/python/docker/compat/test_containers.py @@ -5,7 +5,8 @@ import unittest from docker import DockerClient, errors -from test.python.docker import Podman, common, constant +from test.python.docker import Podman +from test.python.docker.compat import common, constant class TestContainers(unittest.TestCase): @@ -87,9 +88,11 @@ class TestContainers(unittest.TestCase): self.assertEqual(len(containers), 2) def test_start_container_with_random_port_bind(self): - container = self.client.containers.create(image=constant.ALPINE, - name="containerWithRandomBind", - ports={'1234/tcp': None}) + container = self.client.containers.create( + image=constant.ALPINE, + name="containerWithRandomBind", + ports={"1234/tcp": None}, + ) containers = self.client.containers.list(all=True) self.assertTrue(container in containers) diff --git a/test/python/docker/test_images.py b/test/python/docker/compat/test_images.py index f2b6a5190..842e38f31 100644 --- a/test/python/docker/test_images.py +++ b/test/python/docker/compat/test_images.py @@ -7,7 +7,8 @@ import unittest from docker import DockerClient, errors -from test.python.docker import Podman, common, constant +from test.python.docker import Podman +from test.python.docker.compat import common, constant class TestImages(unittest.TestCase): @@ -78,7 +79,9 @@ class TestImages(unittest.TestCase): self.assertEqual(len(self.client.images.list()), 2) # List images with filter - self.assertEqual(len(self.client.images.list(filters={"reference": "alpine"})), 1) + self.assertEqual( + 
len(self.client.images.list(filters={"reference": "alpine"})), 1 + ) def test_search_image(self): """Search for image""" @@ -91,7 +94,7 @@ class TestImages(unittest.TestCase): r = self.client.images.search("bogus/bogus") except: return - self.assertTrue(len(r)==0) + self.assertTrue(len(r) == 0) def test_remove_image(self): """Remove image""" diff --git a/test/python/docker/test_system.py b/test/python/docker/compat/test_system.py index 46b90e5f6..131b18991 100644 --- a/test/python/docker/test_system.py +++ b/test/python/docker/compat/test_system.py @@ -5,7 +5,8 @@ import unittest from docker import DockerClient -from test.python.docker import Podman, common, constant +from test.python.docker import Podman, constant +from test.python.docker.compat import common class TestSystem(unittest.TestCase): diff --git a/test/python/requirements.txt b/test/python/requirements.txt new file mode 100644 index 000000000..ee85bf1d1 --- /dev/null +++ b/test/python/requirements.txt @@ -0,0 +1,6 @@ +docker~=4.4.3 + +requests~=2.20.0 +setuptools~=50.3.2 +python-dateutil~=2.8.1 +PyYAML~=5.4.1 diff --git a/vendor/github.com/checkpoint-restore/checkpointctl/LICENSE b/vendor/github.com/checkpoint-restore/checkpointctl/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/checkpoint-restore/checkpointctl/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
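The checkpointctl library vendored in the next file exposes small JSON helpers and archive-type detection for checkpoint metadata. As a rough, hypothetical usage sketch built only on the functions defined in lib/metadata.go below (the directory path and printed fields are assumptions for illustration, not taken from this patch):

```go
package main

import (
	"fmt"
	"log"

	metadata "github.com/checkpoint-restore/checkpointctl/lib"
)

func main() {
	// Assumed example path to an already-unpacked checkpoint archive.
	dir := "/tmp/checkpoint-extracted"

	// Kubelet archives carry a checkpointed.pods file; anything else is
	// treated as a single-container archive.
	kind, err := metadata.DetectCheckpointArchiveType(dir)
	if err != nil {
		log.Fatal(err)
	}

	if kind == metadata.Container {
		cfg, file, err := metadata.ReadContainerCheckpointConfigDump(dir)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("container %s (image %s) read from %s\n",
			cfg.Name, cfg.RootfsImageName, file)
	}

	// ByteToString renders sizes in human-readable form, e.g. 1536 -> "1.5 KiB".
	fmt.Println(metadata.ByteToString(1536))
}
```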
diff --git a/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go b/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go new file mode 100644 index 000000000..1c74903ad --- /dev/null +++ b/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: Apache-2.0 + +package metadata + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "time" + + cnitypes "github.com/containernetworking/cni/pkg/types/current" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +type CheckpointedPod struct { + PodUID string `json:"io.kubernetes.pod.uid,omitempty"` + ID string `json:"SandboxID,omitempty"` + Name string `json:"io.kubernetes.pod.name,omitempty"` + TerminationGracePeriod int64 `json:"io.kubernetes.pod.terminationGracePeriod,omitempty"` + Namespace string `json:"io.kubernetes.pod.namespace,omitempty"` + ConfigSource string `json:"kubernetes.io/config.source,omitempty"` + ConfigSeen string `json:"kubernetes.io/config.seen,omitempty"` + Manager string `json:"io.container.manager,omitempty"` + Containers []CheckpointedContainer `json:"Containers"` + HostIP string `json:"hostIP,omitempty"` + PodIP string `json:"podIP,omitempty"` + PodIPs []string `json:"podIPs,omitempty"` +} + +type CheckpointedContainer struct { + Name string `json:"io.kubernetes.container.name,omitempty"` + ID string `json:"id,omitempty"` + TerminationMessagePath string `json:"io.kubernetes.container.terminationMessagePath,omitempty"` + TerminationMessagePolicy string `json:"io.kubernetes.container.terminationMessagePolicy,omitempty"` + RestartCounter int32 `json:"io.kubernetes.container.restartCount,omitempty"` + TerminationMessagePathUID string `json:"terminationMessagePathUID,omitempty"` + Image string `json:"Image"` +} + +type CheckpointMetadata struct { + Version int `json:"version"` + CheckpointedPods []CheckpointedPod +} + +const ( + // kubelet archive + CheckpointedPodsFile = "checkpointed.pods" + // container archive + ConfigDumpFile = "config.dump" + SpecDumpFile = "spec.dump" + NetworkStatusFile = "network.status" + CheckpointDirectory = "checkpoint" + RootFsDiffTar = "rootfs-diff.tar" + DeletedFilesFile = "deleted.files" + // pod archive + PodOptionsFile = "pod.options" + PodDumpFile = "pod.dump" +) + +type CheckpointType int + +const ( + // The checkpoint archive contains a kubelet checkpoint + // One or multiple pods and kubelet metadata (checkpointed.pods) + Kubelet CheckpointType = iota + // The checkpoint archive contains one pod including one or multiple containers + Pod + // The checkpoint archive contains a single container + Container + Unknown +) + +// This is a reduced copy of what Podman uses to store checkpoint metadata +type ContainerConfig struct { + ID string `json:"id"` + Name string `json:"name"` + RootfsImageName string `json:"rootfsImageName,omitempty"` + OCIRuntime string `json:"runtime,omitempty"` + CreatedTime time.Time `json:"createdTime"` +} + +// This is metadata stored inside of a Pod checkpoint archive +type CheckpointedPodOptions struct { + Version int `json:"version"` + Containers []string `json:"containers,omitempty"` + MountLabel string `json:"mountLabel"` + ProcessLabel string `json:"processLabel"` +} + +func DetectCheckpointArchiveType(checkpointDirectory string) (CheckpointType, error) { + _, err := os.Stat(filepath.Join(checkpointDirectory, CheckpointedPodsFile)) + if err != nil && !os.IsNotExist(err) { + return Unknown, errors.Wrapf(err, 
"Failed to access %q\n", CheckpointedPodsFile) + } + if os.IsNotExist(err) { + return Container, nil + } + + return Kubelet, nil +} + +func ReadContainerCheckpointSpecDump(checkpointDirectory string) (*spec.Spec, string, error) { + var specDump spec.Spec + specDumpFile, err := ReadJSONFile(&specDump, checkpointDirectory, SpecDumpFile) + + return &specDump, specDumpFile, err +} + +func ReadContainerCheckpointConfigDump(checkpointDirectory string) (*ContainerConfig, string, error) { + var containerConfig ContainerConfig + configDumpFile, err := ReadJSONFile(&containerConfig, checkpointDirectory, ConfigDumpFile) + + return &containerConfig, configDumpFile, err +} + +func ReadContainerCheckpointDeletedFiles(checkpointDirectory string) ([]string, string, error) { + var deletedFiles []string + deletedFilesFile, err := ReadJSONFile(&deletedFiles, checkpointDirectory, DeletedFilesFile) + + return deletedFiles, deletedFilesFile, err +} + +func ReadContainerCheckpointNetworkStatus(checkpointDirectory string) ([]*cnitypes.Result, string, error) { + var networkStatus []*cnitypes.Result + networkStatusFile, err := ReadJSONFile(&networkStatus, checkpointDirectory, NetworkStatusFile) + + return networkStatus, networkStatusFile, err +} + +func ReadKubeletCheckpoints(checkpointsDirectory string) (*CheckpointMetadata, string, error) { + var checkpointMetadata CheckpointMetadata + checkpointMetadataPath, err := ReadJSONFile(&checkpointMetadata, checkpointsDirectory, CheckpointedPodsFile) + + return &checkpointMetadata, checkpointMetadataPath, err +} + +func GetIPFromNetworkStatus(networkStatus []*cnitypes.Result) net.IP { + if len(networkStatus) == 0 { + return nil + } + // Take the first IP address + if len(networkStatus[0].IPs) == 0 { + return nil + } + IP := networkStatus[0].IPs[0].Address.IP + + return IP +} + +func GetMACFromNetworkStatus(networkStatus []*cnitypes.Result) net.HardwareAddr { + if len(networkStatus) == 0 { + return nil + } + // Take the first device with a defined sandbox + if len(networkStatus[0].Interfaces) == 0 { + return nil + } + var MAC net.HardwareAddr + MAC = nil + for _, n := range networkStatus[0].Interfaces { + if n.Sandbox != "" { + MAC, _ = net.ParseMAC(n.Mac) + + break + } + } + + return MAC +} + +// WriteJSONFile marshalls and writes the given data to a JSON file +func WriteJSONFile(v interface{}, dir, file string) (string, error) { + fileJSON, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", errors.Wrapf(err, "Error marshalling JSON") + } + file = filepath.Join(dir, file) + if err := ioutil.WriteFile(file, fileJSON, 0o600); err != nil { + return "", errors.Wrapf(err, "Error writing to %q", file) + } + + return file, nil +} + +func ReadJSONFile(v interface{}, dir, file string) (string, error) { + file = filepath.Join(dir, file) + content, err := ioutil.ReadFile(file) + if err != nil { + return "", errors.Wrapf(err, "failed to read %s", file) + } + if err = json.Unmarshal(content, v); err != nil { + return "", errors.Wrapf(err, "failed to unmarshal %s", file) + } + + return file, nil +} + +func WriteKubeletCheckpointsMetadata(checkpointMetadata *CheckpointMetadata, dir string) error { + _, err := WriteJSONFile(checkpointMetadata, dir, CheckpointedPodsFile) + + return err +} + +func ByteToString(b int64) string { + const unit = 1024 + if b < unit { + return fmt.Sprintf("%d B", b) + } + div, exp := int64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + + return fmt.Sprintf("%.1f %ciB", + float64(b)/float64(div), 
"KMGTPE"[exp]) +} diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go index d9c1d37db..b38340126 100644 --- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go +++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go @@ -198,6 +198,11 @@ func InitCNI(defaultNetName string, confDir string, binDirs ...string) (CNIPlugi return initCNI(nil, "", defaultNetName, confDir, binDirs...) } +// InitCNIWithCache works like InitCNI except that it takes the cni cache directory as third param. +func InitCNIWithCache(defaultNetName, confDir, cacheDir string, binDirs ...string) (CNIPlugin, error) { + return initCNI(nil, cacheDir, defaultNetName, confDir, binDirs...) +} + // Internal function to allow faking out exec functions for testing func initCNI(exec cniinvoke.Exec, cacheDir, defaultNetName string, confDir string, binDirs ...string) (CNIPlugin, error) { if confDir == "" { @@ -208,7 +213,7 @@ func initCNI(exec cniinvoke.Exec, cacheDir, defaultNetName string, confDir strin } plugin := &cniNetworkPlugin{ - cniConfig: libcni.NewCNIConfig(binDirs, exec), + cniConfig: libcni.NewCNIConfigWithCacheDir(binDirs, cacheDir, exec), defaultNetName: netName{ name: defaultNetName, // If defaultNetName is not assigned in initialization, @@ -275,13 +280,19 @@ func loadNetworks(confDir string, cni *libcni.CNIConfig) (map[string]*cniNetwork if strings.HasSuffix(confFile, ".conflist") { confList, err = libcni.ConfListFromFile(confFile) if err != nil { - logrus.Errorf("Error loading CNI config list file %s: %v", confFile, err) + // do not log ENOENT errors + if !os.IsNotExist(err) { + logrus.Errorf("Error loading CNI config list file %s: %v", confFile, err) + } continue } } else { conf, err := libcni.ConfFromFile(confFile) if err != nil { - logrus.Errorf("Error loading CNI config file %s: %v", confFile, err) + // do not log ENOENT errors + if !os.IsNotExist(err) { + logrus.Errorf("Error loading CNI config file %s: %v", confFile, err) + } continue } if conf.Network.Type == "" { @@ -468,7 +479,7 @@ func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, fromCache } } - rt, err := buildCNIRuntimeConf(plugin.cacheDir, podNetwork, ifName, podNetwork.RuntimeConfig[network.Name]) + rt, err := buildCNIRuntimeConf(podNetwork, ifName, podNetwork.RuntimeConfig[network.Name]) if err != nil { logrus.Errorf("error building CNI runtime config: %v", err) return err @@ -489,8 +500,15 @@ func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, fromCache if cniNet == nil { cniNet, err = plugin.getNetwork(network.Name) if err != nil { - logrus.Errorf(err.Error()) - return err + // try to load the networks again + if err2 := plugin.syncNetworkConfig(); err2 != nil { + logrus.Error(err2) + return err + } + cniNet, err = plugin.getNetwork(network.Name) + if err != nil { + return err + } } } @@ -775,13 +793,12 @@ func (network *cniNetwork) deleteFromNetwork(ctx context.Context, rt *libcni.Run return nil } -func buildCNIRuntimeConf(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig) (*libcni.RuntimeConf, error) { +func buildCNIRuntimeConf(podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig) (*libcni.RuntimeConf, error) { logrus.Infof("Got pod network %+v", podNetwork) rt := &libcni.RuntimeConf{ ContainerID: podNetwork.ID, NetNS: podNetwork.NetNS, - CacheDir: cacheDir, IfName: ifName, Args: [][2]string{ {"IgnoreUnknown", "1"}, diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go 
b/vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go new file mode 100644 index 000000000..b6779bf70 --- /dev/null +++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go @@ -0,0 +1,36 @@ +package api + +import "net" + +const ( + // Version of the REST API, not implementation version. + // See openapi.yaml for the definition. + Version = "1.1.0" +) + +// ErrorJSON is returned with "application/json" content type and non-2XX status code +type ErrorJSON struct { + Message string `json:"message"` +} + +// Info is the structure returned by `GET /info` +type Info struct { + APIVersion string `json:"apiVersion"` // REST API version + Version string `json:"version"` // Implementation version + StateDir string `json:"stateDir"` + ChildPID int `json:"childPID"` + NetworkDriver *NetworkDriverInfo `json:"networkDriver,omitempty"` + PortDriver *PortDriverInfo `json:"portDriver,omitempty"` +} + +// NetworkDriverInfo in Info +type NetworkDriverInfo struct { + Driver string `json:"driver"` + DNS []net.IP `json:"dns,omitempty"` +} + +// PortDriverInfo in Info +type PortDriverInfo struct { + Driver string `json:"driver"` + Protos []string `json:"protos"` +} diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml b/vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml new file mode 100644 index 000000000..6a6550c33 --- /dev/null +++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml @@ -0,0 +1,161 @@ +# When you made a change to this YAML, please validate with https://editor.swagger.io +openapi: 3.0.3 +info: + version: 1.1.0 + title: RootlessKit API +servers: + - url: 'http://rootlesskit/v1' + description: Local UNIX socket server. The host part of the URL is ignored. +paths: +# /info: API >= 1.1.0 + /info: + get: + responses: + '200': + description: Info. Available since API 1.1.0. + content: + application/json: + schema: + $ref: '#/components/schemas/Info' + /ports: + get: + responses: + '200': + description: An array of PortStatus + content: + application/json: + schema: + $ref: '#/components/schemas/PortStatuses' + post: + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/PortSpec' + responses: + '201': + description: PortStatus with ID + content: + application/json: + schema: + $ref: '#/components/schemas/PortStatus' + '/ports/{id}': + delete: + parameters: + - name: id + in: path + required: true + schema: + type: integer + format: int64 + responses: + '200': + description: Null response +components: + schemas: + Proto: + type: string + description: "protocol for listening. Corresponds to Go's net.Listen. The strings with \"4\" and \"6\" suffixes were introduced in API 1.1.0." 
+ enum: + - tcp + - tcp4 + - tcp6 + - udp + - udp4 + - udp6 + - sctp + - sctp4 + - sctp6 + PortSpec: + required: + - proto + properties: + proto: + $ref: '#/components/schemas/Proto' + parentIP: + type: string + parentPort: + type: integer + format: int32 + minimum: 1 + maximum: 65535 + childIP: + type: string +# future version may support requests with parentPort<=0 for automatic port assignment + childPort: + type: integer + format: int32 + minimum: 1 + maximum: 65535 + PortStatus: + required: + - id + properties: + id: + type: integer + format: int64 + spec: + $ref: '#/components/schemas/PortSpec' + PortStatuses: + type: array + items: + $ref: '#/components/schemas/PortStatus' +# Info: API >= 1.1.0 + Info: + required: + - apiVersion + - version + - stateDir + - childPID + properties: + apiVersion: + type: string + description: "API version, without \"v\" prefix" + example: "1.1.0" + version: + type: string + description: "Implementation version, without \"v\" prefix" + example: "0.42.0-beta.1+dev" + stateDir: + type: string + description: "state dir" + example: "/run/user/1000/rootlesskit" + childPID: + type: integer + description: "child PID" + example: 10042 + networkDriver: + $ref: '#/components/schemas/NetworkDriverInfo' + portDriver: + $ref: '#/components/schemas/PortDriverInfo' + NetworkDriverInfo: + required: + - driver + properties: + driver: + type: string + description: "network driver. Empty when --net=host." + example: "slirp4netns" +# TODO: return TAP info + dns: + type: array + description: "DNS addresses" + items: + type: string + example: ["10.0.2.3"] + PortDriverInfo: + required: + - driver + - supportedProtos + properties: + driver: + type: string + description: "port driver" + example: "builtin" + protos: + type: array + description: "The supported protocol strings for listening ports" + example: ["tcp","udp"] + items: + $ref: '#/components/schemas/Proto' diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/child/child.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/child/child.go index fc249c2d9..05dc0303c 100644 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/child/child.go +++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/child/child.go @@ -1,10 +1,11 @@ package child import ( - "fmt" "io" "net" "os" + "strconv" + "strings" "github.com/pkg/errors" "golang.org/x/sys/unix" @@ -101,10 +102,16 @@ func (d *childDriver) handleConnectInit(c *net.UnixConn, req *msg.Request) error func (d *childDriver) handleConnectRequest(c *net.UnixConn, req *msg.Request) error { switch req.Proto { case "tcp": + case "tcp4": + case "tcp6": case "udp": + case "udp4": + case "udp6": default: return errors.Errorf("unknown proto: %q", req.Proto) } + // dialProto does not need "4", "6" suffix + dialProto := strings.TrimSuffix(strings.TrimSuffix(req.Proto, "6"), "4") var dialer net.Dialer ip := req.IP if ip == "" { @@ -114,13 +121,9 @@ func (d *childDriver) handleConnectRequest(c *net.UnixConn, req *msg.Request) er if p == nil { return errors.Errorf("invalid IP: %q", ip) } - p = p.To4() - if p == nil { - return errors.Errorf("unsupported IP (v6?): %s", ip) - } ip = p.String() } - targetConn, err := dialer.Dial(req.Proto, fmt.Sprintf("%s:%d", ip, req.Port)) + targetConn, err := dialer.Dial(dialProto, net.JoinHostPort(ip, strconv.Itoa(req.Port))) if err != nil { return err } diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg/msg.go 
b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg/msg.go index a8c8e0385..a60d99bd9 100644 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg/msg.go +++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg/msg.go @@ -19,7 +19,7 @@ const ( // Request and Response are encoded as JSON with uint32le length header. type Request struct { Type string // "init" or "connect" - Proto string // "tcp" or "udp" + Proto string // "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6" IP string Port int } diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go index f6e5e56ed..e7ce641e1 100644 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go +++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go @@ -15,6 +15,7 @@ import ( "github.com/pkg/errors" + "github.com/rootless-containers/rootlesskit/pkg/api" "github.com/rootless-containers/rootlesskit/pkg/port" "github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg" "github.com/rootless-containers/rootlesskit/pkg/port/builtin/opaque" @@ -56,6 +57,14 @@ type driver struct { nextID int } +func (d *driver) Info(ctx context.Context) (*api.PortDriverInfo, error) { + info := &api.PortDriverInfo{ + Driver: "builtin", + Protos: []string{"tcp", "tcp4", "tcp6", "udp", "udp4", "udp6"}, + } + return info, nil +} + func (d *driver) OpaqueForChild() map[string]string { return map[string]string{ opaque.SocketPath: d.socketPath, @@ -134,9 +143,9 @@ func (d *driver) AddPort(ctx context.Context, spec port.Spec) (*port.Status, err return nil // FIXME } switch spec.Proto { - case "tcp": + case "tcp", "tcp4", "tcp6": err = tcp.Run(d.socketPath, spec, routineStopCh, d.logWriter) - case "udp": + case "udp", "udp4", "udp6": err = udp.Run(d.socketPath, spec, routineStopCh, d.logWriter) default: // NOTREACHED diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go index 9fb801162..7a7a167f1 100644 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go +++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go @@ -13,7 +13,7 @@ import ( ) func Run(socketPath string, spec port.Spec, stopCh <-chan struct{}, logWriter io.Writer) error { - ln, err := net.Listen("tcp", net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort))) + ln, err := net.Listen(spec.Proto, net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort))) if err != nil { fmt.Fprintf(logWriter, "listen: %v\n", err) return err diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go index fbff2b081..0080dd22c 100644 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go +++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go @@ -14,11 +14,11 @@ import ( ) func Run(socketPath string, spec port.Spec, stopCh <-chan struct{}, logWriter io.Writer) error { - addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort))) + addr, err := net.ResolveUDPAddr(spec.Proto, net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort))) if 
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go
index 9fb801162..7a7a167f1 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go
@@ -13,7 +13,7 @@ import (
 )
 
 func Run(socketPath string, spec port.Spec, stopCh <-chan struct{}, logWriter io.Writer) error {
-	ln, err := net.Listen("tcp", net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort)))
+	ln, err := net.Listen(spec.Proto, net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort)))
 	if err != nil {
 		fmt.Fprintf(logWriter, "listen: %v\n", err)
 		return err
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go
index fbff2b081..0080dd22c 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go
@@ -14,11 +14,11 @@ import (
 )
 
 func Run(socketPath string, spec port.Spec, stopCh <-chan struct{}, logWriter io.Writer) error {
-	addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort)))
+	addr, err := net.ResolveUDPAddr(spec.Proto, net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort)))
 	if err != nil {
 		return err
 	}
-	c, err := net.ListenUDP("udp", addr)
+	c, err := net.ListenUDP(spec.Proto, addr)
 	if err != nil {
 		return err
 	}
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go
index 41ec33487..c95bfc7c7 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go
@@ -3,17 +3,20 @@ package port
 import (
 	"context"
 	"net"
+
+	"github.com/rootless-containers/rootlesskit/pkg/api"
 )
 
 type Spec struct {
-	Proto      string `json:"proto,omitempty"`    // either "tcp" or "udp". in future "sctp" will be supported as well.
-	ParentIP   string `json:"parentIP,omitempty"` // IPv4 address. can be empty (0.0.0.0).
+	// Proto is one of ["tcp", "tcp4", "tcp6", "udp", "udp4", "udp6"].
+	// "tcp" may cause listening on both IPv4 and IPv6. (Corresponds to Go's net.Listen .)
+	Proto      string `json:"proto,omitempty"`
+	ParentIP   string `json:"parentIP,omitempty"` // IPv4 or IPv6 address. can be empty (0.0.0.0).
 	ParentPort int    `json:"parentPort,omitempty"`
 	ChildPort  int    `json:"childPort,omitempty"`
-	// ChildIP is an IPv4 address.
+	// ChildIP is an IPv4 or IPv6 address.
 	// Default values:
 	// - builtin driver: 127.0.0.1
-	// - socat driver: 127.0.0.1
 	// - slirp4netns driver: slirp4netns's child IP, e.g., 10.0.2.100
 	ChildIP string `json:"childIP,omitempty"`
 }
@@ -41,6 +44,7 @@ type ChildContext struct {
 // ParentDriver is a driver for the parent process.
 type ParentDriver interface {
 	Manager
+	Info(ctx context.Context) (*api.PortDriverInfo, error)
 	// OpaqueForChild typically consists of socket path
 	// for controlling child from parent
 	OpaqueForChild() map[string]string
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go
index a885a76ca..937932642 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go
@@ -152,7 +152,10 @@ func ValidatePortSpec(spec port.Spec, existingPorts map[int]*port.Status) error
 
 func validateProto(proto string) error {
 	switch proto {
-	case "tcp", "udp", "sctp":
+	case
+		"tcp", "tcp4", "tcp6",
+		"udp", "udp4", "udp6",
+		"sctp", "sctp4", "sctp6":
 		return nil
 	default:
 		return errors.Errorf("unknown proto: %q", proto)
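The Spec comment in the port.go hunk above notes that plain "tcp" may listen on both IPv4 and IPv6, while "tcp4"/"tcp6" (and the udp variants) pin the address family; that is the standard behavior of Go's net.Listen and net.ListenUDP with these network names, which tcp.Run and udp.Run now pass through unchanged. A minimal standalone check, not part of the patch:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        for _, proto := range []string{"tcp", "tcp4", "tcp6"} {
            ln, err := net.Listen(proto, ":0") // ephemeral port on all interfaces
            if err != nil {
                fmt.Println(proto, "listen error:", err)
                continue
            }
            // "tcp" typically yields a wildcard IPv6 (often dual-stack) listener,
            // while "tcp4"/"tcp6" force the corresponding family.
            fmt.Println(proto, "->", ln.Addr())
            ln.Close()
        }
    }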
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 9d0d9b996..1d192693d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -40,6 +40,8 @@ github.com/beorn7/perks/quantile
 github.com/blang/semver
 # github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
 github.com/buger/goterm
+# github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7
+github.com/checkpoint-restore/checkpointctl/lib
 # github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
 github.com/checkpoint-restore/go-criu
 github.com/checkpoint-restore/go-criu/rpc
@@ -235,7 +237,7 @@ github.com/coreos/go-systemd/v22/dbus
 github.com/coreos/go-systemd/v22/internal/dlopen
 github.com/coreos/go-systemd/v22/journal
 github.com/coreos/go-systemd/v22/sdjournal
-# github.com/cri-o/ocicni v0.2.1-0.20201204103948-b6cbe99b9756
+# github.com/cri-o/ocicni v0.2.1-0.20210301205850-541cf7c703cf
 github.com/cri-o/ocicni/pkg/ocicni
 # github.com/cyphar/filepath-securejoin v0.2.2
 github.com/cyphar/filepath-securejoin
@@ -516,7 +518,8 @@ github.com/prometheus/common/model
 # github.com/prometheus/procfs v0.0.3
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
-# github.com/rootless-containers/rootlesskit v0.13.2
+# github.com/rootless-containers/rootlesskit v0.14.0-beta.0
+github.com/rootless-containers/rootlesskit/pkg/api
 github.com/rootless-containers/rootlesskit/pkg/msgutil
 github.com/rootless-containers/rootlesskit/pkg/port
 github.com/rootless-containers/rootlesskit/pkg/port/builtin