99 files changed, 2469 insertions, 1966 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index 80c954ca0..8d915fbfe 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -62,6 +62,10 @@ env: GCE_SSH_USERNAME: cirrus-ci # Name where this repositories cloud resources are located GCP_PROJECT_ID: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f] + RELEASE_GCPJSON: ENCRYPTED[789d8f7e9a5972ce350fd8e60f1032ccbf4a35c3938b604774b711aad280e12c21faf10e25af1e0ba33597ffb9e39e46] + RELEASE_GCPNAME: ENCRYPTED[417d50488a4bd197bcc925ba6574de5823b97e68db1a17e3a5fde4bcf26576987345e75f8d9ea1c15a156b4612c072a1] + RELEASE_GCPROJECT: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f] + # Default VM to use unless set or modified by task @@ -270,6 +274,7 @@ meta_task: BUILDID: "${CIRRUS_BUILD_ID}" REPOREF: "${CIRRUS_CHANGE_IN_REPO}" GCPJSON: ENCRYPTED[950d9c64ad78f7b1f0c7e499b42dc058d2b23aa67e38b315e68f557f2aba0bf83068d4734f7b1e1bdd22deabe99629df] + # needed for output-masking purposes GCPNAME: ENCRYPTED[b05d469a0dba8cb479cb00cc7c1f6747c91d17622fba260a986b976aa6c817d4077eacffd4613d6d5f23afc4084fab1d] GCPPROJECT: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f] @@ -294,9 +299,11 @@ image_prune_task: memory: 1 env: - <<: *meta_env_vars + # order is significant, Cirrus not always overriding alias values as intended GCPJSON: ENCRYPTED[4c11d8e09c904c30fc70eecb95c73dec0ddf19976f9b981a0f80f3f6599e8f990bcef93c253ac0277f200850d98528e7] GCPNAME: ENCRYPTED[7f54557ba6e5a437f11283a53e71baec9ca546f48a9835538cc54d297f79968eb1337d4596a1025b14f9d1c5723fbd29] + GCPPROJECT: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f] + <<: *meta_env_vars timeout_in: 10m @@ -336,9 +343,8 @@ testing_task: unit_test_script: '$SCRIPT_BASE/unit_test.sh |& ${TIMESTAMP}' integration_test_script: '$SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP}' system_test_script: '$SCRIPT_BASE/system_test.sh |& ${TIMESTAMP}' - cache_release_archive_script: >- - [[ "$TEST_REMOTE_CLIENT" == "false" ]] || \ - $SCRIPT_BASE/cache_release_archive.sh |& ${TIMESTAMP} + build_release_script: '$SCRIPT_BASE/build_release.sh |& ${TIMESTAMP}' + upload_release_archive_script: '$SCRIPT_BASE/upload_release_archive.sh |& ${TIMESTAMP}' on_failure: failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh' @@ -376,9 +382,6 @@ testing_crun_task: unit_test_script: '$SCRIPT_BASE/unit_test.sh |& ${TIMESTAMP}' integration_test_script: '$SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP}' system_test_script: '$SCRIPT_BASE/system_test.sh |& ${TIMESTAMP}' - cache_release_archive_script: >- - [[ "$TEST_REMOTE_CLIENT" == "false" ]] || \ - $SCRIPT_BASE/cache_release_archive.sh |& ${TIMESTAMP} on_failure: failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh' @@ -459,14 +462,15 @@ special_testing_cross_task: env: matrix: - SPECIALMODE: 'windows' # See docs - SPECIALMODE: 'darwin' + CROSS_PLATFORM: 'windows' + CROSS_PLATFORM: 'darwin' timeout_in: 20m networking_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/networking.sh' setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}' - cache_release_archive_script: '$SCRIPT_BASE/cache_release_archive.sh |& ${TIMESTAMP}' + build_release_script: '$SCRIPT_BASE/build_release.sh |& ${TIMESTAMP}' + upload_release_archive_script: '$SCRIPT_BASE/upload_release_archive.sh |& ${TIMESTAMP}' on_failure: failed_branch_script: 
'$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh' @@ -502,6 +506,41 @@ special_testing_cgroupv2_task: always: <<: *standardlogs +special_testing_endpoint_task: + + depends_on: + - "gating" + - "varlink_api" + - "vendor" + + only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + + env: + SPECIALMODE: 'endpoint' # See docs + + timeout_in: 20m + + setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}' + integration_test_script: '$SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP}' + + on_failure: + failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh' + + always: + <<: *standardlogs + + +test_building_snap_task: + + depends_on: + - "gating" + + container: + image: yakshaveinc/snapcraft:core18 + snapcraft_script: + - 'apt-get -y update' + - 'cd contrib/snapcraft && snapcraft' + # Test building of new cache-images for future PR testing, in this PR. test_build_cache_images_task: @@ -580,6 +619,9 @@ verify_test_built_images_task: integration_test_script: >- [[ "$PACKER_BUILDER_NAME" == "xfedora-30" ]] || \ $SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP} + build_release_script: >- + [[ "$PACKER_BUILDER_NAME" == "xfedora-30" ]] || \ + '$SCRIPT_BASE/build_release.sh |& ${TIMESTAMP}' system_test_script: >- [[ "$PACKER_BUILDER_NAME" == "xfedora-30" ]] || \ $SCRIPT_BASE/system_test.sh |& ${TIMESTAMP} @@ -594,7 +636,7 @@ success_task: # it blocks PRs from merging if a depends_on task fails only_if: $CIRRUS_BRANCH != $DEST_BRANCH - # ignores any dependent task conditions, include everything except 'release' + # ignores any dependent task conditions depends_on: - "gating" - "vendor" @@ -609,7 +651,9 @@ success_task: - "special_testing_in_podman" - "special_testing_cgroupv2" - "special_testing_cross" + - "special_testing_endpoint" - "test_build_cache_images" + - "test_building_snap" - "verify_test_built_images" env: @@ -623,47 +667,3 @@ success_task: memory: 1 success_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/success.sh |& ${TIMESTAMP}' - - -release_task: - - # Never do this when building images - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' - - # TODO: Uncomment both to not affect pass/fail status of entire job? 
- # allow_failures: $CI == "true" - # skip_notifications: $CI == "true" - - # Must include everything (YAML anchor/alias cannot be used here) - depends_on: - - "gating" - - "vendor" - - "varlink_api" - - "build_each_commit" - - "build_without_cgo" - - "meta" - - "image_prune" - - "testing" - - "testing_crun" - - "special_testing_rootless" - - "special_testing_in_podman" - - "special_testing_cgroupv2" - - "special_testing_cross" - - "test_build_cache_images" - - "verify_test_built_images" - - "success" - - gce_instance: - image_name: "${IMAGE_BUILDER_CACHE_IMAGE_NAME}" - - timeout_in: 30m - - env: - GCPJSON: ENCRYPTED[789d8f7e9a5972ce350fd8e60f1032ccbf4a35c3938b604774b711aad280e12c21faf10e25af1e0ba33597ffb9e39e46] - GCPNAME: ENCRYPTED[417d50488a4bd197bcc925ba6574de5823b97e68db1a17e3a5fde4bcf26576987345e75f8d9ea1c15a156b4612c072a1] - GCPROJECT: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f] - - uncache_release_archives_script: '$SCRIPT_BASE/uncache_release_archives.sh |& ${TIMESTAMP}' - - on_failure: - failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh' diff --git a/.gitignore b/.gitignore index 4f1100d8e..d3e56ecdf 100644 --- a/.gitignore +++ b/.gitignore @@ -20,5 +20,7 @@ __pycache__ /cmd/podman/varlink/iopodman.go .gopathok test/e2e/e2e.coverprofile -/podman*zip +release.txt +podman-remote*.zip podman*.tar.gz +.idea* @@ -49,8 +49,6 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in [func GenerateKube(name: string, service: bool) KubePodService](#GenerateKube) -[func GenerateSystemd(name: string, restart: string, timeout: int, useName: bool) string](#GenerateSystemd) - [func GetAttachSockets(name: string) Sockets](#GetAttachSockets) [func GetContainer(id: string) Container](#GetContainer) @@ -482,11 +480,6 @@ error will be returned. See also [ImportImage](ImportImage). method GenerateKube(name: [string](https://godoc.org/builtin#string), service: [bool](https://godoc.org/builtin#bool)) [KubePodService](#KubePodService)</div> GenerateKube generates a Kubernetes v1 Pod description of a Podman container or pod and its containers. The description is in YAML. See also [ReplayKube](ReplayKube). -### <a name="GenerateSystemd"></a>func GenerateSystemd -<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;"> - -method GenerateSystemd(name: [string](https://godoc.org/builtin#string), restart: [string](https://godoc.org/builtin#string), timeout: [int](https://godoc.org/builtin#int), useName: [bool](https://godoc.org/builtin#bool)) [string](https://godoc.org/builtin#string)</div> - ### <a name="GetAttachSockets"></a>func GetAttachSockets <div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;"> @@ -1368,6 +1361,8 @@ pids [int](https://godoc.org/builtin#int) ### <a name="Create"></a>type Create Create is an input structure for creating containers. 
+args[0] is the image name or id +args[1-] are the new commands if changed args [[]string](#[]string) diff --git a/Dockerfile.fedora b/Dockerfile.fedora index 0f82fdc5c..9b1568b0b 100644 --- a/Dockerfile.fedora +++ b/Dockerfile.fedora @@ -27,36 +27,25 @@ RUN dnf -y install btrfs-progs-devel \ xz \ slirp4netns \ container-selinux \ + containernetworking-plugins \ + iproute \ iptables && dnf clean all -# Install CNI plugins -ENV CNI_COMMIT 485be65581341430f9106a194a98f0f2412245fb -ENV -RUN set -x \ - && export GOPATH="$(mktemp -d)" GOCACHE="$(mktemp -d)" \ - && git clone https://github.com/containernetworking/plugins.git "$GOPATH/src/github.com/containernetworking/plugins" \ - && cd "$GOPATH/src/github.com/containernetworking/plugins" \ - && git checkout -q "$CNI_COMMIT" \ - && ./build_linux.sh \ - && mkdir -p /usr/libexec/cni \ - && cp bin/* /usr/libexec/cni \ - && rm -rf "$GOPATH" - # Install ginkgo RUN set -x \ - && export GOPATH=/go \ + && export GOPATH=/go GOCACHE="$(mktemp -d)" \ && go get -u github.com/onsi/ginkgo/ginkgo \ && install -D -m 755 "$GOPATH"/bin/ginkgo /usr/bin/ # Install gomega RUN set -x \ - && export GOPATH=/go \ + && export GOPATH=/go GOCACHE="$(mktemp -d)" \ && go get github.com/onsi/gomega/... # Install conmon ENV CONMON_COMMIT 6f3572558b97bc60dd8f8c7f0807748e6ce2c440 RUN set -x \ - && export GOPATH="$(mktemp -d)" \ + && export GOPATH="$(mktemp -d)" GOCACHE="$(mktemp -d)" \ && git clone https://github.com/containers/conmon.git "$GOPATH/src/github.com/containers/conmon.git" \ && cd "$GOPATH/src/github.com/containers/conmon.git" \ && git fetch origin --tags \ @@ -1,4 +1,5 @@ export GO111MODULE=off +export GOPROXY=https://proxy.golang.org GO ?= go DESTDIR ?= @@ -81,13 +82,12 @@ LIBSECCOMP_COMMIT := release-2.3 # caller may override in special circumstances if needed. GINKGOTIMEOUT ?= -timeout=90m -RELEASE_VERSION ?= $(shell git fetch --tags && git describe HEAD 2> /dev/null) -RELEASE_NUMBER ?= $(shell echo $(RELEASE_VERSION) | sed 's/-.*//') -RELEASE_DIST ?= $(shell ( source /etc/os-release; echo $$ID )) -RELEASE_DIST_VER ?= $(shell ( source /etc/os-release; echo $$VERSION_ID | cut -d '.' 
-f 1)) -RELEASE_ARCH ?= $(shell go env GOARCH 2> /dev/null) -RELEASE_BASENAME := $(shell basename $(PROJECT)) - +RELEASE_VERSION ?= $(shell hack/get_release_info.sh VERSION) +RELEASE_NUMBER ?= $(shell hack/get_release_info.sh NUMBER) +RELEASE_DIST ?= $(shell hack/get_release_info.sh DIST) +RELEASE_DIST_VER ?= $(shell hack/get_release_info.sh DIST_VER) +RELEASE_ARCH ?= $(shell hack/get_release_info.sh ARCH) +RELEASE_BASENAME := $(shell hack/get_release_info.sh BASENAME) # If GOPATH not specified, use one in the local directory ifeq ($(GOPATH),) @@ -164,11 +164,9 @@ podman: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman podman-remote: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman on remote environment $(GO_BUILD) $(BUILDFLAGS) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS) remoteclient" -o bin/$@ $(PROJECT)/cmd/podman -podman-remote-darwin: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman on remote OSX environment - CGO_ENABLED=0 GOOS=darwin $(GO_BUILD) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "remoteclient containers_image_openpgp exclude_graphdriver_devicemapper" -o bin/$@ $(PROJECT)/cmd/podman - -podman-remote-windows: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman for a remote windows environment - CGO_ENABLED=0 GOOS=windows $(GO_BUILD) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "remoteclient containers_image_openpgp exclude_graphdriver_devicemapper" -o bin/$@.exe $(PROJECT)/cmd/podman +podman-remote-%: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build podman for a specific GOOS + $(eval BINSFX := $(shell test "$*" != "windows" || echo ".exe")) + CGO_ENABLED=0 GOOS=$* $(GO_BUILD) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "remoteclient containers_image_openpgp exclude_graphdriver_devicemapper" -o bin/$@$(BINSFX) $(PROJECT)/cmd/podman local-cross: $(CROSS_BUILD_TARGETS) ## Cross local compilation @@ -182,8 +180,9 @@ clean: ## Clean artifacts rm -rf \ .gopathok \ _output \ - podman*.zip \ - podman*.tar.gz \ + release.txt + $(wildcard podman-remote*.zip) \ + $(wildcard podman*.tar.gz) \ bin \ build \ docs/remote \ @@ -232,7 +231,7 @@ testunit: libpodimage ## Run unittest on the built image localunit: test/goecho/goecho varlink_generate ginkgo \ -r \ - --skipPackage test/e2e,pkg/apparmor \ + --skipPackage test/e2e,pkg/apparmor,test/endpoint \ --cover \ --covermode atomic \ --tags "$(BUILDTAGS)" \ @@ -247,6 +246,9 @@ ginkgo: ginkgo-remote: ginkgo -v -tags "$(BUILDTAGS) remoteclient" $(GINKGOTIMEOUT) -cover -flakeAttempts 3 -progress -trace -noColor test/e2e/. +endpoint: + ginkgo -v -tags "$(BUILDTAGS)" $(GINKGOTIMEOUT) -cover -flakeAttempts 3 -progress -trace -noColor -debug test/endpoint/. 
+ localintegration: varlink_generate test-binaries ginkgo remoteintegration: varlink_generate test-binaries ginkgo-remote @@ -297,23 +299,6 @@ vagrant-check: binaries: varlink_generate podman podman-remote ## Build podman -# Zip archives are supported on all platforms + allows embedding metadata -podman.zip: binaries docs - $(eval TMPDIR := $(shell mktemp -d -p '' $@_XXXX)) - test -n "$(TMPDIR)" - $(MAKE) install "DESTDIR=$(TMPDIR)" "PREFIX=$(TMPDIR)/usr" - # Encoded RELEASE_INFO format depended upon by CI tooling - # X-RELEASE-INFO format depended upon by CI tooling - cd "$(TMPDIR)" && echo \ - "X-RELEASE-INFO: $(RELEASE_BASENAME) $(RELEASE_VERSION) $(RELEASE_DIST) $(RELEASE_DIST_VER) $(RELEASE_ARCH)" | \ - zip --recurse-paths --archive-comment "$(CURDIR)/$@" "./" - -rm -rf "$(TMPDIR)" - -podman-remote-%.zip: podman-remote-% - # Don't label darwin/windows cros-compiles with local distribution & version - echo "X-RELEASE-INFO: podman-remote $(RELEASE_VERSION) $* cc $(RELEASE_ARCH)" | \ - zip --archive-comment "$(CURDIR)/$@" ./bin/$<* - install.catatonit: ./hack/install_catatonit.sh @@ -330,19 +315,58 @@ docs: $(MANPAGES) ## Generate documentation install-podman-remote-docs: docs @(cd docs; ./podman-remote.sh ./remote) +# When publishing releases include critical build-time details +.PHONY: release.txt +release.txt: + # X-RELEASE-INFO format depended upon by automated tooling + echo -n "X-RELEASE-INFO:" > "$@" + for field in "$(RELEASE_BASENAME)" "$(RELEASE_VERSION)" \ + "$(RELEASE_DIST)" "$(RELEASE_DIST_VER)" "$(RELEASE_ARCH)"; do \ + echo -n " $$field"; done >> "$@" + echo "" >> "$@" + +podman-$(RELEASE_NUMBER).tar.gz: binaries docs release.txt + $(eval TMPDIR := $(shell mktemp -d -p '' podman_XXXX)) + $(eval SUBDIR := podman-$(RELEASE_NUMBER)) + mkdir -p "$(TMPDIR)/$(SUBDIR)" + $(MAKE) install.bin install.man install.cni install.systemd "DESTDIR=$(TMPDIR)/$(SUBDIR)" "PREFIX=/usr" + # release.txt location and content depended upon by automated tooling + cp release.txt "$(TMPDIR)/" + tar -czvf $@ --xattrs -C "$(TMPDIR)" "./release.txt" "./$(SUBDIR)" + -rm -rf "$(TMPDIR)" + +# Must call make in-line: Dependency-spec. w/ wild-card also consumes variable value. 
+podman-remote-$(RELEASE_NUMBER)-%.zip: + $(MAKE) podman-remote-$* install-podman-remote-docs release.txt \ + RELEASE_BASENAME=$(shell hack/get_release_info.sh REMOTENAME) \ + RELEASE_DIST=$* RELEASE_DIST_VER="-" + $(eval TMPDIR := $(shell mktemp -d -p '' $podman_remote_XXXX)) + $(eval SUBDIR := podman-$(RELEASE_VERSION)) + $(eval BINSFX := $(shell test "$*" != "windows" || echo ".exe")) + mkdir -p "$(TMPDIR)/$(SUBDIR)" + # release.txt location and content depended upon by automated tooling + cp release.txt "$(TMPDIR)/" + cp ./bin/podman-remote-$*$(BINSFX) "$(TMPDIR)/$(SUBDIR)/podman$(BINSFX)" + cp -r ./docs/remote "$(TMPDIR)/$(SUBDIR)/docs/" + $(eval DOCFILE := $(TMPDIR)/$(SUBDIR)/docs/podman.1) + cp docs/podman-remote.1 "$(DOCFILE)" + sed -i 's/podman\\*-remote/podman/g' "$(DOCFILE)" + sed -i 's/Podman\\*-remote/Podman\ for\ $*/g' "$(DOCFILE)" + sed -i 's/podman\.conf/podman\-remote\.conf/g' "$(DOCFILE)" + sed -i 's/A\ remote\ CLI\ for\ Podman\:\ //g' "$(DOCFILE)" + cd "$(TMPDIR)" && \ + zip --recurse-paths "$(CURDIR)/$@" "./release.txt" "./" + -rm -rf "$(TMPDIR)" + +.PHONY: podman-release +podman-release: + rm -f release.txt + $(MAKE) podman-$(RELEASE_NUMBER).tar.gz -brew-pkg: install-podman-remote-docs podman-remote-darwin - @mkdir -p ./brew - @cp ./bin/podman-remote-darwin ./brew/podman - @cp -r ./docs/remote ./brew/docs/ - @cp docs/podman-remote.1 ./brew/docs/podman.1 - @cp docs/podman-remote.conf.5 ./brew/docs/podman-remote.conf.5 - @sed -i 's/podman\\*-remote/podman/g' ./brew/docs/podman.1 - @sed -i 's/Podman\\*-remote/Podman\ for\ Mac/g' ./brew/docs/podman.1 - @sed -i 's/podman\.conf/podman\-remote\.conf/g' ./brew/docs/podman.1 - @sed -i 's/A\ remote\ CLI\ for\ Podman\:\ //g' ./brew/docs/podman.1 - tar -czvf podman-${RELEASE_NUMBER}.tar.gz ./brew - @rm -rf ./brew +.PHONY: podman-remote-%-release +podman-remote-%-release: + rm -f release.txt + $(MAKE) podman-remote-$(RELEASE_NUMBER)-$*.zip docker-docs: docs (cd docs; ./dckrman.sh *.1) @@ -8,6 +8,9 @@ popularized by Kubernetes. 
Libpod also contains the Pod Manager tool `(Podman)` * [Latest Version: 1.4.4](https://github.com/containers/libpod/releases/latest) * [Continuous Integration:](contrib/cirrus/README.md) [![Build Status](https://api.cirrus-ci.com/github/containers/libpod.svg)](https://cirrus-ci.com/github/containers/libpod/master) * [GoDoc: ![GoDoc](https://godoc.org/github.com/containers/libpod/libpod?status.svg)](https://godoc.org/github.com/containers/libpod/libpod) +* Automated continuous release downloads (including remote-client): + * Master Branch: [https://storage.cloud.google.com/libpod-master-releases/](https://storage.cloud.google.com/libpod-master-releases/) + * Pull-requests: [https://storage.cloud.google.com/libpod-pr-releases/](https://storage.cloud.google.com/libpod-pr-releases/) ## Overview and scope diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go index e7ad921da..98e7aed4b 100644 --- a/cmd/podman/cliconfig/config.go +++ b/cmd/podman/cliconfig/config.go @@ -156,6 +156,7 @@ type GenerateKubeValues struct { type GenerateSystemdValues struct { PodmanCommand Name bool + Files bool RestartPolicy string StopTimeout int } @@ -423,6 +424,7 @@ type PushValues struct { CertDir string Compress bool Creds string + Digestfile string Format string Quiet bool RemoveSignatures bool @@ -507,6 +509,7 @@ type SignValues struct { PodmanCommand Directory string SignBy string + CertDir string } type StartValues struct { diff --git a/cmd/podman/commands.go b/cmd/podman/commands.go index e23918a5b..77c76d1b7 100644 --- a/cmd/podman/commands.go +++ b/cmd/podman/commands.go @@ -11,6 +11,7 @@ const remoteclient = false // Commands that the local client implements func getMainCommands() []*cobra.Command { rootCommands := []*cobra.Command{ + _cpCommand, _playCommand, _loginCommand, _logoutCommand, @@ -39,6 +40,7 @@ func getImageSubCommands() []*cobra.Command { func getContainerSubCommands() []*cobra.Command { return []*cobra.Command{ + _cpCommand, _cleanupCommand, _mountCommand, _refreshCommand, diff --git a/cmd/podman/container.go b/cmd/podman/container.go index 557f5fafa..66b58f06e 100644 --- a/cmd/podman/container.go +++ b/cmd/podman/container.go @@ -55,7 +55,6 @@ var ( _commitCommand, _containerExistsCommand, _contInspectSubCommand, - _cpCommand, _diffCommand, _execCommand, _exportCommand, diff --git a/cmd/podman/cp.go b/cmd/podman/cp.go index ad7253ac0..5e1ca8312 100644 --- a/cmd/podman/cp.go +++ b/cmd/podman/cp.go @@ -55,7 +55,6 @@ func init() { flags.BoolVar(&cpCommand.Pause, "pause", false, "Pause the container while copying") cpCommand.SetHelpTemplate(HelpTemplate()) cpCommand.SetUsageTemplate(UsageTemplate()) - rootCmd.AddCommand(cpCommand.Command) } func cpCmd(c *cliconfig.CpValues) error { diff --git a/cmd/podman/generate.go b/cmd/podman/generate.go index 98bfb00a1..196556bc5 100644 --- a/cmd/podman/generate.go +++ b/cmd/podman/generate.go @@ -18,11 +18,14 @@ var ( // Commands that are universally implemented generateCommands = []*cobra.Command{ _containerKubeCommand, - _containerSystemdCommand, } ) func init() { + // Systemd-service generation is not supported for remote-clients. + if !remoteclient { + generateCommands = append(generateCommands, _containerSystemdCommand) + } generateCommand.Command = _generateCommand generateCommand.AddCommand(generateCommands...) 
generateCommand.SetUsageTemplate(UsageTemplate()) diff --git a/cmd/podman/generate_systemd.go b/cmd/podman/generate_systemd.go index 222fc4c98..aa202a68d 100644 --- a/cmd/podman/generate_systemd.go +++ b/cmd/podman/generate_systemd.go @@ -5,7 +5,6 @@ import ( "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/pkg/adapter" - "github.com/containers/libpod/pkg/systemdgen" "github.com/pkg/errors" "github.com/spf13/cobra" ) @@ -40,7 +39,10 @@ func init() { containerSystemdCommand.SetHelpTemplate(HelpTemplate()) containerSystemdCommand.SetUsageTemplate(UsageTemplate()) flags := containerSystemdCommand.Flags() - flags.BoolVarP(&containerSystemdCommand.Name, "name", "n", false, "use the container name instead of ID") + flags.BoolVarP(&containerSystemdCommand.Name, "name", "n", false, "use the container/pod name instead of ID") + if !remoteclient { + flags.BoolVarP(&containerSystemdCommand.Files, "files", "f", false, "generate files instead of printing to stdout") + } flags.IntVarP(&containerSystemdCommand.StopTimeout, "timeout", "t", -1, "stop timeout override") flags.StringVar(&containerSystemdCommand.RestartPolicy, "restart-policy", "on-failure", "applicable systemd restart-policy") } @@ -56,10 +58,6 @@ func generateSystemdCmd(c *cliconfig.GenerateSystemdValues) error { if c.Flag("timeout").Changed && c.StopTimeout < 0 { return errors.New("timeout value must be 0 or greater") } - // Make sure the input restart policy is valid - if err := systemdgen.ValidateRestartPolicy(c.RestartPolicy); err != nil { - return err - } unit, err := runtime.GenerateSystemd(c) if err != nil { diff --git a/cmd/podman/main_local.go b/cmd/podman/main_local.go index 587c8260f..648dc166e 100644 --- a/cmd/podman/main_local.go +++ b/cmd/podman/main_local.go @@ -140,7 +140,7 @@ func setupRootless(cmd *cobra.Command, args []string) error { became, ret, err := rootless.TryJoinFromFilePaths("", false, []string{pausePidPath}) if err != nil { logrus.Errorf("cannot join pause process. 
You may need to remove %s and stop all containers", pausePidPath) - logrus.Errorf("you can use `system migrate` to recreate the pause process") + logrus.Errorf("you can use `%s system migrate` to recreate the pause process", os.Args[0]) logrus.Errorf(err.Error()) os.Exit(1) } diff --git a/cmd/podman/push.go b/cmd/podman/push.go index 43df8c2de..13ebe8a1f 100644 --- a/cmd/podman/push.go +++ b/cmd/podman/push.go @@ -51,6 +51,7 @@ func init() { pushCommand.SetUsageTemplate(UsageTemplate()) flags := pushCommand.Flags() flags.StringVar(&pushCommand.Creds, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry") + flags.StringVar(&pushCommand.Digestfile, "digestfile", "", "After copying the image, write the digest of the resulting image to the file") flags.StringVarP(&pushCommand.Format, "format", "f", "", "Manifest type (oci, v2s1, or v2s2) to use when pushing an image using the 'dir:' transport (default is manifest type of source)") flags.BoolVarP(&pushCommand.Quiet, "quiet", "q", false, "Don't output progress information when pushing images") flags.BoolVar(&pushCommand.RemoveSignatures, "remove-signatures", false, "Discard any pre-existing signatures in the image") @@ -143,5 +144,5 @@ func pushCmd(c *cliconfig.PushValues) error { SignBy: signBy, } - return runtime.Push(getContext(), srcName, destName, manifestType, c.Authfile, c.SignaturePolicy, writer, c.Compress, so, &dockerRegistryOptions, nil) + return runtime.Push(getContext(), srcName, destName, manifestType, c.Authfile, c.String("digestfile"), c.SignaturePolicy, writer, c.Compress, so, &dockerRegistryOptions, nil) } diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go index 094330e24..acbd53dba 100644 --- a/cmd/podman/shared/create.go +++ b/cmd/podman/shared/create.go @@ -81,7 +81,7 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod. 
if len(c.InputArgs) != 0 { name = c.InputArgs[0] } else { - return nil, nil, errors.Errorf("error, no input arguments were provided") + return nil, nil, errors.Errorf("error, image name not provided") } pullType, err := util.ValidatePullType(c.String("pull")) diff --git a/cmd/podman/shared/intermediate.go b/cmd/podman/shared/intermediate.go index c6c32f8a9..5aaac8687 100644 --- a/cmd/podman/shared/intermediate.go +++ b/cmd/podman/shared/intermediate.go @@ -114,7 +114,7 @@ func (f GenericCLIResults) findResult(flag string) GenericCLIResult { if ok { return val } - logrus.Errorf("unable to find flag %s", flag) + logrus.Debugf("unable to find flag %s", flag) return nil } @@ -366,12 +366,10 @@ func NewIntermediateLayer(c *cliconfig.PodmanCommand, remote bool) GenericCLIRes m["add-host"] = newCRStringSlice(c, "add-host") m["annotation"] = newCRStringSlice(c, "annotation") m["attach"] = newCRStringSlice(c, "attach") - m["authfile"] = newCRString(c, "authfile") m["blkio-weight"] = newCRString(c, "blkio-weight") m["blkio-weight-device"] = newCRStringSlice(c, "blkio-weight-device") m["cap-add"] = newCRStringSlice(c, "cap-add") m["cap-drop"] = newCRStringSlice(c, "cap-drop") - m["cgroupns"] = newCRString(c, "cgroupns") m["cgroup-parent"] = newCRString(c, "cgroup-parent") m["cidfile"] = newCRString(c, "cidfile") m["conmon-pidfile"] = newCRString(c, "conmon-pidfile") @@ -395,7 +393,6 @@ func NewIntermediateLayer(c *cliconfig.PodmanCommand, remote bool) GenericCLIRes m["dns-search"] = newCRStringSlice(c, "dns-search") m["entrypoint"] = newCRString(c, "entrypoint") m["env"] = newCRStringArray(c, "env") - m["env-host"] = newCRBool(c, "env-host") m["env-file"] = newCRStringSlice(c, "env-file") m["expose"] = newCRStringSlice(c, "expose") m["gidmap"] = newCRStringSlice(c, "gidmap") @@ -407,7 +404,6 @@ func NewIntermediateLayer(c *cliconfig.PodmanCommand, remote bool) GenericCLIRes m["healthcheck-start-period"] = newCRString(c, "health-start-period") m["healthcheck-timeout"] = newCRString(c, "health-timeout") m["hostname"] = newCRString(c, "hostname") - m["http-proxy"] = newCRBool(c, "http-proxy") m["image-volume"] = newCRString(c, "image-volume") m["init"] = newCRBool(c, "init") m["init-path"] = newCRString(c, "init-path") @@ -465,6 +461,10 @@ func NewIntermediateLayer(c *cliconfig.PodmanCommand, remote bool) GenericCLIRes m["workdir"] = newCRString(c, "workdir") // global flag if !remote { + m["authfile"] = newCRString(c, "authfile") + m["cgroupns"] = newCRString(c, "cgroupns") + m["env-host"] = newCRBool(c, "env-host") + m["http-proxy"] = newCRBool(c, "http-proxy") m["trace"] = newCRBool(c, "trace") m["syslog"] = newCRBool(c, "syslog") } diff --git a/cmd/podman/sign.go b/cmd/podman/sign.go index de289047a..63ba9b904 100644 --- a/cmd/podman/sign.go +++ b/cmd/podman/sign.go @@ -46,7 +46,7 @@ func init() { flags := signCommand.Flags() flags.StringVarP(&signCommand.Directory, "directory", "d", "", "Define an alternate directory to store signatures") flags.StringVar(&signCommand.SignBy, "sign-by", "", "Name of the signing key") - + flags.StringVar(&signCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys") } // SignatureStoreDir defines default directory to store signatures @@ -76,6 +76,13 @@ func signCmd(c *cliconfig.SignValues) error { } } + sc := runtime.SystemContext() + sc.DockerCertPath = c.CertDir + + dockerRegistryOptions := image.DockerRegistryOptions{ + DockerCertPath: c.CertDir, + } + mech, err := signature.NewGPGSigningMechanism() if err != nil { return 
errors.Wrap(err, "error initializing GPG") @@ -85,7 +92,7 @@ func signCmd(c *cliconfig.SignValues) error { return errors.Wrap(err, "signing is not supported") } - systemRegistriesDirPath := trust.RegistriesDirPath(runtime.SystemContext()) + systemRegistriesDirPath := trust.RegistriesDirPath(sc) registryConfigs, err := trust.LoadAndMergeConfig(systemRegistriesDirPath) if err != nil { return errors.Wrapf(err, "error reading registry configuration") @@ -96,10 +103,14 @@ func signCmd(c *cliconfig.SignValues) error { if err != nil { return errors.Wrapf(err, "error parsing image name") } - rawSource, err := srcRef.NewImageSource(getContext(), runtime.SystemContext()) + rawSource, err := srcRef.NewImageSource(getContext(), sc) if err != nil { return errors.Wrapf(err, "error getting image source") } + err = rawSource.Close() + if err != nil { + logrus.Errorf("unable to close new image source %q", err) + } manifest, _, err := rawSource.GetManifest(getContext(), nil) if err != nil { return errors.Wrapf(err, "error getting manifest") @@ -114,7 +125,7 @@ func signCmd(c *cliconfig.SignValues) error { if err != nil { return err } - newImage, err := runtime.ImageRuntime().New(getContext(), signimage, rtc.SignaturePolicyPath, "", os.Stderr, nil, image.SigningOptions{SignBy: signby}, nil, util.PullImageMissing) + newImage, err := runtime.ImageRuntime().New(getContext(), signimage, rtc.SignaturePolicyPath, "", os.Stderr, &dockerRegistryOptions, image.SigningOptions{SignBy: signby}, nil, util.PullImageMissing) if err != nil { return errors.Wrapf(err, "error pulling image %s", signimage) } diff --git a/cmd/podman/varlink/io.podman.varlink b/cmd/podman/varlink/io.podman.varlink index 1b10416a2..752e28256 100644 --- a/cmd/podman/varlink/io.podman.varlink +++ b/cmd/podman/varlink/io.podman.varlink @@ -274,6 +274,8 @@ type Sockets( ) # Create is an input structure for creating containers. +# args[0] is the image name or id +# args[1-] are the new commands if changed type Create ( args: []string, addHost: ?[]string, @@ -1241,8 +1243,6 @@ method GetLayersMapWithImageInfo() -> (layerMap: string) # BuildImageHierarchyMap is for the development of Podman and should not be used. method BuildImageHierarchyMap(name: string) -> (imageInfo: string) -method GenerateSystemd(name: string, restart: string, timeout: int, useName: bool) -> (unit: string) - # ImageNotFound means the image could not be found by the provided name or ID in local storage. error ImageNotFound (id: string, reason: string) @@ -1283,4 +1283,4 @@ error WantsMoreRequired (reason: string) error ErrCtrStopped (id: string) # This function requires CGroupsV2 to run in rootless mode. -error ErrRequiresCgroupsV2ForRootless(reason: string)
\ No newline at end of file +error ErrRequiresCgroupsV2ForRootless(reason: string) diff --git a/cni/87-podman-bridge.conflist b/cni/87-podman-bridge.conflist index a5e241c80..9db416a19 100644 --- a/cni/87-podman-bridge.conflist +++ b/cni/87-podman-bridge.conflist @@ -1,25 +1,38 @@ { - "cniVersion": "0.3.0", + "cniVersion": "0.4.0", "name": "podman", "plugins": [ - { - "type": "bridge", - "bridge": "cni0", - "isGateway": true, - "ipMasq": true, - "ipam": { - "type": "host-local", - "subnet": "10.88.0.0/16", - "routes": [ - { "dst": "0.0.0.0/0" } - ] - } - }, - { - "type": "portmap", - "capabilities": { - "portMappings": true - } - } + { + "type": "bridge", + "bridge": "cni-podman0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ], + "ranges": [ + [ + { + "subnet": "10.88.0.0/16", + "gateway": "10.88.0.1" + } + ] + ] + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall", + "backend": "iptables" + } ] } diff --git a/cni/README.md b/cni/README.md index d35bc4111..2683df714 100644 --- a/cni/README.md +++ b/cni/README.md @@ -1,15 +1,17 @@ ## `cni` ## -There are a wide variety of different [CNI][cni] network configurations. This +There are a wide variety of different [CNI](https://github.com/containernetworking/cni) network configurations. This directory just contains an example configuration that can be used as the basis for your own configuration. To use this configuration, place it in `/etc/cni/net.d` (or the directory specified by `cni_config_dir` in your `libpod.conf`). -In addition, you need to install the [CNI plugins][cni] necessary into -`/opt/cni/bin` (or the directory specified by `cni_plugin_dir`). The -two plugins necessary for the example CNI configurations are `portmap` and -`bridge`. +For example a basic network configuration can be achieved with: -[cni]: https://github.com/containernetworking/plugins +```bash +sudo mkdir -p /etc/cni/net.d +curl -qsSL https://raw.githubusercontent.com/containers/libpod/master/cni/87-podman-bridge.conflist | sudo tee /etc/cni/net.d/87-podman-bridge.conf +``` + +Dependent upon your CNI configuration, you will need to install as a minimum the `port` and `bridge` [CNI plugins](https://github.com/containernetworking/plugins) into `/opt/cni/bin` (or the directory specified by `cni_plugin_dir` in libpod.conf). Please refer to the [CNI](https://github.com/containernetworking) project page in GitHub for more information. 
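For reference, a minimal sketch of satisfying the plugin requirement described in the cni/README.md hunk above, on a Fedora-style host. It assumes the distro's `containernetworking-plugins` package (the same package the Dockerfile.fedora hunk now installs) and the stock plugin/config paths; adjust `cni_plugin_dir` and `cni_config_dir` in `libpod.conf` if yours differ:

```bash
# Install the packaged CNI plugins (provides bridge, portmap, firewall, ...)
sudo dnf -y install containernetworking-plugins

# Fedora places the plugin binaries under /usr/libexec/cni; verify they exist,
# or point cni_plugin_dir in libpod.conf at this directory
ls /usr/libexec/cni

# Install the example bridge configuration shipped in this repository
sudo install -D -m 644 cni/87-podman-bridge.conflist \
    /etc/cni/net.d/87-podman-bridge.conflist
```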
diff --git a/completions/bash/podman b/completions/bash/podman index 962c15a95..e6ffb135f 100644 --- a/completions/bash/podman +++ b/completions/bash/podman @@ -1758,6 +1758,7 @@ _podman_mount() { _podman_push() { local boolean_options=" --compress + --digestflag --help -h --quiet @@ -2668,6 +2669,7 @@ _podman_container_runlabel() { _podman_image_sign() { local options_with_args=" + --cert-dir -d --directory --sign-by diff --git a/contrib/cirrus/99-do-not-use-google-subnets.conflist b/contrib/cirrus/99-do-not-use-google-subnets.conflist new file mode 100644 index 000000000..e9ab638ed --- /dev/null +++ b/contrib/cirrus/99-do-not-use-google-subnets.conflist @@ -0,0 +1,21 @@ +{ + "cniVersion": "0.4.0", + "name": "do-not-use-google-subnets", + "plugins": [ + { + "type": "bridge", + "name": "do-not-use-google-subnets", + "bridge": "do-not-use-google-subnets", + "ipam": { + "type": "host-local", + "ranges": [ + [ + { + "subnet": "10.128.0.0/9" + } + ] + ] + } + } + ] +} diff --git a/contrib/cirrus/build_release.sh b/contrib/cirrus/build_release.sh new file mode 100755 index 000000000..287643f47 --- /dev/null +++ b/contrib/cirrus/build_release.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +source $(dirname $0)/lib.sh + +req_env_var TEST_REMOTE_CLIENT OS_RELEASE_ID GOSRC + +cd $GOSRC + +if [[ "$TEST_REMOTE_CLIENT" == "true" ]] && [[ -z "$CROSS_PLATFORM" ]] +then + CROSS_PLATFORM=linux +fi + +if [[ -n "$CROSS_PLATFORM" ]] +then + echo "Compiling podman-remote release archive for ${CROSS_PLATFORM}" + case "$CROSS_PLATFORM" in + linux) ;& + windows) ;& + darwin) + make podman-remote-${CROSS_PLATFORM}-release + ;; + *) + die 1 "Unknown/unsupported cross-compile platform '$CROSS_PLATFORM'" + ;; + esac +else + echo "Compiling release archive for $OS_RELEASE_ID" + make podman-release +fi diff --git a/contrib/cirrus/cache_release_archive.sh b/contrib/cirrus/cache_release_archive.sh deleted file mode 100755 index 2365f7593..000000000 --- a/contrib/cirrus/cache_release_archive.sh +++ /dev/null @@ -1,140 +0,0 @@ -#!/bin/bash - -set -eo pipefail - -source $(dirname $0)/lib.sh - -req_env_var GOSRC - -RELEASE_ARCHIVE_NAMES="" - -handle_archive() { # Assumed to be called with set +e - TASK_NUMBER=$1 - PR_OR_BRANCH=$2 - CACHE_URL=$3 - ARCHIVE_NAME="$(basename $CACHE_URL)" - req_env_var TASK_NUMBER PR_OR_BRANCH CACHE_URL ARCHIVE_NAME - - cd /tmp - curl -sO "$CACHE_URL" || return $(warn 0 "Couldn't download file, skipping.") - [[ -r "/tmp/$ARCHIVE_NAME" ]] || return $(warn 0 "Unreadable archive '/tmp/$ARCHIVE_NAME', skipping.") - - ZIPCOMMENT=$(unzip -qqz "$ARCHIVE_NAME" 2>/dev/null) # noisy bugger - if [[ "$?" -ne "0" ]] || [[ -z "$ZIPCOMMENT" ]] - then - return $(warn 0 "Could not unzip metadata from downloaded '/tmp/$ARCHIVE_NAME', skipping.") - fi - - RELEASE_INFO=$(echo "$ZIPCOMMENT" | grep -m 1 'X-RELEASE-INFO:' | sed -r -e 's/X-RELEASE-INFO:\s*(.+)/\1/') - if [[ "$?" -ne "0" ]] || [[ -z "$RELEASE_INFO" ]] - then - return $(warn 0 "Metadata empty or invalid: '$ZIPCOMMENT', skipping.") - fi - - # e.g. 
libpod v1.3.1-166-g60df124e fedora 29 amd64 - # or libpod v1.3.1-166-g60df124e amd64 - FIELDS="RELEASE_BASENAME RELEASE_VERSION RELEASE_DIST RELEASE_DIST_VER RELEASE_ARCH" - read $FIELDS <<< $RELEASE_INFO - for f in $FIELDS - do - [[ -n "${!f}" ]] || return $(warn 0 "Expecting $f to be non-empty in metadata: '$RELEASE_INFO', skipping.") - done - - echo -n "Preparing $RELEASE_BASENAME archive: " - # Drop version number to enable "latest" representation - # (version available w/in zip-file comment) - RELEASE_ARCHIVE_NAME="${RELEASE_BASENAME}-${PR_OR_BRANCH}-${RELEASE_DIST}-${RELEASE_DIST_VER}-${RELEASE_ARCH}.zip" - # Allow uploading all gathered files in parallel, later with gsutil. - mv -v "$ARCHIVE_NAME" "/$RELEASE_ARCHIVE_NAME" - RELEASE_ARCHIVE_NAMES="$RELEASE_ARCHIVE_NAMES $RELEASE_ARCHIVE_NAME" -} - -make_release() { - ARCHIVE_NAME="$1" - req_env_var ARCHIVE_NAME - - # There's no actual testing of windows/darwin targets yet - # but we still want to cross-compile and publish binaries - if [[ "$SPECIALMODE" == "windows" ]] || [[ "$SPECIALMODE" == "darwin" ]] - then - RELFILE="podman-remote-${SPECIALMODE}.zip" - elif [[ "$SPECIALMODE" == "none" ]] - then - RELFILE="podman.zip" - else - die 55 "$(basename $0) unable to handle \$SPECIALMODE=$SPECIALMODE for $ARCHIVE_NAME" - fi - echo "Calling make $RELFILE" - cd $GOSRC - make "$RELFILE" - echo "Renaming archive so it can be identified/downloaded for publishing" - mv -v "$RELFILE" "$ARCHIVE_NAME" - echo "Success!" -} - -[[ "$CI" == "true" ]] || \ - die 56 "$0 requires a Cirrus-CI cross-task cache to function" - -cd $GOSRC -# Same script re-used for both uploading and downloading to avoid duplication -if [[ "$(basename $0)" == "cache_release_archive.sh" ]] -then - # ref: https://cirrus-ci.org/guide/writing-tasks/#environment-variables - req_env_var CI_NODE_INDEX CIRRUS_BUILD_ID - # Use unique names for uncache_release_archives.sh to find/download them all - ARCHIVE_NAME="build-${CIRRUS_BUILD_ID}-task-${CI_NODE_INDEX}.zip" - make_release "$ARCHIVE_NAME" - - # ref: https://cirrus-ci.org/guide/writing-tasks/#http-cache - URL="http://$CIRRUS_HTTP_CACHE_HOST/${ARCHIVE_NAME}" - echo "Uploading $ARCHIVE_NAME to Cirrus-CI cache at $URL" - curl -s -X POST --data-binary "@$ARCHIVE_NAME" "$URL" -elif [[ "$(basename $0)" == "uncache_release_archives.sh" ]] -then - req_env_var CIRRUS_BUILD_ID CI_NODE_TOTAL GCPJSON GCPNAME GCPROJECT - [[ "${CI_NODE_INDEX}" -eq "$[CI_NODE_TOTAL-1]" ]] || \ - die 0 "WARNING: This task depends on cache data from other tasks, otherwise it is a no-op." - - if [[ -n "$CIRRUS_PR" ]] - then - PR_OR_BRANCH="pr$CIRRUS_PR" - BUCKET="libpod-pr-releases" - elif [[ -n "$CIRRUS_BRANCH" ]] - then - PR_OR_BRANCH="$CIRRUS_BRANCH" - BUCKET="libpod-$CIRRUS_BRANCH-releases" - else - die 10 "Expecting either \$CIRRUS_PR or \$CIRRUS_BRANCH to be non-empty." - fi - - echo "Blindly downloading Cirrus-CI cache files for task (some will fail)." - set +e # Don't stop looping until all task's cache is attempted - for (( task_number = 0 ; task_number < $CI_NODE_TOTAL ; task_number++ )) - do - ARCHIVE_NAME="build-${CIRRUS_BUILD_ID}-task-${task_number}.zip" - URL="http://$CIRRUS_HTTP_CACHE_HOST/${ARCHIVE_NAME}" - echo "Attempting to download cached archive from $URL" - handle_archive "$task_number" "$PR_OR_BRANCH" "$URL" - echo "----------------------------------------" - done - set -e - - [[ -n "$RELEASE_ARCHIVE_NAMES" ]] || \ - die 67 "Error: No release archives found in CI cache, expecting at least one." 
- - echo "Preparing to upload release archives." - gcloud config set project "$GCPROJECT" - echo "$GCPJSON" > /tmp/gcp.json - gcloud auth activate-service-account --key-file=/tmp/gcp.json - rm /tmp/gcp.json - # handle_archive() placed all uploadable files under / - gsutil -m cp /*.zip "gs://$BUCKET" # Upload in parallel - echo "Successfully uploaded archives:" - for ARCHIVE_NAME in $RELEASE_ARCHIVE_NAMES - do - echo " https://storage.cloud.google.com/$BUCKET/$ARCHIVE_NAME" - done - echo "These will remain available until automatic pruning by bucket policy." -else - die 9 "I don't know what to do when called $0" -fi diff --git a/contrib/cirrus/cirrus_yaml_test.py b/contrib/cirrus/cirrus_yaml_test.py index c8faee65f..c2ff8e69e 100755 --- a/contrib/cirrus/cirrus_yaml_test.py +++ b/contrib/cirrus/cirrus_yaml_test.py @@ -26,7 +26,6 @@ class TestCaseBase(unittest.TestCase): class TestDependsOn(TestCaseBase): ALL_TASK_NAMES = None - SUCCESS_RELEASE = set(['success', 'release']) def setUp(self): super().setUp() @@ -34,34 +33,22 @@ class TestDependsOn(TestCaseBase): for key, _ in self.CIRRUS_YAML.items() if key.endswith('_task')]) - def test_dicts(self): + def test_00_dicts(self): """Expected dictionaries are present and non-empty""" - for name in ('success_task', 'release_task'): - # tests all names then show specific failures - with self.subTest(name=name): - self.assertIn(name, self.CIRRUS_YAML) - self.assertIn(name.replace('_task', ''), self.ALL_TASK_NAMES) - self.assertIn('depends_on', self.CIRRUS_YAML[name]) - self.assertGreater(len(self.CIRRUS_YAML[name]['depends_on']), 0) - - def _check_dep(self, name, task_name, deps): - # name includes '_task' suffix, task_name does not - msg=('Please add "{0}" to the "depends_on" list in "{1}"' - "".format(task_name, name)) - self.assertIn(task_name, deps, msg=msg) - - def test_depends(self): - """Success and Release tasks depend on all other tasks""" - for name in ('success_task', 'release_task'): - deps = set(self.CIRRUS_YAML[name]['depends_on']) - for task_name in self.ALL_TASK_NAMES - self.SUCCESS_RELEASE: - with self.subTest(name=name, task_name=task_name): - self._check_dep(name, task_name, deps) - - def test_release(self): - """Release task must always execute last""" - deps = set(self.CIRRUS_YAML['release_task']['depends_on']) - self._check_dep('release_task', 'success', deps) + self.assertIn('success_task', self.CIRRUS_YAML) + self.assertIn('success_task'.replace('_task', ''), self.ALL_TASK_NAMES) + self.assertIn('depends_on', self.CIRRUS_YAML['success_task']) + self.assertGreater(len(self.CIRRUS_YAML['success_task']['depends_on']), 0) + + def test_01_depends(self): + """Success task depends on all other tasks""" + success_deps = set(self.CIRRUS_YAML['success_task']['depends_on']) + for task_name in self.ALL_TASK_NAMES - set(['success']): + with self.subTest(task_name=task_name): + msg=('Please add "{0}" to the "depends_on" list in "success_task"' + "".format(task_name)) + self.assertIn(task_name, success_deps, msg=msg) + if __name__ == "__main__": diff --git a/contrib/cirrus/git_authors_to_irc_nicks.csv b/contrib/cirrus/git_authors_to_irc_nicks.csv new file mode 100644 index 000000000..4334b5cd2 --- /dev/null +++ b/contrib/cirrus/git_authors_to_irc_nicks.csv @@ -0,0 +1,8 @@ +# Comma separated mapping of author e-mail, to Freenode IRC nick. +# When no match is found here, the username portion of the e-mail is used. +# Sorting is done at runtime - first-found e-mail match wins. +# Comments (like this) and blank lines are ignored. 
+ +rothberg@redhat.com,vrothberg +santiago@redhat.com,edsantiago +gscrivan@redhat.com,giuseppe diff --git a/contrib/cirrus/integration_test.sh b/contrib/cirrus/integration_test.sh index e5de518fa..552f2ba73 100755 --- a/contrib/cirrus/integration_test.sh +++ b/contrib/cirrus/integration_test.sh @@ -48,6 +48,12 @@ case "$SPECIALMODE" in make test-binaries make local${TESTSUITE} ;; + endpoint) + make + make install PREFIX=/usr ETCDIR=/etc + make test-binaries + make endpoint + ;; none) make make install PREFIX=/usr ETCDIR=/etc @@ -59,10 +65,6 @@ case "$SPECIALMODE" in make local${TESTSUITE} fi ;; - windows) ;& # for podman-remote building only - darwin) - warn '' "No $SPECIALMODE remote client integration tests configured" - ;; *) die 110 "Unsupported \$SPECIALMODE: $SPECIALMODE" esac diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh index a20ee5a62..cd8b2ef61 100644 --- a/contrib/cirrus/lib.sh +++ b/contrib/cirrus/lib.sh @@ -28,11 +28,12 @@ CIRRUS_WORKING_DIR="${CIRRUS_WORKING_DIR:-$GOPATH/src/github.com/containers/libp export GOSRC="${GOSRC:-$CIRRUS_WORKING_DIR}" export PATH="$HOME/bin:$GOPATH/bin:/usr/local/bin:$PATH" export LD_LIBRARY_PATH="/usr/local/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}" -TIMESTAMPS_FILEPATH="${TIMESTAMPS_FILEPATH:-/var/tmp/timestamps}" -SETUP_MARKER_FILEPATH="${SETUP_MARKER_FILEPATH:-/var/tmp/.setup_environment_sh_complete}" # Saves typing / in case location ever moves SCRIPT_BASE=${SCRIPT_BASE:-./contrib/cirrus} PACKER_BASE=${PACKER_BASE:-./contrib/cirrus/packer} +# Important filepaths +SETUP_MARKER_FILEPATH="${SETUP_MARKER_FILEPATH:-/var/tmp/.setup_environment_sh_complete}" +AUTHOR_NICKS_FILEPATH="${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/git_authors_to_irc_nicks.csv" cd $GOSRC if type -P git &> /dev/null @@ -64,6 +65,8 @@ export PRIOR_FEDORA_BASE_IMAGE="fedora-cloud-base-29-1-2-1559164849" export BUILT_IMAGE_SUFFIX="${BUILT_IMAGE_SUFFIX:--$CIRRUS_REPO_NAME-${CIRRUS_BUILD_ID}}" # IN_PODMAN container image IN_PODMAN_IMAGE="quay.io/libpod/in_podman:latest" +# Image for uploading releases +UPLDREL_IMAGE="quay.io/libpod/upldrel:latest" # Avoid getting stuck waiting for user input export DEBIAN_FRONTEND="noninteractive" @@ -76,7 +79,7 @@ BIGTO="timeout_attempt_delay_command 300s 5 30s" # Safe env. vars. to transfer from root -> $ROOTLESS_USER (go env handled separetly) ROOTLESS_ENV_RE='(CIRRUS_.+)|(ROOTLESS_.+)|(.+_IMAGE.*)|(.+_BASE)|(.*DIRPATH)|(.*FILEPATH)|(SOURCE.*)|(DEPEND.*)|(.+_DEPS_.+)|(OS_REL.*)|(.+_ENV_RE)|(TRAVIS)|(CI.+)|(TEST_REMOTE.*)' # Unsafe env. vars for display -SECRET_ENV_RE='(IRCID)|(ACCOUNT)|(^GC[EP]..+)|(SSH)' +SECRET_ENV_RE='(IRCID)|(ACCOUNT)|(GC[EP]..+)|(SSH)' # Names of systemd units which should never be running EVIL_UNITS="cron crond atd apt-daily-upgrade apt-daily fstrim motd-news systemd-tmpfiles-clean" @@ -321,13 +324,15 @@ EOF install_test_configs(){ echo "Installing cni config, policy and registry config" - req_env_var GOSRC - sudo install -D -m 755 $GOSRC/cni/87-podman-bridge.conflist \ - /etc/cni/net.d/87-podman-bridge.conflist - sudo install -D -m 755 $GOSRC/test/policy.json \ - /etc/containers/policy.json - sudo install -D -m 755 $GOSRC/test/registries.conf \ - /etc/containers/registries.conf + req_env_var GOSRC SCRIPT_BASE + cd $GOSRC + install -v -D -m 644 ./cni/87-podman-bridge.conflist /etc/cni/net.d/ + # This config must always sort last in the list of networks (podman picks first one + # as the default). This config prevents allocation of network address space used + # by default in google cloud. 
https://cloud.google.com/vpc/docs/vpc#ip-ranges + install -v -D -m 644 $SCRIPT_BASE/99-do-not-use-google-subnets.conflist /etc/cni/net.d/ + install -v -D -m 644 ./test/policy.json /etc/containers/ + install -v -D -m 644 ./test/registries.conf /etc/containers/ } # Remove all files (except conmon, for now) provided by the distro version of podman. diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh index 7b6765f8a..7c7659169 100755 --- a/contrib/cirrus/setup_environment.sh +++ b/contrib/cirrus/setup_environment.sh @@ -44,11 +44,15 @@ case "${OS_REL_VER}" in ;; fedora-30) ;& # continue to next item fedora-29) + # All SELinux distros need this for systemd-in-a-container + setsebool container_manage_cgroup true if [[ "$ADD_SECOND_PARTITION" == "true" ]]; then bash "$SCRIPT_BASE/add_second_partition.sh"; fi ;; centos-7) # Current VM is an image-builder-image no local podman/testing - echo "No further setup required for VM image building" + echo "No further setup required for VM image building" + # All SELinux distros need this for systemd-in-a-container + setsebool container_manage_cgroup true exit 0 ;; *) bad_os_id_ver ;; @@ -57,8 +61,7 @@ esac # Reload to incorporate any changes from above source "$SCRIPT_BASE/lib.sh" -install_test_configs - +# Must execute before possible setup_rootless() make install.tools case "$SPECIALMODE" in @@ -66,6 +69,10 @@ case "$SPECIALMODE" in remove_packaged_podman_files # we're building from source ;; none) + [[ -n "$CROSS_PLATFORM" ]] || \ + remove_packaged_podman_files + ;; + endpoint) remove_packaged_podman_files ;; rootless) @@ -85,8 +92,8 @@ case "$SPECIALMODE" in in_podman) # Assumed to be Fedora $SCRIPT_BASE/setup_container_environment.sh ;; - windows) ;& # for podman-remote building only - darwin) ;; *) die 111 "Unsupported \$SPECIALMODE: $SPECIALMODE" esac + +install_test_configs diff --git a/contrib/cirrus/success.sh b/contrib/cirrus/success.sh index c4e150514..f2c9fbc7f 100755 --- a/contrib/cirrus/success.sh +++ b/contrib/cirrus/success.sh @@ -4,14 +4,52 @@ set -e source $(dirname $0)/lib.sh -req_env_var CIRRUS_BRANCH CIRRUS_BUILD_ID +req_env_var CIRRUS_BRANCH CIRRUS_BUILD_ID CIRRUS_REPO_FULL_NAME -REF=$(basename $CIRRUS_BRANCH) # PR number or branch named -URL="https://cirrus-ci.com/build/$CIRRUS_BUILD_ID" +cd $CIRRUS_WORKING_DIR if [[ "$CIRRUS_BRANCH" =~ "pull" ]] then - ircmsg "Cirrus-CI testing successful for PR #$REF: $URL" + echo "Finding commit authors for PR $CIRRUS_PR" + unset NICKS + if [[ -r "$AUTHOR_NICKS_FILEPATH" ]] + then + SHARANGE="${CIRRUS_BASE_SHA}..${CIRRUS_CHANGE_IN_REPO}" + EXCLUDE_RE='merge-robot' + AUTHOR_NICKS=$(egrep -v '(^[[:space:]]*$)|(^[[:space:]]*#)' "$AUTHOR_NICKS_FILEPATH" | sort -u) + # Depending on branch-state, it's possible SHARANGE could be _WAY_ too big + MAX_NICKS=10 + # newline separated + COMMIT_AUTHORS=$(git log --format='%ae' $SHARANGE | \ + sort -u | \ + egrep -v "$EXCLUDE_RE" | \ + tail -$MAX_NICKS) + + for c_email in $COMMIT_AUTHORS + do + echo -e "\tExamining $c_email" + NICK=$(echo "$AUTHOR_NICKS" | grep -m 1 "$c_email" | \ + awk --field-separator ',' '{print $2}' | tr -d '[[:blank:]]') + if [[ -n "$NICK" ]] + then + echo -e "\t\tFound $c_email -> $NICK in $(basename $AUTHOR_NICKS_FILEPATH)" + else + echo -e "\t\tNot found in $(basename $AUTHOR_NICKS_FILEPATH), using e-mail username." 
+ NICK=$(echo "$c_email" | cut -d '@' -f 1) + fi + echo -e "\tUsing nick $NICK" + NICKS="${NICKS:+$NICKS, }$NICK" + done + fi + + unset MENTION_PREFIX + [[ -z "$NICKS" ]] || \ + MENTION_PREFIX="$NICKS: " + + URL="https://github.com/$CIRRUS_REPO_FULL_NAME/pull/$CIRRUS_PR" + PR_SUBJECT=$(echo "$CIRRUS_CHANGE_MESSAGE" | head -1) + ircmsg "${MENTION_PREFIX}Cirrus-CI testing successful for PR '$PR_SUBJECT': $URL" else - ircmsg "Cirrus-CI testing branch $REF successful: $URL" + URL="https://cirrus-ci.com/github/containers/libpod/$CIRRUS_BRANCH" + ircmsg "Cirrus-CI testing branch $(basename $CIRRUS_BRANCH) successful: $URL" fi diff --git a/contrib/cirrus/uncache_release_archives.sh b/contrib/cirrus/uncache_release_archives.sh deleted file mode 120000 index e9fc6edff..000000000 --- a/contrib/cirrus/uncache_release_archives.sh +++ /dev/null @@ -1 +0,0 @@ -cache_release_archive.sh
\ No newline at end of file diff --git a/contrib/cirrus/unit_test.sh b/contrib/cirrus/unit_test.sh index 004839f17..c6c77d17e 100755 --- a/contrib/cirrus/unit_test.sh +++ b/contrib/cirrus/unit_test.sh @@ -16,10 +16,6 @@ case "$SPECIALMODE" in none) make ;; - windows) ;& - darwin) - make podman-remote-$SPECIALMODE - ;; *) die 109 "Unsupported \$SPECIAL_MODE: $SPECIALMODE" esac diff --git a/contrib/cirrus/upload_release_archive.sh b/contrib/cirrus/upload_release_archive.sh new file mode 100755 index 000000000..942255821 --- /dev/null +++ b/contrib/cirrus/upload_release_archive.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +set -eo pipefail + +source $(dirname $0)/lib.sh + +req_env_var CI UPLDREL_IMAGE CIRRUS_BUILD_ID GOSRC RELEASE_GCPJSON RELEASE_GCPNAME RELEASE_GCPROJECT + +[[ "$CI" == "true" ]] || \ + die 56 "$0 must be run under Cirrus-CI to function" + +unset PR_OR_BRANCH BUCKET +if [[ -n "$CIRRUS_PR" ]] +then + PR_OR_BRANCH="pr$CIRRUS_PR" + BUCKET="libpod-pr-releases" +elif [[ -n "$CIRRUS_BRANCH" ]] +then + PR_OR_BRANCH="$CIRRUS_BRANCH" + BUCKET="libpod-$CIRRUS_BRANCH-releases" +else + die 1 "Expecting either \$CIRRUS_PR or \$CIRRUS_BRANCH to be non-empty." +fi + +# Functional local podman required for uploading a release +cd $GOSRC +[[ -n "$(type -P podman)" ]] || \ + make install || \ + die 57 "$0 requires working podman binary on path to function" + +TMPF=$(mktemp -p '' $(basename $0)_XXXX.json) +trap "rm -f $TMPF" EXIT +set +x +echo "$RELEASE_GCPJSON" > "$TMPF" +unset RELEASE_GCPJSON + +cd $GOSRC +for filename in $(ls -1 *.tar.gz *.zip) +do + echo "Running podman ... $UPLDREL_IMAGE $filename" + podman run -i --rm \ + -e "GCPNAME=$RELEASE_GCPNAME" \ + -e "GCPPROJECT=$RELEASE_GCPROJECT" \ + -e "GCPJSON_FILEPATH=$TMPF" \ + -e "REL_ARC_FILEPATH=/tmp/$filename" \ + -e "PR_OR_BRANCH=$PR_OR_BRANCH" \ + -e "BUCKET=$BUCKET" \ + --security-opt label=disable \ + -v "$TMPF:$TMPF:ro" \ + -v "$GOSRC/$filename:/tmp/$filename:ro" \ + $UPLDREL_IMAGE +done diff --git a/contrib/imgprune/entrypoint.sh b/contrib/imgprune/entrypoint.sh index a4b77523b..829e9938e 100755 --- a/contrib/imgprune/entrypoint.sh +++ b/contrib/imgprune/entrypoint.sh @@ -6,27 +6,49 @@ source /usr/local/bin/lib_entrypoint.sh req_env_var GCPJSON GCPNAME GCPPROJECT IMGNAMES +BASE_IMAGES="" +# When executing under Cirrus-CI, have access to current source +if [[ "$CI" == "true" ]] && [[ -r "$CIRRUS_WORKING_DIR/$SCRIPT_BASE" ]] +then + # Avoid importing anything that might conflict + eval "$(egrep -sh '^export .+BASE_IMAGE=' < $CIRRUS_WORKING_DIR/$SCRIPT_BASE/lib.sh)" + BASE_IMAGES="$UBUNTU_BASE_IMAGE $PRIOR_UBUNTU_BASE_IMAGE $FEDORA_BASE_IMAGE $PRIOR_FEDORA_BASE_IMAGE" +else + # metadata labeling may have broken for some reason in the future + echo "Warning: Running outside of Cirrus-CI, very minor-risk of base-image deletion." 
+fi + gcloud_init # For safety's sake + limit nr background processes -PRUNE_LIMIT=10 +PRUNE_LIMIT=5 THEFUTURE=$(date --date='+1 hour' +%s) -TOO_OLD='90 days ago' +TOO_OLD='30 days ago' THRESHOLD=$(date --date="$TOO_OLD" +%s) # Format Ref: https://cloud.google.com/sdk/gcloud/reference/topic/formats FORMAT='value[quote](name,selfLink,creationTimestamp,labels)' PROJRE="/v1/projects/$GCPPROJECT/global/" -BASE_IMAGE_RE='cloud-base' -RECENTLY=$(date --date='30 days ago' --iso-8601=date) -EXCLUDE="$IMGNAMES $IMAGE_BUILDER_CACHE_IMAGE_NAME" # whitespace separated values +RECENTLY=$(date --date='3 days ago' --iso-8601=date) # Filter Ref: https://cloud.google.com/sdk/gcloud/reference/topic/filters -FILTER="selfLink~$PROJRE AND creationTimestamp<$RECENTLY AND NOT name=($EXCLUDE)" +FILTER="selfLink~$PROJRE AND creationTimestamp<$RECENTLY AND NOT name=($IMGNAMES $BASE_IMAGES)" TODELETE=$(mktemp -p '' todelete.XXXXXX) +IMGCOUNT=$(mktemp -p '' imgcount.XXXXXX) + +# Search-loop runs in a sub-process, must store count in file +echo "0" > "$IMGCOUNT" +count_image() { + local count + count=$(<"$IMGCOUNT") + let 'count+=1' + echo "$count" > "$IMGCOUNT" +} -echo "Searching images for pruning candidates older than $TOO_OLD ($THRESHOLD):" +echo "Using filter: $FILTER" +echo "Searching images for pruning candidates older than $TOO_OLD ($(date --date="$TOO_OLD" --iso-8601=date)):" $GCLOUD compute images list --format="$FORMAT" --filter="$FILTER" | \ while read name selfLink creationTimestamp labels do + count_image created_ymd=$(date --date=$creationTimestamp --iso-8601=date) last_used=$(egrep --only-matching --max-count=1 'last-used=[[:digit:]]+' <<< $labels || true) markmsgpfx="Marking $name (created $created_ymd) for deletion" @@ -52,16 +74,29 @@ $GCLOUD compute images list --format="$FORMAT" --filter="$FILTER" | \ echo "$name" >> $TODELETE continue fi - - echo "NOT $markmsgpfx: last used on $last_used_ymd)" done -echo "Pruning up to $PRUNE_LIMIT images that were marked for deletion:" -for image_name in $(tail -$PRUNE_LIMIT $TODELETE | sort --random-sort) +COUNT=$(<"$IMGCOUNT") +echo "########################################################################" +echo "Deleting up to $PRUNE_LIMIT images marked ($(wc -l < $TODELETE)) of all searched ($COUNT):" + +# Require a minimum number of images to exist +NEED="$[$PRUNE_LIMIT*2]" +if [[ "$COUNT" -lt "$NEED" ]] +then + die 0 Safety-net Insufficient images \($COUNT\) to process deletions \($NEED\) + exit 0 +fi + +for image_name in $(sort --random-sort $TODELETE | tail -$PRUNE_LIMIT) do - # This can take quite some time (minutes), run in parallel disconnected from terminal - echo "TODO: Would have: $GCLOUD compute images delete $image_name &" - sleep "$[1+RANDOM/1000]s" & # Simlate background operation + if echo "$IMGNAMES $BASE_IMAGES" | grep -q "$image_name" + then + # double-verify in-use images were filtered out in search loop above + die 8 FATAL ATTEMPT TO DELETE IN-USE IMAGE \'$image_name\' - THIS SHOULD NEVER HAPPEN + fi + echo "Deleting $image_name in parallel..." 
+ $GCLOUD compute images delete $image_name & done wait || true # Nothing to delete: No background jobs diff --git a/contrib/imgts/lib_entrypoint.sh b/contrib/imgts/lib_entrypoint.sh index 7b76c823f..3f6b11128 100644 --- a/contrib/imgts/lib_entrypoint.sh +++ b/contrib/imgts/lib_entrypoint.sh @@ -35,10 +35,15 @@ req_env_var() { gcloud_init() { set +xe - TMPF=$(mktemp -p '' .$(uuidgen)XXXX) - trap "rm -f $TMPF" EXIT - echo "$GCPJSON" > $TMPF && \ - $GCLOUD auth activate-service-account --project "$GCPPROJECT" --key-file=$TMPF || \ + if [[ -n "$1" ]] && [[ -r "$1" ]] + then + TMPF="$1" + else + TMPF=$(mktemp -p '' .$(uuidgen)_XXXX.json) + trap "rm -f $TMPF &> /dev/null" EXIT + echo "$GCPJSON" > $TMPF + fi + $GCLOUD auth activate-service-account --project="$GCPPROJECT" --key-file="$TMPF" || \ die 5 FATAL auth - rm -f $TMPF + rm -f $TMPF &> /dev/null || true # ignore any read-only error } diff --git a/contrib/upldrel/Dockerfile b/contrib/upldrel/Dockerfile new file mode 100644 index 000000000..54a58c521 --- /dev/null +++ b/contrib/upldrel/Dockerfile @@ -0,0 +1,9 @@ +FROM quay.io/libpod/imgts:latest + +RUN yum -y update && \ + yum -y install unzip && \ + rpm -V unzip && \ + yum clean all + +COPY /contrib/upldrel/entrypoint.sh /usr/local/bin/entrypoint.sh +RUN chmod 755 /usr/local/bin/entrypoint.sh diff --git a/contrib/upldrel/README.md b/contrib/upldrel/README.md new file mode 100644 index 000000000..41f5ffef0 --- /dev/null +++ b/contrib/upldrel/README.md @@ -0,0 +1,9 @@ +![PODMAN logo](../../logo/podman-logo-source.svg) + +A container image for canonical-naming and uploading of +libpod and remote-client archives. Only intended to ever +be used by CI/CD, and depends heavily on an embedded +`release.txt` file produced by `make`. + +Build script: [../cirrus/build_release.sh](../cirrus/build_release.sh) +Upload script: [../cirrus/upload_release_archive.sh](../cirrus/upload_release_archive.sh) diff --git a/contrib/upldrel/entrypoint.sh b/contrib/upldrel/entrypoint.sh new file mode 100755 index 000000000..985b828a0 --- /dev/null +++ b/contrib/upldrel/entrypoint.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +set -e + +source /usr/local/bin/lib_entrypoint.sh + +req_env_var GCPJSON_FILEPATH GCPNAME GCPPROJECT REL_ARC_FILEPATH PR_OR_BRANCH BUCKET + +[[ -r "$REL_ARC_FILEPATH" ]] || \ + die 2 ERROR Cannot read release archive file: "$REL_ARC_FILEPATH" + +[[ -r "$GCPJSON_FILEPATH" ]] || \ + die 3 ERROR Cannot read GCP credentials file: "$GCPJSON_FILEPATH" + +cd $TMPDIR +echo "Attempting to extract release.txt from tar or zip $REL_ARC_FILEPATH" +unset SFX +if tar xzf "$REL_ARC_FILEPATH" "./release.txt" +then + echo "It's a tarball" + SFX="tar.gz" +elif unzip "$REL_ARC_FILEPATH" release.txt +then + echo "It's a zip" + SFX="zip" +else + die 5 ERROR Could not extract release.txt from $REL_ARC_FILEPATH +fi + +echo "Parsing release.txt contents" +RELEASETXT=$(<release.txt) +cd - +[[ -n "$RELEASETXT" ]] || \ + die 3 ERROR Could not obtain metadata from release.txt in $REL_ARC_FILEPATH + +RELEASE_INFO=$(echo "$RELEASETXT" | grep -m 1 'X-RELEASE-INFO:' | sed -r -e 's/X-RELEASE-INFO:\s*(.+)/\1/') +if [[ "$?" -ne "0" ]] || [[ -z "$RELEASE_INFO" ]] +then + die 4 ERROR Metadata is empty or invalid: '$RELEASETXT' +fi + +# e.g. 
libpod v1.3.1-166-g60df124e fedora 29 amd64 +# or libpod v1.3.1-166-g60df124e amd64 +FIELDS="RELEASE_BASENAME RELEASE_VERSION RELEASE_DIST RELEASE_DIST_VER RELEASE_ARCH" +read $FIELDS <<< $RELEASE_INFO +for f in $FIELDS +do + [[ -n "${!f}" ]] || \ + die 5 ERROR Expecting $f to be non-empty in metadata: '$RELEASE_INFO' +done + +gcloud_init "$GCPJSON_FILEPATH" + +# Drop version number to enable "latest" representation +# (version available w/in zip-file comment) +RELEASE_ARCHIVE_NAME="${RELEASE_BASENAME}-${PR_OR_BRANCH}-${RELEASE_DIST}-${RELEASE_DIST_VER}-${RELEASE_ARCH}.${SFX}" + +echo "Uploading archive as $RELEASE_ARCHIVE_NAME" +gsutil cp "$REL_ARC_FILEPATH" "gs://$BUCKET/$RELEASE_ARCHIVE_NAME" + +echo "Release now available at:" +echo " https://storage.cloud.google.com/$BUCKET/$RELEASE_ARCHIVE_NAME" diff --git a/docs/links/podman-container-umount.1 b/docs/links/podman-container-umount.1 index dadc63113..789dabbb0 100644 --- a/docs/links/podman-container-umount.1 +++ b/docs/links/podman-container-umount.1 @@ -1 +1 @@ -.so man1/podman-umount,.1 +.so man1/podman-umount.1 diff --git a/docs/links/podman-container-unmount.1 b/docs/links/podman-container-unmount.1 index dadc63113..789dabbb0 100644 --- a/docs/links/podman-container-unmount.1 +++ b/docs/links/podman-container-unmount.1 @@ -1 +1 @@ -.so man1/podman-umount,.1 +.so man1/podman-umount.1 diff --git a/docs/podman-generate-systemd.1.md b/docs/podman-generate-systemd.1.md index ea72fdfae..b4962f28b 100644 --- a/docs/podman-generate-systemd.1.md +++ b/docs/podman-generate-systemd.1.md @@ -4,16 +4,20 @@ podman-generate-systemd- Generate Systemd Unit file ## SYNOPSIS -**podman generate systemd** [*options*] *container* +**podman generate systemd** [*options*] *container|pod* ## DESCRIPTION -**podman generate systemd** will create a Systemd unit file that can be used to control a container. The -command will dynamically create the unit file and output it to stdout where it can be piped by the user -to a file. The options can be used to influence the results of the output as well. +**podman generate systemd** will create a systemd unit file that can be used to control a container or pod. +By default, the command will print the content of the unit files to stdout. +Note that this command is not supported for the remote client. ## OPTIONS: +**--files**, **-f** + +Generate files instead of printing to stdout. The generated files are named {container,pod}-{ID,name}.service and will be placed in the current working directory. + **--name**, **-n** Use the name of the container for the start, stop, and description in the unit file @@ -27,41 +31,66 @@ Set the systemd restart policy. The restart-policy must be one of: "no", "on-su "on-watchdog", "on-abort", or "always". The default policy is *on-failure*. ## Examples -Create a systemd unit file for a container running nginx: +Create and print a systemd unit file for a container running nginx with an *always* restart policy and 1-second timeout to stdout. 
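The canonical object name assembled by the upldrel entrypoint above also makes a published build easy to retrieve; a hypothetical example, with an invented PR number and the release.txt fields shown in the entrypoint's comment:

```bash
# Name follows ${RELEASE_BASENAME}-${PR_OR_BRANCH}-${RELEASE_DIST}-${RELEASE_DIST_VER}-${RELEASE_ARCH}.${SFX}
gsutil cp "gs://libpod-pr-releases/libpod-pr1234-fedora-29-amd64.tar.gz" .
# The entrypoint also prints the matching https://storage.cloud.google.com/... URL;
# whether that link is reachable anonymously depends on the bucket's ACLs.
```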
``` -$ sudo podman generate systemd nginx +$ podman create --name nginx nginx:latest +$ podman generate systemd --restart-policy=always -t 1 nginx +# container-de1e3223b1b888bc02d0962dd6cb5855eb00734061013ffdd3479d225abacdc6.service +# autogenerated by Podman 1.5.2 +# Wed Aug 21 09:46:45 CEST 2019 + [Unit] -Description=c21da63c4783be2ac2cd3487ef8d2ec15ee2a28f63dd8f145e3b05607f31cffc Podman Container +Description=Podman container-de1e3223b1b888bc02d0962dd6cb5855eb00734061013ffdd3479d225abacdc6.service +Documentation=man:podman-generate-systemd(1) + [Service] -Restart=on-failure -ExecStart=/usr/bin/podman start c21da63c4783be2ac2cd3487ef8d2ec15ee2a28f63dd8f145e3b05607f31cffc -ExecStop=/usr/bin/podman stop -t 10 c21da63c4783be2ac2cd3487ef8d2ec15ee2a28f63dd8f145e3b05607f31cffc +Restart=always +ExecStart=/usr/bin/podman start de1e3223b1b888bc02d0962dd6cb5855eb00734061013ffdd3479d225abacdc6 +ExecStop=/usr/bin/podman stop -t 1 de1e3223b1b888bc02d0962dd6cb5855eb00734061013ffdd3479d225abacdc6 KillMode=none Type=forking -PIDFile=/var/run/containers/storage/overlay-containers/c21da63c4783be2ac2cd3487ef8d2ec15ee2a28f63dd8f145e3b05607f31cffc/userdata/conmon.pid +PIDFile=/run/user/1000/overlay-containers/de1e3223b1b888bc02d0962dd6cb5855eb00734061013ffdd3479d225abacdc6/userdata/conmon.pid + [Install] WantedBy=multi-user.target ``` -Create a systemd unit file for a container running nginx with an *always* restart policy and 1-second timeout. +Create systemd unit files for a pod with two simple alpine containers. Note that these container services cannot be started or stopped individually via `systemctl`; they are managed by the pod service. You can still use `systemctl status` or journalctl to examine them. ``` -$ sudo podman generate systemd --restart-policy=always -t 1 nginx +$ podman pod create --name systemd-pod +$ podman create --pod systemd-pod alpine top +$ podman create --pod systemd-pod alpine top +$ podman generate systemd --files --name systemd-pod +/home/user/pod-systemd-pod.service +/home/user/container-amazing_chandrasekhar.service +/home/user/container-jolly_shtern.service +$ cat pod-systemd-pod.service +# pod-systemd-pod.service +# autogenerated by Podman 1.5.2 +# Wed Aug 21 09:52:37 CEST 2019 + [Unit] -Description=c21da63c4783be2ac2cd3487ef8d2ec15ee2a28f63dd8f145e3b05607f31cffc Podman Container +Description=Podman pod-systemd-pod.service +Documentation=man:podman-generate-systemd(1) +Requires=container-amazing_chandrasekhar.service container-jolly_shtern.service +Before=container-amazing_chandrasekhar.service container-jolly_shtern.service + [Service] -Restart=always -ExecStart=/usr/bin/podman start c21da63c4783be2ac2cd3487ef8d2ec15ee2a28f63dd8f145e3b05607f31cffc -ExecStop=/usr/bin/podman stop -t 1 c21da63c4783be2ac2cd3487ef8d2ec15ee2a28f63dd8f145e3b05607f31cffc +Restart=on-failure +ExecStart=/usr/bin/podman start 77a818221650-infra +ExecStop=/usr/bin/podman stop -t 10 77a818221650-infra KillMode=none Type=forking -PIDFile=/var/run/containers/storage/overlay-containers/c21da63c4783be2ac2cd3487ef8d2ec15ee2a28f63dd8f145e3b05607f31cffc/userdata/conmon.pid +PIDFile=/run/user/1000/overlay-containers/ccfd5c71a088768774ca7bd05888d55cc287698dde06f475c8b02f696a25adcd/userdata/conmon.pid + [Install] WantedBy=multi-user.target ``` ## SEE ALSO -podman(1), podman-container(1) +podman(1), podman-container(1), systemctl(1), systemd.unit(5), systemd.service(5) ## HISTORY +August 2019, Updated with pod support by Valentin Rothberg (rothberg at redhat dot com) April 2019, Originally compiled by Brent Baude 
(bbaude at redhat dot com) diff --git a/docs/podman-generate.1.md b/docs/podman-generate.1.md index 5a2386778..50050f2c1 100644 --- a/docs/podman-generate.1.md +++ b/docs/podman-generate.1.md @@ -11,10 +11,11 @@ The generate command will create structured output (like YAML) based on a contai ## COMMANDS -| Command | Man Page | Description | -| ------- | --------------------------------------------------- | ---------------------------------------------------------------------------- | -| kube | [podman-generate-kube(1)](podman-generate-kube.1.md)| Generate Kubernetes YAML based on a pod or container. | -| systemd | [podman-generate-systemd(1)](podman-generate-systemd.1.md)| Generate a systemd unit file for a container. | +| Command | Man Page | Description | +|---------|------------------------------------------------------------|-------------------------------------------------------------------------------------| +| kube | [podman-generate-kube(1)](podman-generate-kube.1.md) | Generate Kubernetes YAML based on a pod or container. | +| systemd | [podman-generate-systemd(1)](podman-generate-systemd.1.md) | Generate systemd unit file(s) for a container. Not supported for the remote client. | + ## SEE ALSO podman, podman-pod, podman-container diff --git a/docs/podman-image-sign.1.md b/docs/podman-image-sign.1.md index 61df3b3bd..ca438b438 100644 --- a/docs/podman-image-sign.1.md +++ b/docs/podman-image-sign.1.md @@ -12,14 +12,23 @@ been pulled from a registry. The signature will be written to a directory derived from the registry configuration files in /etc/containers/registries.d. By default, the signature will be written into /var/lib/containers/sigstore directory. ## OPTIONS + **--help**, **-h** - Print usage statement. + +Print usage statement. + +**--cert-dir**=*path* + +Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry. +Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands) **--directory**, **-d**=*dir* - Store the signatures in the specified directory. Default: /var/lib/containers/sigstore + +Store the signatures in the specified directory. Default: /var/lib/containers/sigstore **--sign-by**=*identity* - Override the default identity of the signature. + +Override the default identity of the signature. ## EXAMPLES Sign the busybox image with the identify of foo@bar.com with a user's keyring and save the signature in /tmp/signatures/. diff --git a/docs/podman-push.1.md b/docs/podman-push.1.md index 2058a432c..29e4044a3 100644 --- a/docs/podman-push.1.md +++ b/docs/podman-push.1.md @@ -61,13 +61,17 @@ value can be entered. The password is entered without echo. **--cert-dir**=*path* Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry. -Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands) (Not available for remote commands) +Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands) **--compress** Compress tarball image layers when pushing to a directory using the 'dir' transport. (default is same compression type, compressed or uncompressed, as source) Note: This flag can only be set when using the **dir** transport +**--digestfile** *Digestfile* + +After copying the image, write the digest of the resulting image to the file. 
(Not available for remote commands) + **--format**, **-f**=*format* Manifest Type (oci, v2s1, or v2s2) to use when pushing an image to a directory using the 'dir:' transport (default is manifest type of source) @@ -93,19 +97,23 @@ TLS verification will be used unless the target registry is listed as an insecur ## EXAMPLE -This example extracts the imageID image to a local directory in docker format. +This example pushes the image specified by the imageID to a local directory in docker format. `# podman push imageID dir:/path/to/image` -This example extracts the imageID image to a local directory in oci format. +This example pushes the image specified by the imageID to a local directory in oci format. `# podman push imageID oci-archive:/path/to/layout:image:tag` -This example extracts the imageID image to a container registry named registry.example.com +This example pushes the image specified by the imageID to a container registry named registry.example.com `# podman push imageID docker://registry.example.com/repository:tag` -This example extracts the imageID image and puts into the local docker container store +This example pushes the image specified by the imageID to a container registry named registry.example.com and saves the digest in the specified digestfile. + + `# podman push --digestfile=/tmp/mydigest imageID docker://registry.example.com/repository:tag` + +This example pushes the image specified by the imageID and puts it into the local docker container store `# podman push imageID docker-daemon:image:tag` diff --git a/docs/podman-run.1.md b/docs/podman-run.1.md index 33b5cbf9e..447d4f282 100644 --- a/docs/podman-run.1.md +++ b/docs/podman-run.1.md @@ -736,14 +736,14 @@ Note: if you use the `--network=host` option these sysctls will not be allowed. Run container in systemd mode. The default is *true*. -If the command you running inside of the container is systemd or init, podman +If the command you are running inside of the container is systemd or init, podman will setup tmpfs mount points in the following directories: /run, /run/lock, /tmp, /sys/fs/cgroup/systemd, /var/lib/journal It will also set the default stop signal to SIGRTMIN+3. -This allow systemd to run in a confined container without any modifications. +This allows systemd to run in a confined container without any modifications. Note: On `SELinux` systems, systemd attempts to write to the cgroup file system. Containers writing to the cgroup file system are denied by default. diff --git a/docs/podman.1.md b/docs/podman.1.md index 33ea81ef6..c643140a2 100644 --- a/docs/podman.1.md +++ b/docs/podman.1.md @@ -36,7 +36,7 @@ Note: CGroup manager is not supported in rootless mode when using CGroups Versio Path to where the cpu performance results should be written -**--events-logger**=*type* +**--events-backend**=*type* Backend to use for storing events. Allowed values are **file**, **journald**, and **none**. diff --git a/docs/tutorials/rootless_tutorial.md b/docs/tutorials/rootless_tutorial.md index 9453e3855..92595dd02 100644 --- a/docs/tutorials/rootless_tutorial.md +++ b/docs/tutorials/rootless_tutorial.md @@ -46,7 +46,7 @@ The format of this file is USERNAME:UID:RANGE This means the user johndoe is allocated UIDS 100000-165535 as well as their standard UID in the /etc/passwd file. NOTE: this is not currently supported with network installs. These files must be available locally to the host machine. It is not possible to configure this with LDAP or Active Directory. 
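A short, concrete example of allocating such a range without editing the files by hand (the `usermod` route the tutorial suggests just below); the user name and range mirror the `johndoe` example above:

```bash
# Assumes the johndoe user and the 100000-165535 range from the example above.
sudo usermod --add-subuids 100000-165535 --add-subgids 100000-165535 johndoe
grep johndoe /etc/subuid /etc/subgid   # verify the new entries

# As described next, running containers and the per-user pause process must be
# reset after the ranges change.
podman system migrate
```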
-If you update either the /etc/subuid or the /etc/subgid file, you need to stop all the running containers owned by the user and kill the pause process that is running on the system for that user. This can be done automatically by using the `[podman system migrate](https://github.com/containers/libpod/blob/master/docs/podman-system-migrate.1.md)` command which will stop all the containers for the user and will kill the pause process. +If you update either the /etc/subuid or the /etc/subgid file, you need to stop all the running containers owned by the user and kill the pause process that is running on the system for that user. This can be done automatically by using the [`podman system migrate`](https://github.com/containers/libpod/blob/master/docs/podman-system-migrate.1.md) command which will stop all the containers for the user and will kill the pause process. Rather than updating the files directly, the usermod program can be used to assign UIDs and GIDs to a user. @@ -20,7 +20,6 @@ require ( github.com/containers/storage v1.13.2 github.com/coreos/bbolt v1.3.3 // indirect github.com/coreos/etcd v3.3.13+incompatible // indirect - github.com/coreos/go-iptables v0.4.1 github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca diff --git a/hack/get_release_info.sh b/hack/get_release_info.sh new file mode 100755 index 000000000..29b4237b4 --- /dev/null +++ b/hack/get_release_info.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +# This script produces various bits of metadata needed by Makefile. Using +# a script allows uniform behavior across multiple environments and +# distributions. The script expects a single argument, as reflected below. + +set -e + +cd "${GOSRC:-$(dirname $0)/../}" + +valid_args() { + REGEX='^\s+[[:upper:]]+\*[)]' + egrep --text --no-filename --group-separator=' ' --only-matching "$REGEX" "$0" | \ + cut -d '*' -f 1 +} + +unset OUTPUT +case "$1" in + # Wild-card suffix needed by valid_args() e.g. possible bad grep of "$(echo $FOO)" + VERSION*) + OUTPUT="${CIRRUS_TAG:-$(git fetch --tags && git describe HEAD 2> /dev/null)}" + ;; + NUMBER*) + OUTPUT="$($0 VERSION | sed 's/-.*//')" + ;; + DIST_VER*) + OUTPUT="$(source /etc/os-release; echo $VERSION_ID | cut -d '.' -f 1)" + ;; + DIST*) + OUTPUT="$(source /etc/os-release; echo $ID)" + ;; + ARCH*) + OUTPUT="${GOARCH:-$(go env GOARCH 2> /dev/null)}" + ;; + BASENAME*) + OUTPUT="${CIRRUS_REPO_NAME:-$(basename $(git rev-parse --show-toplevel))}" + ;; + REMOTENAME*) + OUTPUT="$($0 BASENAME)-remote" + ;; + *) + echo "Error, unknown/unsupported argument '$1', valid arguments:" + valid_args + exit 1 + ;; +esac + +if [[ -n "$OUTPUT" ]] +then + echo -n "$OUTPUT" +else + echo "Error, empty output for info: '$1'" > /dev/stderr + exit 2 +fi diff --git a/install.md b/install.md index d8d70a7b6..eb4ecfa68 100644 --- a/install.md +++ b/install.md @@ -190,11 +190,14 @@ To build, use the following (running `make` can take a while): git clone https://github.com/ostreedev/ostree ~/ostree cd ~/ostree git submodule update --init + # for Fedora, CentOS, RHEL -sudo yum install -y automake bison e2fsprogs-devel fuse-devel libtool xz-devel zlib-devel +sudo yum install -y automake bison e2fsprogs-devel fuse-devel gpgme-devel libseccomp-devel libtool systemd-devel xz-devel zlib-devel + # for Debian, Ubuntu etc. 
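The new `hack/get_release_info.sh` above is driven entirely by its single positional argument; a few hypothetical invocations (actual output depends on the checkout and host):

```bash
hack/get_release_info.sh VERSION     # e.g. v1.3.1-166-g60df124e ($CIRRUS_TAG or git describe)
hack/get_release_info.sh DIST        # e.g. fedora (ID from /etc/os-release)
hack/get_release_info.sh DIST_VER    # e.g. 29
hack/get_release_info.sh REMOTENAME  # e.g. libpod-remote
```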
-sudo apt-get install -y automake bison e2fsprogs e2fslibs-dev fuse libfuse-dev libgpgme-dev liblzma-dev libtool zlib1g +sudo apt-get install -y automake bison e2fsprogs e2fslibs-dev fuse libfuse-dev libgpgme-dev liblzma-dev libseccomp-dev libsystemd-dev libtool zlib1g +# for all distributions ./autogen.sh --prefix=/usr --libdir=/usr/lib64 --sysconfdir=/etc # remove --nonet option due to https:/github.com/ostreedev/ostree/issues/1374 sed -i '/.*--nonet.*/d' ./Makefile-man.am @@ -226,6 +229,7 @@ To build from source, use the following: ```bash git clone https://github.com/containers/conmon cd conmon +export GOCACHE="$(mktemp -d)" make sudo make podman ``` @@ -245,25 +249,12 @@ sudo cp runc /usr/bin/runc #### CNI plugins -```bash -git clone https://github.com/containernetworking/plugins.git $GOPATH/src/github.com/containernetworking/plugins -cd $GOPATH/src/github.com/containernetworking/plugins -./build_linux.sh -sudo mkdir -p /usr/libexec/cni -sudo cp bin/* /usr/libexec/cni -``` - #### Setup CNI networking A proper description of setting up CNI networking is given in the [`cni` README](cni/README.md). -Using the CNI plugins from above, a more basic network config is achieved with: - -```bash -sudo mkdir -p /etc/cni/net.d -curl -qsSL https://raw.githubusercontent.com/containers/libpod/master/cni/87-podman-bridge.conflist | sudo tee /etc/cni/net.d/99-loopback.conf -``` - +A basic setup for CNI networking is done by default during the installation or make processes and +no further configuration is needed to start using Podman. #### Add configuration diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index 176781f07..1de8d80c9 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -870,7 +870,7 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error { newCfgJSON, err := json.Marshal(newCfg) if err != nil { - return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", pod.ID()) + return errors.Wrapf(err, "error marshalling new configuration JSON for pod %s", pod.ID()) } db, err := s.getDBCon() @@ -900,6 +900,50 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error { return err } +// RewriteVolumeConfig rewrites a volume's configuration. +// WARNING: This function is DANGEROUS. Do not use without reading the full +// comment on this function in state.go. 
+func (s *BoltState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error { + if !s.valid { + return define.ErrDBClosed + } + + if !volume.valid { + return define.ErrVolumeRemoved + } + + newCfgJSON, err := json.Marshal(newCfg) + if err != nil { + return errors.Wrapf(err, "error marshalling new configuration JSON for volume %q", volume.Name()) + } + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + err = db.Update(func(tx *bolt.Tx) error { + volBkt, err := getVolBucket(tx) + if err != nil { + return err + } + + volDB := volBkt.Bucket([]byte(volume.Name())) + if volDB == nil { + volume.valid = false + return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %q found in DB", volume.Name()) + } + + if err := volDB.Put(configKey, newCfgJSON); err != nil { + return errors.Wrapf(err, "error updating volume %q config JSON", volume.Name()) + } + + return nil + }) + return err +} + // Pod retrieves a pod given its full ID func (s *BoltState) Pod(id string) (*Pod, error) { if id == "" { diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go index 408ef7224..6e4179835 100644 --- a/libpod/boltdb_state_internal.go +++ b/libpod/boltdb_state_internal.go @@ -449,6 +449,13 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name)) } + // Get the lock + lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID) + if err != nil { + return errors.Wrapf(err, "error retrieving lock for volume %q", string(name)) + } + volume.lock = lock + volume.runtime = s.runtime volume.valid = true diff --git a/libpod/container_graph.go b/libpod/container_graph.go index 5aa51bc2f..f6988e1ac 100644 --- a/libpod/container_graph.go +++ b/libpod/container_graph.go @@ -16,14 +16,30 @@ type containerNode struct { dependedOn []*containerNode } -type containerGraph struct { +// ContainerGraph is a dependency graph based on a set of containers. +type ContainerGraph struct { nodes map[string]*containerNode noDepNodes []*containerNode notDependedOnNodes map[string]*containerNode } -func buildContainerGraph(ctrs []*Container) (*containerGraph, error) { - graph := new(containerGraph) +// DependencyMap returns the dependency graph as map with the key being a +// container and the value being the containers the key depends on. +func (cg *ContainerGraph) DependencyMap() (dependencies map[*Container][]*Container) { + dependencies = make(map[*Container][]*Container) + for _, node := range cg.nodes { + dependsOn := make([]*Container, len(node.dependsOn)) + for i, d := range node.dependsOn { + dependsOn[i] = d.container + } + dependencies[node.container] = dependsOn + } + return dependencies +} + +// BuildContainerGraph builds a dependency graph based on the container slice. 
+func BuildContainerGraph(ctrs []*Container) (*ContainerGraph, error) { + graph := new(ContainerGraph) graph.nodes = make(map[string]*containerNode) graph.notDependedOnNodes = make(map[string]*containerNode) @@ -78,7 +94,7 @@ func buildContainerGraph(ctrs []*Container) (*containerGraph, error) { // Detect cycles in a container graph using Tarjan's strongly connected // components algorithm // Return true if a cycle is found, false otherwise -func detectCycles(graph *containerGraph) (bool, error) { +func detectCycles(graph *ContainerGraph) (bool, error) { type nodeInfo struct { index int lowLink int diff --git a/libpod/container_graph_test.go b/libpod/container_graph_test.go index d1a52658d..38f03c59c 100644 --- a/libpod/container_graph_test.go +++ b/libpod/container_graph_test.go @@ -8,7 +8,7 @@ import ( ) func TestBuildContainerGraphNoCtrsIsEmpty(t *testing.T) { - graph, err := buildContainerGraph([]*Container{}) + graph, err := BuildContainerGraph([]*Container{}) assert.NoError(t, err) assert.Equal(t, 0, len(graph.nodes)) assert.Equal(t, 0, len(graph.noDepNodes)) @@ -24,7 +24,7 @@ func TestBuildContainerGraphOneCtr(t *testing.T) { ctr1, err := getTestCtr1(manager) assert.NoError(t, err) - graph, err := buildContainerGraph([]*Container{ctr1}) + graph, err := BuildContainerGraph([]*Container{ctr1}) assert.NoError(t, err) assert.Equal(t, 1, len(graph.nodes)) assert.Equal(t, 1, len(graph.noDepNodes)) @@ -49,7 +49,7 @@ func TestBuildContainerGraphTwoCtrNoEdge(t *testing.T) { ctr2, err := getTestCtr2(manager) assert.NoError(t, err) - graph, err := buildContainerGraph([]*Container{ctr1, ctr2}) + graph, err := BuildContainerGraph([]*Container{ctr1, ctr2}) assert.NoError(t, err) assert.Equal(t, 2, len(graph.nodes)) assert.Equal(t, 2, len(graph.noDepNodes)) @@ -76,7 +76,7 @@ func TestBuildContainerGraphTwoCtrOneEdge(t *testing.T) { assert.NoError(t, err) ctr2.config.UserNsCtr = ctr1.config.ID - graph, err := buildContainerGraph([]*Container{ctr1, ctr2}) + graph, err := BuildContainerGraph([]*Container{ctr1, ctr2}) assert.NoError(t, err) assert.Equal(t, 2, len(graph.nodes)) assert.Equal(t, 1, len(graph.noDepNodes)) @@ -99,7 +99,7 @@ func TestBuildContainerGraphTwoCtrCycle(t *testing.T) { ctr2.config.UserNsCtr = ctr1.config.ID ctr1.config.NetNsCtr = ctr2.config.ID - _, err = buildContainerGraph([]*Container{ctr1, ctr2}) + _, err = BuildContainerGraph([]*Container{ctr1, ctr2}) assert.Error(t, err) } @@ -116,7 +116,7 @@ func TestBuildContainerGraphThreeCtrNoEdges(t *testing.T) { ctr3, err := getTestCtrN("3", manager) assert.NoError(t, err) - graph, err := buildContainerGraph([]*Container{ctr1, ctr2, ctr3}) + graph, err := BuildContainerGraph([]*Container{ctr1, ctr2, ctr3}) assert.NoError(t, err) assert.Equal(t, 3, len(graph.nodes)) assert.Equal(t, 3, len(graph.noDepNodes)) @@ -150,7 +150,7 @@ func TestBuildContainerGraphThreeContainersTwoInCycle(t *testing.T) { ctr1.config.UserNsCtr = ctr2.config.ID ctr2.config.IPCNsCtr = ctr1.config.ID - _, err = buildContainerGraph([]*Container{ctr1, ctr2, ctr3}) + _, err = BuildContainerGraph([]*Container{ctr1, ctr2, ctr3}) assert.Error(t, err) } @@ -170,7 +170,7 @@ func TestBuildContainerGraphThreeContainersCycle(t *testing.T) { ctr2.config.IPCNsCtr = ctr3.config.ID ctr3.config.NetNsCtr = ctr1.config.ID - _, err = buildContainerGraph([]*Container{ctr1, ctr2, ctr3}) + _, err = BuildContainerGraph([]*Container{ctr1, ctr2, ctr3}) assert.Error(t, err) } @@ -190,7 +190,7 @@ func TestBuildContainerGraphThreeContainersNoCycle(t *testing.T) { ctr1.config.NetNsCtr = 
ctr3.config.ID ctr2.config.IPCNsCtr = ctr3.config.ID - graph, err := buildContainerGraph([]*Container{ctr1, ctr2, ctr3}) + graph, err := BuildContainerGraph([]*Container{ctr1, ctr2, ctr3}) assert.NoError(t, err) assert.Equal(t, 3, len(graph.nodes)) assert.Equal(t, 1, len(graph.noDepNodes)) @@ -215,7 +215,7 @@ func TestBuildContainerGraphFourContainersNoEdges(t *testing.T) { ctr4, err := getTestCtrN("4", manager) assert.NoError(t, err) - graph, err := buildContainerGraph([]*Container{ctr1, ctr2, ctr3, ctr4}) + graph, err := BuildContainerGraph([]*Container{ctr1, ctr2, ctr3, ctr4}) assert.NoError(t, err) assert.Equal(t, 4, len(graph.nodes)) assert.Equal(t, 4, len(graph.noDepNodes)) @@ -256,7 +256,7 @@ func TestBuildContainerGraphFourContainersTwoInCycle(t *testing.T) { ctr1.config.IPCNsCtr = ctr2.config.ID ctr2.config.UserNsCtr = ctr1.config.ID - _, err = buildContainerGraph([]*Container{ctr1, ctr2, ctr3, ctr4}) + _, err = BuildContainerGraph([]*Container{ctr1, ctr2, ctr3, ctr4}) assert.Error(t, err) } @@ -280,7 +280,7 @@ func TestBuildContainerGraphFourContainersAllInCycle(t *testing.T) { ctr3.config.NetNsCtr = ctr4.config.ID ctr4.config.UTSNsCtr = ctr1.config.ID - _, err = buildContainerGraph([]*Container{ctr1, ctr2, ctr3, ctr4}) + _, err = BuildContainerGraph([]*Container{ctr1, ctr2, ctr3, ctr4}) assert.Error(t, err) } @@ -303,7 +303,7 @@ func TestBuildContainerGraphFourContainersNoneInCycle(t *testing.T) { ctr1.config.NetNsCtr = ctr3.config.ID ctr2.config.UserNsCtr = ctr3.config.ID - graph, err := buildContainerGraph([]*Container{ctr1, ctr2, ctr3, ctr4}) + graph, err := BuildContainerGraph([]*Container{ctr1, ctr2, ctr3, ctr4}) assert.NoError(t, err) assert.Equal(t, 4, len(graph.nodes)) assert.Equal(t, 2, len(graph.noDepNodes)) diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 313f67963..f51b53e85 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -788,7 +788,7 @@ func (c *Container) startDependencies(ctx context.Context) error { } // Build a dependency graph of containers - graph, err := buildContainerGraph(depCtrs) + graph, err := BuildContainerGraph(depCtrs) if err != nil { return errors.Wrapf(err, "error generating dependency graph for container %s", c.ID()) } diff --git a/libpod/image/image.go b/libpod/image/image.go index cb7c390c6..0be6eeeb9 100644 --- a/libpod/image/image.go +++ b/libpod/image/image.go @@ -5,13 +5,13 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "os" "path/filepath" "strings" "syscall" "time" - types2 "github.com/containernetworking/cni/pkg/types" cp "github.com/containers/image/copy" "github.com/containers/image/directory" dockerarchive "github.com/containers/image/docker/archive" @@ -383,11 +383,6 @@ func (i *Image) Remove(ctx context.Context, force bool) error { return nil } -// Decompose an Image -func (i *Image) Decompose() error { - return types2.NotImplementedError -} - // TODO: Rework this method to not require an assembly of the fq name with transport /* // GetManifest tries to GET an images manifest, returns nil on success and err on failure @@ -555,7 +550,7 @@ func (i *Image) UntagImage(tag string) error { // PushImageToHeuristicDestination pushes the given image to "destination", which is heuristically parsed. // Use PushImageToReference if the destination is known precisely. 
-func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination, manifestMIMEType, authFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { +func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination, manifestMIMEType, authFile, digestFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { if destination == "" { return errors.Wrapf(syscall.EINVAL, "destination image name must be specified") } @@ -573,11 +568,11 @@ func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination return err } } - return i.PushImageToReference(ctx, dest, manifestMIMEType, authFile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, additionalDockerArchiveTags) + return i.PushImageToReference(ctx, dest, manifestMIMEType, authFile, digestFile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, additionalDockerArchiveTags) } // PushImageToReference pushes the given image to a location described by the given path -func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageReference, manifestMIMEType, authFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { +func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageReference, manifestMIMEType, authFile, digestFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { sc := GetSystemContext(signaturePolicyPath, authFile, forceCompress) sc.BlobInfoCacheDir = filepath.Join(i.imageruntime.store.GraphRoot(), "cache") @@ -599,10 +594,22 @@ func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageRefere copyOptions := getCopyOptions(sc, writer, nil, dockerRegistryOptions, signingOptions, manifestMIMEType, additionalDockerArchiveTags) copyOptions.DestinationCtx.SystemRegistriesConfPath = registries.SystemRegistriesConfPath() // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place. 
// Copy the image to the remote destination - _, err = cp.Image(ctx, policyContext, dest, src, copyOptions) + manifestBytes, err := cp.Image(ctx, policyContext, dest, src, copyOptions) if err != nil { return errors.Wrapf(err, "Error copying image to the remote destination") } + digest, err := manifest.Digest(manifestBytes) + if err != nil { + return errors.Wrapf(err, "error computing digest of manifest of new image %q", transports.ImageName(dest)) + } + + logrus.Debugf("Successfully pushed %s with digest %s", transports.ImageName(dest), digest.String()) + + if digestFile != "" { + if err = ioutil.WriteFile(digestFile, []byte(digest.String()), 0644); err != nil { + return errors.Wrapf(err, "failed to write digest to file %q", digestFile) + } + } i.newImageEvent(events.Push) return nil } @@ -1358,7 +1365,7 @@ func (i *Image) Save(ctx context.Context, source, format, output string, moreTag return err } } - if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", writer, compress, SigningOptions{}, &DockerRegistryOptions{}, additionaltags); err != nil { + if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", "", writer, compress, SigningOptions{}, &DockerRegistryOptions{}, additionaltags); err != nil { return errors.Wrapf(err, "unable to save %q", source) } i.newImageEvent(events.Save) diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go index 7c4abd25d..a9b735327 100644 --- a/libpod/in_memory_state.go +++ b/libpod/in_memory_state.go @@ -425,6 +425,26 @@ func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error { return nil } +// RewriteVolumeConfig rewrites a volume's configuration. +// This function is DANGEROUS, even with in-memory state. +// Please read the full comment in state.go before using it. +func (s *InMemoryState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error { + if !volume.valid { + return define.ErrVolumeRemoved + } + + // If the volume does not exist, return error + stateVol, ok := s.volumes[volume.Name()] + if !ok { + volume.valid = false + return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %q not found in state", volume.Name()) + } + + stateVol.config = newCfg + + return nil +} + // Volume retrieves a volume from its full name func (s *InMemoryState) Volume(name string) (*Volume, error) { if name == "" { diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index bef3f7739..fd14b2f73 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -17,7 +17,6 @@ import ( cnitypes "github.com/containernetworking/cni/pkg/types/current" "github.com/containernetworking/plugins/pkg/ns" "github.com/containers/libpod/pkg/errorhandling" - "github.com/containers/libpod/pkg/firewall" "github.com/containers/libpod/pkg/netns" "github.com/containers/libpod/pkg/rootless" "github.com/cri-o/ocicni/pkg/ocicni" @@ -86,18 +85,6 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re networkStatus = append(networkStatus, resultCurrent) } - // Add firewall rules to ensure the container has network access. - // Will not be necessary once CNI firewall plugin merges upstream. 
- // https://github.com/containernetworking/plugins/pull/75 - for _, netStatus := range networkStatus { - firewallConf := &firewall.FirewallNetConf{ - PrevResult: netStatus, - } - if err := r.firewallBackend.Add(firewallConf); err != nil { - return nil, errors.Wrapf(err, "error adding firewall rules for container %s", ctr.ID()) - } - } - return networkStatus, nil } @@ -390,26 +377,12 @@ func (r *Runtime) closeNetNS(ctr *Container) error { } // Tear down a network namespace, undoing all state associated with it. -// The CNI firewall rules will be removed, the namespace will be unmounted, -// and the file descriptor associated with it closed. func (r *Runtime) teardownNetNS(ctr *Container) error { if ctr.state.NetNS == nil { // The container has no network namespace, we're set return nil } - // Remove firewall rules we added on configuring the container. - // Will not be necessary once CNI firewall plugin merges upstream. - // https://github.com/containernetworking/plugins/pull/75 - for _, netStatus := range ctr.state.NetworkStatus { - firewallConf := &firewall.FirewallNetConf{ - PrevResult: netStatus, - } - if err := r.firewallBackend.Del(firewallConf); err != nil { - return errors.Wrapf(err, "error removing firewall rules for container %s", ctr.ID()) - } - } - logrus.Debugf("Tearing down network namespace at %s for container %s", ctr.state.NetNS.Path(), ctr.ID()) var requestedIP net.IP diff --git a/libpod/pod_api.go b/libpod/pod_api.go index c7b0353bd..e2448e92a 100644 --- a/libpod/pod_api.go +++ b/libpod/pod_api.go @@ -37,7 +37,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) { } // Build a dependency graph of containers in the pod - graph, err := buildContainerGraph(allCtrs) + graph, err := BuildContainerGraph(allCtrs) if err != nil { return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID()) } @@ -289,7 +289,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) { } // Build a dependency graph of containers in the pod - graph, err := buildContainerGraph(allCtrs) + graph, err := BuildContainerGraph(allCtrs) if err != nil { return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID()) } diff --git a/libpod/runtime.go b/libpod/runtime.go index cbbf667db..28774773e 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -23,7 +23,6 @@ import ( "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/libpod/image" "github.com/containers/libpod/libpod/lock" - "github.com/containers/libpod/pkg/firewall" sysreg "github.com/containers/libpod/pkg/registries" "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/util" @@ -77,10 +76,6 @@ var ( // place of the configuration file pointed to by ConfigPath. 
OverrideConfigPath = etcDir + "/containers/libpod.conf" - // DefaultInfraImage to use for infra container - - // DefaultInfraCommand to be run in an infra container - // DefaultSHMLockPath is the default path for SHM locks DefaultSHMLockPath = "/libpod_lock" // DefaultRootlessSHMLockPath is the default path for rootless SHM locks @@ -108,7 +103,6 @@ type Runtime struct { netPlugin ocicni.CNIPlugin conmonPath string imageRuntime *image.Runtime - firewallBackend firewall.FirewallBackend lockManager lock.Manager configuredFrom *runtimeConfiguredFrom @@ -1110,17 +1104,6 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { runtime.netPlugin = netPlugin } - // Set up a firewall backend - backendType := "" - if rootless.IsRootless() { - backendType = "none" - } - fwBackend, err := firewall.GetBackend(backendType) - if err != nil { - return err - } - runtime.firewallBackend = fwBackend - // We now need to see if the system has restarted // We check for the presence of a file in our tmp directory to verify this // This check must be locked to prevent races diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index 92b2faefb..acd317d20 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -253,10 +253,13 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai // Go through named volumes and add them. // If they don't exist they will be created using basic options. + // Maintain an array of them - we need to lock them later. + ctrNamedVolumes := make([]*Volume, 0, len(ctr.config.NamedVolumes)) for _, vol := range ctr.config.NamedVolumes { // Check if it exists already - _, err := r.state.Volume(vol.Name) + dbVol, err := r.state.Volume(vol.Name) if err == nil { + ctrNamedVolumes = append(ctrNamedVolumes, dbVol) // The volume exists, we're good continue } else if errors.Cause(err) != config2.ErrNoSuchVolume { @@ -275,6 +278,8 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai if err := ctr.copyWithTarFromImage(vol.Dest, newVol.MountPoint()); err != nil && !os.IsNotExist(err) { return nil, errors.Wrapf(err, "Failed to copy content into new volume mount %q", vol.Name) } + + ctrNamedVolumes = append(ctrNamedVolumes, newVol) } if ctr.config.LogPath == "" && ctr.config.LogDriver != JournaldLogging { @@ -291,6 +296,14 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai ctr.config.Mounts = append(ctr.config.Mounts, ctr.config.ShmDir) } + // Lock all named volumes we are adding ourself to, to ensure we can't + // use a volume being removed. 
+ for _, namedVol := range ctrNamedVolumes { + toLock := namedVol + toLock.lock.Lock() + defer toLock.lock.Unlock() + } + // Add the container to the state // TODO: May be worth looking into recovering from name/ID collisions here if ctr.config.Pod != "" { diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go index 735ffba34..9de2556b2 100644 --- a/libpod/runtime_renumber.go +++ b/libpod/runtime_renumber.go @@ -53,6 +53,23 @@ func (r *Runtime) renumberLocks() error { return err } } + allVols, err := r.state.AllVolumes() + if err != nil { + return err + } + for _, vol := range allVols { + lock, err := r.lockManager.AllocateLock() + if err != nil { + return errors.Wrapf(err, "error allocating lock for volume %s", vol.Name()) + } + + vol.config.LockID = lock.ID() + + // Write the new lock ID + if err := r.state.RewriteVolumeConfig(vol, vol.config); err != nil { + return err + } + } r.newSystemEvent(events.Renumber) diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go index d05db936b..512e778a1 100644 --- a/libpod/runtime_volume.go +++ b/libpod/runtime_volume.go @@ -36,6 +36,10 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error return nil } } + + v.lock.Lock() + defer v.lock.Unlock() + return r.removeVolume(ctx, v, force) } diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go index 84703787d..70296248c 100644 --- a/libpod/runtime_volume_linux.go +++ b/libpod/runtime_volume_linux.go @@ -28,7 +28,7 @@ func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption) } // newVolume creates a new empty volume -func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) { +func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (_ *Volume, Err error) { volume, err := newVolume(r) if err != nil { return nil, errors.Wrapf(err, "error creating volume") @@ -68,6 +68,21 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) } volume.config.MountPoint = fullVolPath + lock, err := r.lockManager.AllocateLock() + if err != nil { + return nil, errors.Wrapf(err, "error allocating lock for new volume") + } + volume.lock = lock + volume.config.LockID = volume.lock.ID() + + defer func() { + if Err != nil { + if err := volume.lock.Free(); err != nil { + logrus.Errorf("Error freeing volume lock after failed creation: %v", err) + } + } + }() + volume.valid = true // Add the volume to state @@ -110,6 +125,8 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error return errors.Wrapf(err, "error removing container %s that depends on volume %s", dep, v.Name()) } + logrus.Debugf("Removing container %s (depends on volume %q)", ctr.ID(), v.Name()) + // TODO: do we want to set force here when removing // containers? 
// I'm inclined to say no, in case someone accidentally @@ -128,12 +145,24 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error return errors.Wrapf(err, "error removing volume %s", v.Name()) } - // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes + var removalErr error + + // Free the volume's lock + if err := v.lock.Free(); err != nil { + removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name()) + } + + // Delete the mountpoint path of the volume, that is delete the volume + // from /var/lib/containers/storage/volumes if err := v.teardownStorage(); err != nil { - return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name()) + if removalErr == nil { + removalErr = errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name()) + } else { + logrus.Errorf("error cleaning up volume storage for volume %q: %v", v.Name(), err) + } } defer v.newVolumeEvent(events.Remove) logrus.Debugf("Removed volume %s", v.Name()) - return nil + return removalErr } diff --git a/libpod/state.go b/libpod/state.go index d0ad1a1f8..5d704e69a 100644 --- a/libpod/state.go +++ b/libpod/state.go @@ -115,12 +115,20 @@ type State interface { // answer is this: use this only very sparingly, and only if you really // know what you're doing. RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error - // PLEASE READ THE ABOVE DESCRIPTION BEFORE USING. + // PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING. // This function is identical to RewriteContainerConfig, save for the // fact that it is used with pods instead. // It is subject to the same conditions as RewriteContainerConfig. // Please do not use this unless you know what you're doing. RewritePodConfig(pod *Pod, newCfg *PodConfig) error + // PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING. + // This function is identical to RewriteContainerConfig, save for the + // fact that it is used with volumes instead. + // It is subject to the same conditions as RewriteContainerConfig. + // The exception is that volumes do not have IDs, so only volume name + // cannot be altered. + // Please do not use this unless you know what you're doing. + RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error // Accepts full ID of pod. // If the pod given is not in the set namespace, an error will be diff --git a/libpod/volume.go b/libpod/volume.go index 74126b49b..abfa7b3f4 100644 --- a/libpod/volume.go +++ b/libpod/volume.go @@ -2,6 +2,8 @@ package libpod import ( "time" + + "github.com/containers/libpod/libpod/lock" ) // Volume is the type used to create named volumes @@ -11,21 +13,35 @@ type Volume struct { valid bool runtime *Runtime + lock lock.Locker } // VolumeConfig holds the volume's config information type VolumeConfig struct { - // Name of the volume + // Name of the volume. Name string `json:"name"` - - Labels map[string]string `json:"labels"` - Driver string `json:"driver"` - MountPoint string `json:"mountPoint"` - CreatedTime time.Time `json:"createdAt,omitempty"` - Options map[string]string `json:"options"` - IsCtrSpecific bool `json:"ctrSpecific"` - UID int `json:"uid"` - GID int `json:"gid"` + // ID of the volume's lock. + LockID uint32 `json:"lockID"` + // Labels for the volume. + Labels map[string]string `json:"labels"` + // The volume driver. Empty string or local does not activate a volume + // driver, all other volumes will. 
+ Driver string `json:"driver"` + // The location the volume is mounted at. + MountPoint string `json:"mountPoint"` + // Time the volume was created. + CreatedTime time.Time `json:"createdAt,omitempty"` + // Options to pass to the volume driver. For the local driver, this is + // a list of mount options. For other drivers, they are passed to the + // volume driver handling the volume. + Options map[string]string `json:"options"` + // Whether this volume was created for a specific container and will be + // removed with it. + IsCtrSpecific bool `json:"ctrSpecific"` + // UID the volume will be created as. + UID int `json:"uid"` + // GID the volume will be created as. + GID int `json:"gid"` } // Name retrieves the volume's name diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go index 863640f97..41607145d 100644 --- a/pkg/adapter/containers.go +++ b/pkg/adapter/containers.go @@ -1094,28 +1094,145 @@ func (r *LocalRuntime) Port(c *cliconfig.PortValues) ([]*Container, error) { return portContainers, nil } -// GenerateSystemd creates a unit file for a container -func (r *LocalRuntime) GenerateSystemd(c *cliconfig.GenerateSystemdValues) (string, error) { - ctr, err := r.Runtime.LookupContainer(c.InputArgs[0]) +// generateServiceName generates the container name and the service name for systemd service. +func generateServiceName(c *cliconfig.GenerateSystemdValues, ctr *libpod.Container, pod *libpod.Pod) (string, string) { + var kind, name, ctrName string + if pod == nil { + kind = "container" + name = ctr.ID() + if c.Name { + name = ctr.Name() + } + ctrName = name + } else { + kind = "pod" + name = pod.ID() + ctrName = ctr.ID() + if c.Name { + name = pod.Name() + ctrName = ctr.Name() + } + } + return ctrName, fmt.Sprintf("%s-%s", kind, name) +} + +// generateSystemdgenContainerInfo is a helper to generate a +// systemdgen.ContainerInfo for `GenerateSystemd`. +func (r *LocalRuntime) generateSystemdgenContainerInfo(c *cliconfig.GenerateSystemdValues, nameOrID string, pod *libpod.Pod) (*systemdgen.ContainerInfo, bool, error) { + ctr, err := r.Runtime.LookupContainer(nameOrID) if err != nil { - return "", err + return nil, false, err } + timeout := int(ctr.StopTimeout()) if c.StopTimeout >= 0 { timeout = c.StopTimeout } - name := ctr.ID() - if c.Name { - name = ctr.Name() - } config := ctr.Config() conmonPidFile := config.ConmonPidFile if conmonPidFile == "" { - return "", errors.Errorf("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag") + return nil, true, errors.Errorf("conmon PID file path is empty, try to recreate the container with --conmon-pidfile flag") + } + + name, serviceName := generateServiceName(c, ctr, pod) + info := &systemdgen.ContainerInfo{ + ServiceName: serviceName, + ContainerName: name, + RestartPolicy: c.RestartPolicy, + PIDFile: conmonPidFile, + StopTimeout: timeout, + GenerateTimestamp: true, + } + + return info, true, nil +} + +// GenerateSystemd creates a unit file for a container or pod. +func (r *LocalRuntime) GenerateSystemd(c *cliconfig.GenerateSystemdValues) (string, error) { + // First assume it's a container. + if info, found, err := r.generateSystemdgenContainerInfo(c, c.InputArgs[0], nil); found && err != nil { + return "", err + } else if found && err == nil { + return systemdgen.CreateContainerSystemdUnit(info, c.Files) + } + + // We're either having a pod or garbage. 
+ pod, err := r.Runtime.LookupPod(c.InputArgs[0]) + if err != nil { + return "", err + } + + // Error out if the pod has no infra container, which we require to be the + // main service. + if !pod.HasInfraContainer() { + return "", fmt.Errorf("error generating systemd unit files: Pod %q has no infra container", pod.Name()) + } + + // Generate a systemdgen.ContainerInfo for the infra container. This + // ContainerInfo acts as the main service of the pod. + infraID, err := pod.InfraContainerID() + if err != nil { + return "", nil + } + podInfo, _, err := r.generateSystemdgenContainerInfo(c, infraID, pod) + if err != nil { + return "", nil + } + + // Compute the container-dependency graph for the Pod. + containers, err := pod.AllContainers() + if err != nil { + return "", err + } + if len(containers) == 0 { + return "", fmt.Errorf("error generating systemd unit files: Pod %q has no containers", pod.Name()) + } + graph, err := libpod.BuildContainerGraph(containers) + if err != nil { + return "", err + } + + // Traverse the dependency graph and create systemdgen.ContainerInfo's for + // each container. + containerInfos := []*systemdgen.ContainerInfo{podInfo} + for ctr, dependencies := range graph.DependencyMap() { + // Skip the infra container as we already generated it. + if ctr.ID() == infraID { + continue + } + ctrInfo, _, err := r.generateSystemdgenContainerInfo(c, ctr.ID(), nil) + if err != nil { + return "", err + } + // Now add the container's dependencies and at the container as a + // required service of the infra container. + for _, dep := range dependencies { + if dep.ID() == infraID { + ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, podInfo.ServiceName) + } else { + _, serviceName := generateServiceName(c, dep, nil) + ctrInfo.BoundToServices = append(ctrInfo.BoundToServices, serviceName) + } + } + podInfo.RequiredServices = append(podInfo.RequiredServices, ctrInfo.ServiceName) + containerInfos = append(containerInfos, ctrInfo) + } + + // Now generate the systemd service for all containers. 
+ builder := strings.Builder{} + for i, info := range containerInfos { + if i > 0 { + builder.WriteByte('\n') + } + out, err := systemdgen.CreateContainerSystemdUnit(info, c.Files) + if err != nil { + return "", err + } + builder.WriteString(out) } - return systemdgen.CreateSystemdUnitAsString(name, ctr.ID(), c.RestartPolicy, conmonPidFile, timeout) + return builder.String(), nil } // GetNamespaces returns namespace information about a container for PS diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go index 5a26f537f..590fef43f 100644 --- a/pkg/adapter/containers_remote.go +++ b/pkg/adapter/containers_remote.go @@ -951,7 +951,7 @@ func (r *LocalRuntime) Port(c *cliconfig.PortValues) ([]*Container, error) { // GenerateSystemd creates a systemd until for a container func (r *LocalRuntime) GenerateSystemd(c *cliconfig.GenerateSystemdValues) (string, error) { - return iopodman.GenerateSystemd().Call(r.Conn, c.InputArgs[0], c.RestartPolicy, int64(c.StopTimeout), c.Name) + return "", errors.New("systemd generation not supported for remote clients") } // GetNamespaces returns namespace information about a container for PS diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go index 7d4f97b28..0537308f8 100644 --- a/pkg/adapter/runtime.go +++ b/pkg/adapter/runtime.go @@ -201,12 +201,12 @@ func (r *LocalRuntime) RemoveVolumes(ctx context.Context, c *cliconfig.VolumeRmV } // Push is a wrapper to push an image to a registry -func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { +func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, digestfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { newImage, err := r.ImageRuntime().NewFromLocal(srcName) if err != nil { return err } - return newImage.PushImageToHeuristicDestination(ctx, destination, manifestMIMEType, authfile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, nil) + return newImage.PushImageToHeuristicDestination(ctx, destination, manifestMIMEType, authfile, digestfile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, nil) } // InspectVolumes returns a slice of volumes based on an arg list or --all diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go index 683bf1d35..8588966b6 100644 --- a/pkg/adapter/runtime_remote.go +++ b/pkg/adapter/runtime_remote.go @@ -619,7 +619,7 @@ func (r *LocalRuntime) RemoveVolumes(ctx context.Context, c *cliconfig.VolumeRmV return iopodman.VolumeRemove().Call(r.Conn, rmOpts) } -func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { +func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, digestfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions 
*image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { reply, err := iopodman.PushImage().Send(r.Conn, varlink.More, srcName, destination, forceCompress, manifestMIMEType, signingOptions.RemoveSignatures, signingOptions.SignBy) if err != nil { diff --git a/pkg/firewall/common.go b/pkg/firewall/common.go deleted file mode 100644 index a65d4f03d..000000000 --- a/pkg/firewall/common.go +++ /dev/null @@ -1,55 +0,0 @@ -package firewall - -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "net" - - "github.com/containernetworking/cni/pkg/types/current" -) - -// FirewallNetConf represents the firewall configuration. -// Nolint applied for firewall.Firewall... name duplication notice. -//nolint -type FirewallNetConf struct { - //types.NetConf - - // IptablesAdminChainName is an optional name to use instead of the default - // admin rules override chain name that includes the interface name. - IptablesAdminChainName string - - // FirewalldZone is an optional firewalld zone to place the interface into. If - // the firewalld backend is used but the zone is not given, it defaults - // to 'trusted' - FirewalldZone string - - PrevResult *current.Result -} - -// FirewallBackend is an interface to the system firewall, allowing addition and -// removal of firewall rules. -// Nolint applied for firewall.Firewall... name duplication notice. -//nolint -type FirewallBackend interface { - Add(*FirewallNetConf) error - Del(*FirewallNetConf) error -} - -func ipString(ip net.IPNet) string { - if ip.IP.To4() == nil { - return ip.IP.String() + "/128" - } - return ip.IP.String() + "/32" -} diff --git a/pkg/firewall/firewall_linux.go b/pkg/firewall/firewall_linux.go deleted file mode 100644 index 4ac45427b..000000000 --- a/pkg/firewall/firewall_linux.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build linux - -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package firewall - -import ( - "fmt" -) - -// GetBackend retrieves a firewall backend for adding or removing firewall rules -// on the system. -// Valid backend names are firewalld, iptables, and none. -// If the empty string is given, a firewalld backend will be returned if -// firewalld is running, and an iptables backend will be returned otherwise. 
-func GetBackend(backend string) (FirewallBackend, error) { - switch backend { - case "firewalld": - return newFirewalldBackend() - case "iptables": - return newIptablesBackend() - case "none": - return newNoneBackend() - case "": - // Default to firewalld if it's running - if isFirewalldRunning() { - return newFirewalldBackend() - } - - // Otherwise iptables - return newIptablesBackend() - default: - return nil, fmt.Errorf("unrecognized firewall backend %q", backend) - } -} diff --git a/pkg/firewall/firewall_none.go b/pkg/firewall/firewall_none.go deleted file mode 100644 index 9add24842..000000000 --- a/pkg/firewall/firewall_none.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package firewall - -import ( - "fmt" -) - -// FirewallNone is a firewall backend for environments where manipulating the -// system firewall is unsupported (for example, when running without root). -// Nolint applied to avoid firewall.FirewallNone name duplication notes. -//nolint -type FirewallNone struct{} - -func newNoneBackend() (FirewallBackend, error) { - return &FirewallNone{}, nil -} - -// Add adds a rule to the system firewall. -// No action is taken and an error is unconditionally returned as this backend -// does not support manipulating the firewall. -func (f *FirewallNone) Add(conf *FirewallNetConf) error { - return fmt.Errorf("cannot modify system firewall rules") -} - -// Del deletes a rule from the system firewall. -// No action is taken and an error is unconditionally returned as this backend -// does not support manipulating the firewall. -func (f *FirewallNone) Del(conf *FirewallNetConf) error { - return fmt.Errorf("cannot modify system firewall rules") -} diff --git a/pkg/firewall/firewall_unsupported.go b/pkg/firewall/firewall_unsupported.go deleted file mode 100644 index 24c07a8a9..000000000 --- a/pkg/firewall/firewall_unsupported.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !linux - -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package firewall - -import ( - "fmt" -) - -// GetBackend retrieves a firewall backend for adding or removing firewall rules -// on the system. 
-func GetBackend(backend string) (FirewallBackend, error) { - return nil, fmt.Errorf("firewall backends are not presently supported on this OS") -} diff --git a/pkg/firewall/firewalld.go b/pkg/firewall/firewalld.go deleted file mode 100644 index 15e845cb7..000000000 --- a/pkg/firewall/firewalld.go +++ /dev/null @@ -1,122 +0,0 @@ -// +build linux - -// Copyright 2018 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package firewall - -import ( - "fmt" - "github.com/sirupsen/logrus" - "strings" - - "github.com/godbus/dbus" -) - -const ( - dbusName = "org.freedesktop.DBus" - dbusPath = "/org/freedesktop/DBus" - dbusGetNameOwnerMethod = "GetNameOwner" - - firewalldName = "org.fedoraproject.FirewallD1" - firewalldPath = "/org/fedoraproject/FirewallD1" - firewalldZoneInterface = "org.fedoraproject.FirewallD1.zone" - firewalldAddSourceMethod = "addSource" - firewalldRemoveSourceMethod = "removeSource" - - errZoneAlreadySet = "ZONE_ALREADY_SET" -) - -// Only used for testcases to override the D-Bus connection -var testConn *dbus.Conn - -type fwdBackend struct { - conn *dbus.Conn -} - -// fwdBackend implements the FirewallBackend interface -var _ FirewallBackend = &fwdBackend{} - -func getConn() (*dbus.Conn, error) { - if testConn != nil { - return testConn, nil - } - return dbus.SystemBus() -} - -// isFirewalldRunning checks whether firewalld is running. 
-func isFirewalldRunning() bool { - conn, err := getConn() - if err != nil { - return false - } - - dbusObj := conn.Object(dbusName, dbusPath) - var res string - if err := dbusObj.Call(dbusName+"."+dbusGetNameOwnerMethod, 0, firewalldName).Store(&res); err != nil { - return false - } - - return true -} - -func newFirewalldBackend() (FirewallBackend, error) { - conn, err := getConn() - if err != nil { - return nil, err - } - - backend := &fwdBackend{ - conn: conn, - } - return backend, nil -} - -func getFirewalldZone(conf *FirewallNetConf) string { - if conf.FirewalldZone != "" { - return conf.FirewalldZone - } - - return "trusted" -} - -func (fb *fwdBackend) Add(conf *FirewallNetConf) error { - zone := getFirewalldZone(conf) - - for _, ip := range conf.PrevResult.IPs { - ipStr := ipString(ip.Address) - // Add a firewalld rule which assigns the given source IP to the given zone - firewalldObj := fb.conn.Object(firewalldName, firewalldPath) - var res string - if err := firewalldObj.Call(firewalldZoneInterface+"."+firewalldAddSourceMethod, 0, zone, ipStr).Store(&res); err != nil { - if !strings.Contains(err.Error(), errZoneAlreadySet) { - return fmt.Errorf("failed to add the address %v to %v zone: %v", ipStr, zone, err) - } - } - } - return nil -} - -func (fb *fwdBackend) Del(conf *FirewallNetConf) error { - for _, ip := range conf.PrevResult.IPs { - ipStr := ipString(ip.Address) - // Remove firewalld rules which assigned the given source IP to the given zone - firewalldObj := fb.conn.Object(firewalldName, firewalldPath) - var res string - if err := firewalldObj.Call(firewalldZoneInterface+"."+firewalldRemoveSourceMethod, 0, getFirewalldZone(conf), ipStr).Store(&res); err != nil { - logrus.Errorf("unable to store firewallobj") - } - } - return nil -} diff --git a/pkg/firewall/iptables.go b/pkg/firewall/iptables.go deleted file mode 100644 index 169ddc1d7..000000000 --- a/pkg/firewall/iptables.go +++ /dev/null @@ -1,195 +0,0 @@ -// +build linux - -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This is a "meta-plugin". It reads in its own netconf, it does not create -// any network interface but just changes the network sysctl. 
- -package firewall - -import ( - "fmt" - "github.com/sirupsen/logrus" - "net" - - "github.com/coreos/go-iptables/iptables" -) - -func getPrivChainRules(ip string) [][]string { - var rules [][]string - rules = append(rules, []string{"-d", ip, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"}) - rules = append(rules, []string{"-s", ip, "-j", "ACCEPT"}) - return rules -} - -func ensureChain(ipt *iptables.IPTables, table, chain string) error { - chains, err := ipt.ListChains(table) - if err != nil { - return fmt.Errorf("failed to list iptables chains: %v", err) - } - for _, ch := range chains { - if ch == chain { - return nil - } - } - - return ipt.NewChain(table, chain) -} - -func generateFilterRule(privChainName string) []string { - return []string{"-m", "comment", "--comment", "CNI firewall plugin rules", "-j", privChainName} -} - -func cleanupRules(ipt *iptables.IPTables, privChainName string, rules [][]string) { - for _, rule := range rules { - if err := ipt.Delete("filter", privChainName, rule...); err != nil { - logrus.Errorf("failed to delete iptables rule %s", privChainName) - } - } -} - -func ensureFirstChainRule(ipt *iptables.IPTables, chain string, rule []string) error { - exists, err := ipt.Exists("filter", chain, rule...) - if !exists && err == nil { - err = ipt.Insert("filter", chain, 1, rule...) - } - return err -} - -func (ib *iptablesBackend) setupChains(ipt *iptables.IPTables) error { - privRule := generateFilterRule(ib.privChainName) - adminRule := generateFilterRule(ib.adminChainName) - - // Ensure our private chains exist - if err := ensureChain(ipt, "filter", ib.privChainName); err != nil { - return err - } - if err := ensureChain(ipt, "filter", ib.adminChainName); err != nil { - return err - } - - // Ensure our filter rule exists in the forward chain - if err := ensureFirstChainRule(ipt, "FORWARD", privRule); err != nil { - return err - } - - // Ensure our admin override chain rule exists in our private chain - if err := ensureFirstChainRule(ipt, ib.privChainName, adminRule); err != nil { - return err - } - - return nil -} - -func protoForIP(ip net.IPNet) iptables.Protocol { - if ip.IP.To4() != nil { - return iptables.ProtocolIPv4 - } - return iptables.ProtocolIPv6 -} - -func (ib *iptablesBackend) addRules(conf *FirewallNetConf, ipt *iptables.IPTables, proto iptables.Protocol) error { - rules := make([][]string, 0) - for _, ip := range conf.PrevResult.IPs { - if protoForIP(ip.Address) == proto { - rules = append(rules, getPrivChainRules(ipString(ip.Address))...) - } - } - - if len(rules) > 0 { - if err := ib.setupChains(ipt); err != nil { - return err - } - - // Clean up on any errors - var err error - defer func() { - if err != nil { - cleanupRules(ipt, ib.privChainName, rules) - } - }() - - for _, rule := range rules { - err = ipt.AppendUnique("filter", ib.privChainName, rule...) - if err != nil { - return err - } - } - } - - return nil -} - -func (ib *iptablesBackend) delRules(conf *FirewallNetConf, ipt *iptables.IPTables, proto iptables.Protocol) error { - rules := make([][]string, 0) - for _, ip := range conf.PrevResult.IPs { - if protoForIP(ip.Address) == proto { - rules = append(rules, getPrivChainRules(ipString(ip.Address))...) 
- } - } - - if len(rules) > 0 { - cleanupRules(ipt, ib.privChainName, rules) - } - - return nil -} - -type iptablesBackend struct { - protos map[iptables.Protocol]*iptables.IPTables - privChainName string - adminChainName string -} - -// iptablesBackend implements the FirewallBackend interface -var _ FirewallBackend = &iptablesBackend{} - -func newIptablesBackend() (FirewallBackend, error) { - adminChainName := "CNI-ADMIN" - - backend := &iptablesBackend{ - privChainName: "CNI-FORWARD", - adminChainName: adminChainName, - protos: make(map[iptables.Protocol]*iptables.IPTables), - } - - for _, proto := range []iptables.Protocol{iptables.ProtocolIPv4, iptables.ProtocolIPv6} { - ipt, err := iptables.NewWithProtocol(proto) - if err != nil { - return nil, fmt.Errorf("could not initialize iptables protocol %v: %v", proto, err) - } - backend.protos[proto] = ipt - } - - return backend, nil -} - -func (ib *iptablesBackend) Add(conf *FirewallNetConf) error { - for proto, ipt := range ib.protos { - if err := ib.addRules(conf, ipt, proto); err != nil { - return err - } - } - return nil -} - -func (ib *iptablesBackend) Del(conf *FirewallNetConf) error { - for proto, ipt := range ib.protos { - if err := ib.delRules(conf, ipt, proto); err != nil { - logrus.Errorf("failed to delete iptables backend rule %s", conf.IptablesAdminChainName) - } - } - return nil -} diff --git a/pkg/systemdgen/systemdgen.go b/pkg/systemdgen/systemdgen.go index 06c5ebde5..09d3c6fd5 100644 --- a/pkg/systemdgen/systemdgen.go +++ b/pkg/systemdgen/systemdgen.go @@ -1,29 +1,59 @@ package systemdgen import ( + "bytes" "fmt" + "io/ioutil" "os" + "path/filepath" + "sort" + "text/template" + "time" + "github.com/containers/libpod/version" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -var template = `[Unit] -Description=%s Podman Container -[Service] -Restart=%s -ExecStart=%s start %s -ExecStop=%s stop -t %d %s -KillMode=none -Type=forking -PIDFile=%s -[Install] -WantedBy=multi-user.target` +// ContainerInfo contains data required for generating a container's systemd +// unit file. +type ContainerInfo struct { + // ServiceName of the systemd service. + ServiceName string + // Name or ID of the container. + ContainerName string + // InfraContainer of the pod. + InfraContainer string + // StopTimeout sets the timeout Podman waits before killing the container + // during service stop. + StopTimeout int + // RestartPolicy of the systemd unit (e.g., no, on-failure, always). + RestartPolicy string + // PIDFile of the service. Required for forking services. Must point to the + // PID of the associated conmon process. + PIDFile string + // GenerateTimestamp, if set the generated unit file has a time stamp. + GenerateTimestamp bool + // BoundToServices are the services this service binds to. Note that this + // service runs after them. + BoundToServices []string + // RequiredServices are services this service requires. Note that this + // service runs before them. + RequiredServices []string + // PodmanVersion for the header. Will be set internally. Will be auto-filled + // if left empty. + PodmanVersion string + // Executable is the path to the podman executable. Will be auto-filled if + // left empty. + Executable string + // TimeStamp at the time of creating the unit file. Will be set internally. 
+ TimeStamp string +} var restartPolicies = []string{"no", "on-success", "on-failure", "on-abnormal", "on-watchdog", "on-abort", "always"} -// ValidateRestartPolicy checks that the user-provided policy is valid -func ValidateRestartPolicy(restart string) error { +// validateRestartPolicy checks that the user-provided policy is valid. +func validateRestartPolicy(restart string) error { for _, i := range restartPolicies { if i == restart { return nil @@ -32,28 +62,87 @@ func ValidateRestartPolicy(restart string) error { return errors.Errorf("%s is not a valid restart policy", restart) } -// CreateSystemdUnitAsString takes variables to create a systemd unit file used to control -// a libpod container -func CreateSystemdUnitAsString(name, cid, restart, pidFile string, stopTimeout int) (string, error) { - podmanExe := getPodmanExecutable() - return createSystemdUnitAsString(podmanExe, name, cid, restart, pidFile, stopTimeout) -} +const containerTemplate = `# {{.ServiceName}}.service +# autogenerated by Podman {{.PodmanVersion}} +{{- if .TimeStamp}} +# {{.TimeStamp}} +{{- end}} + +[Unit] +Description=Podman {{.ServiceName}}.service +Documentation=man:podman-generate-systemd(1) +{{- if .BoundToServices}} +RefuseManualStart=yes +RefuseManualStop=yes +BindsTo={{- range $index, $value := .BoundToServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}} +After={{- range $index, $value := .BoundToServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}} +{{- end}} +{{- if .RequiredServices}} +Requires={{- range $index, $value := .RequiredServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}} +Before={{- range $index, $value := .RequiredServices -}}{{if $index}} {{end}}{{ $value }}.service{{end}} +{{- end}} + +[Service] +Restart={{.RestartPolicy}} +ExecStart={{.Executable}} start {{.ContainerName}} +ExecStop={{.Executable}} stop {{if (ge .StopTimeout 0)}}-t {{.StopTimeout}}{{end}} {{.ContainerName}} +KillMode=none +Type=forking +PIDFile={{.PIDFile}} + +[Install] +WantedBy=multi-user.target` -func createSystemdUnitAsString(exe, name, cid, restart, pidFile string, stopTimeout int) (string, error) { - if err := ValidateRestartPolicy(restart); err != nil { +// CreateContainerSystemdUnit creates a systemd unit file for a container. +func CreateContainerSystemdUnit(info *ContainerInfo, generateFiles bool) (string, error) { + if err := validateRestartPolicy(info.RestartPolicy); err != nil { return "", err } - unit := fmt.Sprintf(template, name, restart, exe, name, exe, stopTimeout, name, pidFile) - return unit, nil -} + // Make sure the executable is set. + if info.Executable == "" { + executable, err := os.Executable() + if err != nil { + executable = "/usr/bin/podman" + logrus.Warnf("Could not obtain podman executable location, using default %s", executable) + } + info.Executable = executable + } + + if info.PodmanVersion == "" { + info.PodmanVersion = version.Version + } + if info.GenerateTimestamp { + info.TimeStamp = fmt.Sprintf("%v", time.Now().Format(time.UnixDate)) + } -func getPodmanExecutable() string { - podmanExe, err := os.Executable() + // Sort the slices to assure a deterministic output. + sort.Strings(info.RequiredServices) + sort.Strings(info.BoundToServices) + + // Generate the template and compile it. 
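(Aside, not part of the diff: a minimal, hypothetical caller of the new API. With `generateFiles` set to false the unit is returned as a string; with true it is written to `<ServiceName>.service` in the current directory and the file path is returned. All names and paths below are illustrative.)

```go
package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/systemdgen"
)

func main() {
	// Made-up container data; Executable and PodmanVersion are set explicitly
	// so the output does not depend on the local environment.
	info := &systemdgen.ContainerInfo{
		Executable:    "/usr/bin/podman",
		ServiceName:   "container-demo",
		ContainerName: "demo",
		RestartPolicy: "always",
		StopTimeout:   10,
		PIDFile:       "/run/demo-conmon.pid",
		PodmanVersion: "CI",
	}

	// generateFiles=false: return the rendered unit instead of writing it.
	unit, err := systemdgen.CreateContainerSystemdUnit(info, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(unit)
}
```

Presumably the new `--files` CLI flag exercised in the e2e tests below maps to calling this function with `generateFiles` set to true.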
+ templ, err := template.New("systemd_service_file").Parse(containerTemplate) if err != nil { - podmanExe = "/usr/bin/podman" - logrus.Warnf("Could not obtain podman executable location, using default %s", podmanExe) + return "", errors.Wrap(err, "error parsing systemd service template") } - return podmanExe + var buf bytes.Buffer + if err := templ.Execute(&buf, info); err != nil { + return "", err + } + + if !generateFiles { + return buf.String(), nil + } + + buf.WriteByte('\n') + cwd, err := os.Getwd() + if err != nil { + return "", errors.Wrap(err, "error getting current working directory") + } + path := filepath.Join(cwd, fmt.Sprintf("%s.service", info.ServiceName)) + if err := ioutil.WriteFile(path, buf.Bytes(), 0644); err != nil { + return "", errors.Wrap(err, "error generating systemd unit") + } + return path, nil } diff --git a/pkg/systemdgen/systemdgen_test.go b/pkg/systemdgen/systemdgen_test.go index e413b24ce..1ddb0c514 100644 --- a/pkg/systemdgen/systemdgen_test.go +++ b/pkg/systemdgen/systemdgen_test.go @@ -5,36 +5,41 @@ import ( ) func TestValidateRestartPolicy(t *testing.T) { - type args struct { + type ContainerInfo struct { restart string } tests := []struct { - name string - args args - wantErr bool + name string + ContainerInfo ContainerInfo + wantErr bool }{ - {"good-on", args{restart: "no"}, false}, - {"good-on-success", args{restart: "on-success"}, false}, - {"good-on-failure", args{restart: "on-failure"}, false}, - {"good-on-abnormal", args{restart: "on-abnormal"}, false}, - {"good-on-watchdog", args{restart: "on-watchdog"}, false}, - {"good-on-abort", args{restart: "on-abort"}, false}, - {"good-always", args{restart: "always"}, false}, - {"fail", args{restart: "foobar"}, true}, - {"failblank", args{restart: ""}, true}, + {"good-on", ContainerInfo{restart: "no"}, false}, + {"good-on-success", ContainerInfo{restart: "on-success"}, false}, + {"good-on-failure", ContainerInfo{restart: "on-failure"}, false}, + {"good-on-abnormal", ContainerInfo{restart: "on-abnormal"}, false}, + {"good-on-watchdog", ContainerInfo{restart: "on-watchdog"}, false}, + {"good-on-abort", ContainerInfo{restart: "on-abort"}, false}, + {"good-always", ContainerInfo{restart: "always"}, false}, + {"fail", ContainerInfo{restart: "foobar"}, true}, + {"failblank", ContainerInfo{restart: ""}, true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := ValidateRestartPolicy(tt.args.restart); (err != nil) != tt.wantErr { + if err := validateRestartPolicy(tt.ContainerInfo.restart); (err != nil) != tt.wantErr { t.Errorf("ValidateRestartPolicy() error = %v, wantErr %v", err, tt.wantErr) } }) } } -func TestCreateSystemdUnitAsString(t *testing.T) { - goodID := `[Unit] -Description=639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401 Podman Container +func TestCreateContainerSystemdUnit(t *testing.T) { + goodID := `# container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service +# autogenerated by Podman CI + +[Unit] +Description=Podman container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401.service +Documentation=man:podman-generate-systemd(1) + [Service] Restart=always ExecStart=/usr/bin/podman start 639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401 @@ -42,11 +47,17 @@ ExecStop=/usr/bin/podman stop -t 10 639c53578af4d84b8800b4635fa4e680ee80fd67e0e6 KillMode=none Type=forking PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid + 
[Install] WantedBy=multi-user.target` - goodName := `[Unit] -Description=foobar Podman Container + goodName := `# container-foobar.service +# autogenerated by Podman CI + +[Unit] +Description=Podman container-foobar.service +Documentation=man:podman-generate-systemd(1) + [Service] Restart=always ExecStart=/usr/bin/podman start foobar @@ -54,56 +65,121 @@ ExecStop=/usr/bin/podman stop -t 10 foobar KillMode=none Type=forking PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid + +[Install] +WantedBy=multi-user.target` + + goodNameBoundTo := `# container-foobar.service +# autogenerated by Podman CI + +[Unit] +Description=Podman container-foobar.service +Documentation=man:podman-generate-systemd(1) +RefuseManualStart=yes +RefuseManualStop=yes +BindsTo=a.service b.service c.service pod.service +After=a.service b.service c.service pod.service + +[Service] +Restart=always +ExecStart=/usr/bin/podman start foobar +ExecStop=/usr/bin/podman stop -t 10 foobar +KillMode=none +Type=forking +PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid + +[Install] +WantedBy=multi-user.target` + + podGoodName := `# pod-123abc.service +# autogenerated by Podman CI + +[Unit] +Description=Podman pod-123abc.service +Documentation=man:podman-generate-systemd(1) +Requires=container-1.service container-2.service +Before=container-1.service container-2.service + +[Service] +Restart=always +ExecStart=/usr/bin/podman start jadda-jadda-infra +ExecStop=/usr/bin/podman stop -t 10 jadda-jadda-infra +KillMode=none +Type=forking +PIDFile=/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid + [Install] WantedBy=multi-user.target` - type args struct { - exe string - name string - cid string - restart string - pidFile string - stopTimeout int - } tests := []struct { name string - args args + info ContainerInfo want string wantErr bool }{ {"good with id", - args{ - "/usr/bin/podman", - "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401", - "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401", - "always", - "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid", - 10, + ContainerInfo{ + Executable: "/usr/bin/podman", + ServiceName: "container-639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401", + ContainerName: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401", + RestartPolicy: "always", + PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid", + StopTimeout: 10, + PodmanVersion: "CI", }, goodID, false, }, {"good with name", - args{ - "/usr/bin/podman", - "foobar", - "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401", - "always", - "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid", - 10, + ContainerInfo{ + Executable: "/usr/bin/podman", + ServiceName: "container-foobar", + ContainerName: "foobar", + RestartPolicy: "always", + PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid", + StopTimeout: 10, + PodmanVersion: "CI", }, goodName, false, }, + {"good with name and bound to", 
+ ContainerInfo{ + Executable: "/usr/bin/podman", + ServiceName: "container-foobar", + ContainerName: "foobar", + RestartPolicy: "always", + PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid", + StopTimeout: 10, + PodmanVersion: "CI", + BoundToServices: []string{"pod", "a", "b", "c"}, + }, + goodNameBoundTo, + false, + }, + {"pod", + ContainerInfo{ + Executable: "/usr/bin/podman", + ServiceName: "pod-123abc", + ContainerName: "jadda-jadda-infra", + RestartPolicy: "always", + PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid", + StopTimeout: 10, + PodmanVersion: "CI", + RequiredServices: []string{"container-1", "container-2"}, + }, + podGoodName, + false, + }, {"bad restart policy", - args{ - "/usr/bin/podman", - "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401", - "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401", - "never", - "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid", - 10, + ContainerInfo{ + Executable: "/usr/bin/podman", + ServiceName: "639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401", + RestartPolicy: "never", + PIDFile: "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid", + StopTimeout: 10, + PodmanVersion: "CI", }, "", true, @@ -111,13 +187,13 @@ WantedBy=multi-user.target` } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := createSystemdUnitAsString(tt.args.exe, tt.args.name, tt.args.cid, tt.args.restart, tt.args.pidFile, tt.args.stopTimeout) + got, err := CreateContainerSystemdUnit(&tt.info, false) if (err != nil) != tt.wantErr { - t.Errorf("CreateSystemdUnitAsString() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("CreateContainerSystemdUnit() error = \n%v, wantErr \n%v", err, tt.wantErr) return } if got != tt.want { - t.Errorf("CreateSystemdUnitAsString() = %v, want %v", got, tt.want) + t.Errorf("CreateContainerSystemdUnit() = \n%v, want \n%v", got, tt.want) } }) } diff --git a/pkg/varlinkapi/generate.go b/pkg/varlinkapi/generate.go index 9dc20d582..19010097d 100644 --- a/pkg/varlinkapi/generate.go +++ b/pkg/varlinkapi/generate.go @@ -4,9 +4,9 @@ package varlinkapi import ( "encoding/json" + "github.com/containers/libpod/cmd/podman/shared" iopodman "github.com/containers/libpod/cmd/podman/varlink" - "github.com/containers/libpod/pkg/systemdgen" ) // GenerateKube ... @@ -29,24 +29,3 @@ func (i *LibpodAPI) GenerateKube(call iopodman.VarlinkCall, name string, service Service: string(servB), }) } - -// GenerateSystemd ... 
-func (i *LibpodAPI) GenerateSystemd(call iopodman.VarlinkCall, nameOrID, restart string, stopTimeout int64, useName bool) error { - ctr, err := i.Runtime.LookupContainer(nameOrID) - if err != nil { - return call.ReplyErrorOccurred(err.Error()) - } - timeout := int(ctr.StopTimeout()) - if stopTimeout >= 0 { - timeout = int(stopTimeout) - } - name := ctr.ID() - if useName { - name = ctr.Name() - } - unit, err := systemdgen.CreateSystemdUnitAsString(name, ctr.ID(), restart, ctr.Config().StaticDir, timeout) - if err != nil { - return call.ReplyErrorOccurred(err.Error()) - } - return call.ReplyGenerateSystemd(unit) -} diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go index fe7f11b4d..0bdbec177 100644 --- a/pkg/varlinkapi/images.go +++ b/pkg/varlinkapi/images.go @@ -142,7 +142,14 @@ func (i *LibpodAPI) BuildImage(call iopodman.VarlinkCall, config iopodman.BuildI return call.ReplyErrorOccurred(fmt.Sprintf("unable to untar context dir %s", contextDir)) } logrus.Debugf("untar of %s successful", contextDir) - + defer func() { + if err := os.Remove(contextDir); err != nil { + logrus.Errorf("unable to delete file '%s': %q", contextDir, err) + } + if err := os.RemoveAll(newContextDir); err != nil { + logrus.Errorf("unable to delete directory '%s': %q", newContextDir, err) + } + }() // All output (stdout, stderr) is captured in output as well var output bytes.Buffer @@ -331,7 +338,7 @@ func (i *LibpodAPI) PushImage(call iopodman.VarlinkCall, name, tag string, compr dockerRegistryOptions := image.DockerRegistryOptions{} if format != "" { switch format { - case "oci": //nolint + case "oci": // nolint manifestType = v1.MediaTypeImageManifest case "v2s1": manifestType = manifest.DockerV2Schema1SignedMediaType @@ -353,7 +360,12 @@ func (i *LibpodAPI) PushImage(call iopodman.VarlinkCall, name, tag string, compr output := bytes.NewBuffer([]byte{}) c := make(chan error) go func() { - err := newImage.PushImageToHeuristicDestination(getContext(), destname, manifestType, "", "", output, compress, so, &dockerRegistryOptions, nil) + writer := bytes.NewBuffer([]byte{}) + err := newImage.PushImageToHeuristicDestination(getContext(), destname, manifestType, "", "", "", writer, compress, so, &dockerRegistryOptions, nil) + if err != nil { + c <- err + } + _, err = io.CopyBuffer(output, writer, nil) c <- err close(c) }() @@ -381,6 +393,7 @@ func (i *LibpodAPI) PushImage(call iopodman.VarlinkCall, name, tag string, compr } br := iopodman.MoreResponse{ Logs: log, + Id: newImage.ID(), } call.ReplyPushImage(br) log = []string{} @@ -396,6 +409,7 @@ func (i *LibpodAPI) PushImage(call iopodman.VarlinkCall, name, tag string, compr br := iopodman.MoreResponse{ Logs: log, + Id: newImage.ID(), } return call.ReplyPushImage(br) } @@ -523,7 +537,7 @@ func (i *LibpodAPI) Commit(call iopodman.VarlinkCall, name, imageName string, ch } sc := image.GetSystemContext(rtc.SignaturePolicyPath, "", false) switch manifestType { - case "oci", "": //nolint + case "oci", "": // nolint mimeType = buildah.OCIv1ImageManifest case "docker": mimeType = manifest.DockerV2Schema2MediaType @@ -549,7 +563,6 @@ func (i *LibpodAPI) Commit(call iopodman.VarlinkCall, name, imageName string, ch } c := make(chan error) - defer close(c) go func() { newImage, err = ctr.Commit(getContext(), imageName, options) @@ -557,6 +570,7 @@ func (i *LibpodAPI) Commit(call iopodman.VarlinkCall, name, imageName string, ch c <- err } c <- nil + close(c) }() // reply is the func being sent to the output forwarder. 
in this case it is replying @@ -615,7 +629,7 @@ func (i *LibpodAPI) ExportImage(call iopodman.VarlinkCall, name, destination str return err } - if err := newImage.PushImageToHeuristicDestination(getContext(), destination, "", "", "", nil, compress, image.SigningOptions{}, &image.DockerRegistryOptions{}, additionalTags); err != nil { + if err := newImage.PushImageToHeuristicDestination(getContext(), destination, "", "", "", "", nil, compress, image.SigningOptions{}, &image.DockerRegistryOptions{}, additionalTags); err != nil { return call.ReplyErrorOccurred(err.Error()) } return call.ReplyExportImage(newImage.ID()) @@ -814,7 +828,7 @@ func (i *LibpodAPI) ImageSave(call iopodman.VarlinkCall, options iopodman.ImageS // Image has been saved to `output` if outputToDir { // If the output is a directory, we need to tar up the directory to send it back - //Create a tempfile for the directory tarball + // Create a tempfile for the directory tarball outputFile, err := ioutil.TempFile("", "varlink_save_dir") if err != nil { return err diff --git a/test/README.md b/test/README.md index 9bea679dc..d7710cc95 100644 --- a/test/README.md +++ b/test/README.md @@ -39,8 +39,9 @@ The following instructions assume your GOPATH is ~/go. Adjust as needed for your environment. ### Installing ginkgo -Build ginkgo and install it under $GOPATH/bin with the following command: +Build ginkgo and install it under $GOPATH/bin with the following commands: ``` +export GOCACHE="$(mktemp -d)" GOPATH=~/go make .install.ginkgo ``` If your PATH does not include $GOPATH/bin, you might consider adding it. diff --git a/test/e2e/generate_systemd_test.go b/test/e2e/generate_systemd_test.go index 5bb040206..314743a92 100644 --- a/test/e2e/generate_systemd_test.go +++ b/test/e2e/generate_systemd_test.go @@ -3,10 +3,11 @@ package integration import ( + "os" + . "github.com/containers/libpod/test/utils" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - "os" ) var _ = Describe("Podman generate systemd", func() { @@ -33,7 +34,7 @@ var _ = Describe("Podman generate systemd", func() { }) - It("podman generate systemd on bogus container", func() { + It("podman generate systemd on bogus container/pod", func() { session := podmanTest.Podman([]string{"generate", "systemd", "foobar"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Not(Equal(0))) @@ -51,6 +52,19 @@ var _ = Describe("Podman generate systemd", func() { Expect(session.ExitCode()).To(Not(Equal(0))) }) + It("podman generate systemd good timeout value", func() { + session := podmanTest.Podman([]string{"create", "--name", "foobar", "alpine", "top"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"generate", "systemd", "--timeout", "1234", "foobar"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + found, _ := session.GrepString(" stop -t 1234 ") + Expect(found).To(BeTrue()) + }) + It("podman generate systemd", func() { n := podmanTest.Podman([]string{"run", "--name", "nginx", "-dt", nginx}) n.WaitWithDefaultTimeout() @@ -61,6 +75,23 @@ var _ = Describe("Podman generate systemd", func() { Expect(session.ExitCode()).To(Equal(0)) }) + It("podman generate systemd --files --name", func() { + n := podmanTest.Podman([]string{"run", "--name", "nginx", "-dt", nginx}) + n.WaitWithDefaultTimeout() + Expect(n.ExitCode()).To(Equal(0)) + + session := podmanTest.Podman([]string{"generate", "systemd", "--files", "--name", "nginx"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + for _, file := range session.OutputToStringArray() { + os.Remove(file) + } + + found, _ := session.GrepString("/container-nginx.service") + Expect(found).To(BeTrue()) + }) + It("podman generate systemd with timeout", func() { n := podmanTest.Podman([]string{"run", "--name", "nginx", "-dt", nginx}) n.WaitWithDefaultTimeout() @@ -69,6 +100,81 @@ var _ = Describe("Podman generate systemd", func() { session := podmanTest.Podman([]string{"generate", "systemd", "--timeout", "5", "nginx"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) + + found, _ := session.GrepString("podman stop -t 5") + Expect(found).To(BeTrue()) }) + It("podman generate systemd pod --name", func() { + n := podmanTest.Podman([]string{"pod", "create", "--name", "foo"}) + n.WaitWithDefaultTimeout() + Expect(n.ExitCode()).To(Equal(0)) + + n = podmanTest.Podman([]string{"create", "--pod", "foo", "--name", "foo-1", "alpine", "top"}) + n.WaitWithDefaultTimeout() + Expect(n.ExitCode()).To(Equal(0)) + + n = podmanTest.Podman([]string{"create", "--pod", "foo", "--name", "foo-2", "alpine", "top"}) + n.WaitWithDefaultTimeout() + Expect(n.ExitCode()).To(Equal(0)) + + session := podmanTest.Podman([]string{"generate", "systemd", "--timeout", "42", "--name", "foo"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + // Grepping the output (in addition to unit tests) + found, _ := session.GrepString("# pod-foo.service") + Expect(found).To(BeTrue()) + + found, _ = session.GrepString("Requires=container-foo-1.service container-foo-2.service") + Expect(found).To(BeTrue()) + + found, _ = session.GrepString("# container-foo-1.service") + Expect(found).To(BeTrue()) + + found, _ = session.GrepString(" start foo-1") + Expect(found).To(BeTrue()) + + found, _ = session.GrepString("-infra") // infra container + Expect(found).To(BeTrue()) + + found, _ = 
session.GrepString("# container-foo-2.service") + Expect(found).To(BeTrue()) + + found, _ = session.GrepString(" stop -t 42 foo-2") + Expect(found).To(BeTrue()) + + found, _ = session.GrepString("BindsTo=pod-foo.service") + Expect(found).To(BeTrue()) + + found, _ = session.GrepString("PIDFile=") + Expect(found).To(BeTrue()) + + found, _ = session.GrepString("/userdata/conmon.pid") + Expect(found).To(BeTrue()) + }) + + It("podman generate systemd pod --name --files", func() { + n := podmanTest.Podman([]string{"pod", "create", "--name", "foo"}) + n.WaitWithDefaultTimeout() + Expect(n.ExitCode()).To(Equal(0)) + + n = podmanTest.Podman([]string{"create", "--pod", "foo", "--name", "foo-1", "alpine", "top"}) + n.WaitWithDefaultTimeout() + Expect(n.ExitCode()).To(Equal(0)) + + session := podmanTest.Podman([]string{"generate", "systemd", "--name", "--files", "foo"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + for _, file := range session.OutputToStringArray() { + os.Remove(file) + } + + found, _ := session.GrepString("/pod-foo.service") + Expect(found).To(BeTrue()) + + found, _ = session.GrepString("/container-foo-1.service") + Expect(found).To(BeTrue()) + }) }) diff --git a/test/e2e/push_test.go b/test/e2e/push_test.go index cf6279f2f..4360eeece 100644 --- a/test/e2e/push_test.go +++ b/test/e2e/push_test.go @@ -76,6 +76,14 @@ var _ = Describe("Podman push", func() { push := podmanTest.PodmanNoCache([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"}) push.WaitWithDefaultTimeout() Expect(push.ExitCode()).To(Equal(0)) + + // Test --digestfile option + push2 := podmanTest.PodmanNoCache([]string{"push", "--tls-verify=false", "--digestfile=/tmp/digestfile.txt", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"}) + push2.WaitWithDefaultTimeout() + fi, err := os.Lstat("/tmp/digestfile.txt") + Expect(err).To(BeNil()) + Expect(fi.Name()).To(Equal("digestfile.txt")) + Expect(push2.ExitCode()).To(Equal(0)) }) It("podman push to local registry with authorization", func() { diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go index 1e0b84310..abb93a149 100644 --- a/test/e2e/run_volume_test.go +++ b/test/e2e/run_volume_test.go @@ -154,4 +154,12 @@ var _ = Describe("Podman run with volumes", func() { session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Not(Equal(0))) }) + + It("podman run with volume flag and multiple named volumes", func() { + session := podmanTest.Podman([]string{"run", "--rm", "-v", "testvol1:/testvol1", "-v", "testvol2:/testvol2", ALPINE, "grep", "/testvol", "/proc/self/mountinfo"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(ContainSubstring("/testvol1")) + Expect(session.OutputToString()).To(ContainSubstring("/testvol2")) + }) }) diff --git a/test/e2e/systemd_test.go b/test/e2e/systemd_test.go index 91604867d..02778d493 100644 --- a/test/e2e/systemd_test.go +++ b/test/e2e/systemd_test.go @@ -5,7 +5,10 @@ package integration import ( "io/ioutil" "os" + "strings" + "time" + "github.com/containers/libpod/pkg/cgroups" . "github.com/containers/libpod/test/utils" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -77,4 +80,49 @@ WantedBy=multi-user.target status := SystemExec("bash", []string{"-c", "systemctl status redis"}) Expect(status.OutputToString()).To(ContainSubstring("active (running)")) }) + + It("podman run container with systemd PID1", func() { + cgroupsv2, err := cgroups.IsCgroup2UnifiedMode() + Expect(err).To(BeNil()) + if cgroupsv2 { + Skip("systemd test does not work in cgroups V2 mode yet") + } + + systemdImage := "fedora" + pull := podmanTest.Podman([]string{"pull", systemdImage}) + pull.WaitWithDefaultTimeout() + Expect(pull.ExitCode()).To(Equal(0)) + + ctrName := "testSystemd" + run := podmanTest.Podman([]string{"run", "--name", ctrName, "-t", "-i", "-d", systemdImage, "init"}) + run.WaitWithDefaultTimeout() + Expect(run.ExitCode()).To(Equal(0)) + ctrID := run.OutputToString() + + logs := podmanTest.Podman([]string{"logs", ctrName}) + logs.WaitWithDefaultTimeout() + Expect(logs.ExitCode()).To(Equal(0)) + + // Give container 10 seconds to start + started := false + for i := 0; i < 10; i++ { + runningCtrs := podmanTest.Podman([]string{"ps", "-q", "--no-trunc"}) + runningCtrs.WaitWithDefaultTimeout() + Expect(runningCtrs.ExitCode()).To(Equal(0)) + + if strings.Contains(runningCtrs.OutputToString(), ctrID) { + started = true + break + } + + time.Sleep(1 * time.Second) + } + + Expect(started).To(BeTrue()) + + systemctl := podmanTest.Podman([]string{"exec", "-t", "-i", ctrName, "systemctl", "status", "--no-pager"}) + systemctl.WaitWithDefaultTimeout() + Expect(systemctl.ExitCode()).To(Equal(0)) + Expect(strings.Contains(systemctl.OutputToString(), "State:")).To(BeTrue()) + }) }) diff --git a/test/endpoint/commit.go b/test/endpoint/commit.go new file mode 100644 index 000000000..476ac6ca3 --- /dev/null +++ b/test/endpoint/commit.go @@ -0,0 +1,47 @@ +package endpoint + +import ( + "encoding/json" + "os" + + . "github.com/containers/libpod/test/utils" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Podman commit", func() { + var ( + tempdir string + err error + endpointTest *EndpointTestIntegration + ) + + BeforeEach(func() { + tempdir, err = CreateTempDirInTempDir() + if err != nil { + os.Exit(1) + } + endpointTest = Setup(tempdir) + endpointTest.StartVarlinkWithCache() + }) + + AfterEach(func() { + endpointTest.Cleanup() + + }) + + It("ensure commit with uppercase image name does not panic", func() { + body := make(map[string]string) + body["image_name"] = "FOO" + body["format"] = "oci" + body["name"] = "top" + b, err := json.Marshal(body) + Expect(err).To(BeNil()) + // run the container to be committed + _ = endpointTest.startTopContainer("top") + result := endpointTest.Varlink("Commit", string(b), false) + // This indicates an error occured + Expect(len(result.StdErrToString())).To(BeNumerically(">", 0)) + }) + +}) diff --git a/test/endpoint/config.go b/test/endpoint/config.go new file mode 100644 index 000000000..15ee23547 --- /dev/null +++ b/test/endpoint/config.go @@ -0,0 +1,22 @@ +package endpoint + +import "encoding/json" + +var ( + STORAGE_FS = "vfs" + STORAGE_OPTIONS = "--storage-driver vfs" + ROOTLESS_STORAGE_FS = "vfs" + ROOTLESS_STORAGE_OPTIONS = "--storage-driver vfs" + CACHE_IMAGES = []string{ALPINE, BB, fedoraMinimal, nginx, redis, registry, infra, labels} + nginx = "quay.io/libpod/alpine_nginx:latest" + BB_GLIBC = "docker.io/library/busybox:glibc" + registry = "docker.io/library/registry:2" + labels = "quay.io/libpod/alpine_labels:latest" +) + +func makeNameMessage(name string) string { + n := make(map[string]string) + n["name"] = name + b, _ := json.Marshal(n) + return string(b) +} diff --git a/test/endpoint/endpoint.go b/test/endpoint/endpoint.go new file mode 100644 index 000000000..78aa957ab --- /dev/null +++ b/test/endpoint/endpoint.go @@ -0,0 +1,223 @@ +package endpoint + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "os/exec" + "strconv" + "strings" + "syscall" + "time" + + iopodman "github.com/containers/libpod/cmd/podman/varlink" + "github.com/containers/libpod/pkg/rootless" + . 
"github.com/onsi/ginkgo" + "github.com/onsi/gomega/gexec" +) + +var ( + ARTIFACT_DIR = "/tmp/.artifacts" + CGROUP_MANAGER = "systemd" + defaultWaitTimeout = 90 + //RESTORE_IMAGES = []string{ALPINE, BB} + INTEGRATION_ROOT string + ImageCacheDir = "/tmp/podman/imagecachedir" + VarlinkBinary = "/usr/bin/varlink" + ALPINE = "docker.io/library/alpine:latest" + infra = "k8s.gcr.io/pause:3.1" + BB = "docker.io/library/busybox:latest" + redis = "docker.io/library/redis:alpine" + fedoraMinimal = "registry.fedoraproject.org/fedora-minimal:latest" +) + +type EndpointTestIntegration struct { + ArtifactPath string + CNIConfigDir string + CgroupManager string + ConmonBinary string + CrioRoot string + //Host HostOS + ImageCacheDir string + ImageCacheFS string + OCIRuntime string + PodmanBinary string + RemoteTest bool + RunRoot string + SignaturePolicyPath string + StorageOptions string + TmpDir string + Timings []string + VarlinkBinary string + VarlinkCommand *exec.Cmd + VarlinkEndpoint string + VarlinkSession *os.Process +} + +func (p *EndpointTestIntegration) StartVarlink() { + p.startVarlink(false) +} + +func (p *EndpointTestIntegration) StartVarlinkWithCache() { + p.startVarlink(true) +} + +func (p *EndpointTestIntegration) startVarlink(useImageCache bool) { + var ( + counter int + ) + if os.Geteuid() == 0 { + os.MkdirAll("/run/podman", 0755) + } + varlinkEndpoint := p.VarlinkEndpoint + //p.SetVarlinkAddress(p.VarlinkEndpoint) + + args := []string{"varlink", "--timeout", "0", varlinkEndpoint} + podmanOptions := getVarlinkOptions(p, args) + if useImageCache { + cacheOptions := []string{"--storage-opt", fmt.Sprintf("%s.imagestore=%s", p.ImageCacheFS, p.ImageCacheDir)} + podmanOptions = append(cacheOptions, podmanOptions...) + } + command := exec.Command(p.PodmanBinary, podmanOptions...) 
+ fmt.Printf("Running: %s %s\n", p.PodmanBinary, strings.Join(podmanOptions, " ")) + command.Start() + command.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} + p.VarlinkCommand = command + p.VarlinkSession = command.Process + for { + if result := p.endpointReady(); result == 0 { + break + } + fmt.Println("Waiting for varlink connection to become active", counter) + time.Sleep(250 * time.Millisecond) + counter++ + if counter > 40 { + Fail("varlink endpoint never became ready") + } + } +} + +func (p *EndpointTestIntegration) endpointReady() int { + session := p.Varlink("GetVersion", "", false) + return session.ExitCode() +} + +func (p *EndpointTestIntegration) StopVarlink() { + var out bytes.Buffer + var pids []int + varlinkSession := p.VarlinkSession + + if !rootless.IsRootless() { + if err := varlinkSession.Kill(); err != nil { + fmt.Fprintf(os.Stderr, "error on varlink stop-kill %q", err) + } + if _, err := varlinkSession.Wait(); err != nil { + fmt.Fprintf(os.Stderr, "error on varlink stop-wait %q", err) + } + + } else { + //p.ResetVarlinkAddress() + parentPid := fmt.Sprintf("%d", p.VarlinkSession.Pid) + pgrep := exec.Command("pgrep", "-P", parentPid) + fmt.Printf("running: pgrep %s\n", parentPid) + pgrep.Stdout = &out + err := pgrep.Run() + if err != nil { + fmt.Fprint(os.Stderr, "unable to find varlink pid") + } + + for _, s := range strings.Split(out.String(), "\n") { + if len(s) == 0 { + continue + } + p, err := strconv.Atoi(s) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to convert %s to int", s) + } + if p != 0 { + pids = append(pids, p) + } + } + + pids = append(pids, p.VarlinkSession.Pid) + for _, pid := range pids { + syscall.Kill(pid, syscall.SIGKILL) + } + } + socket := strings.Split(p.VarlinkEndpoint, ":")[1] + if err := os.Remove(socket); err != nil { + fmt.Println(err) + } +} + +type EndpointSession struct { + *gexec.Session +} + +func getVarlinkOptions(p *EndpointTestIntegration, args []string) []string { + podmanOptions := strings.Split(fmt.Sprintf("--root %s --runroot %s --runtime %s --conmon %s --cni-config-dir %s --cgroup-manager %s", + p.CrioRoot, p.RunRoot, p.OCIRuntime, p.ConmonBinary, p.CNIConfigDir, p.CgroupManager), " ") + if os.Getenv("HOOK_OPTION") != "" { + podmanOptions = append(podmanOptions, os.Getenv("HOOK_OPTION")) + } + podmanOptions = append(podmanOptions, strings.Split(p.StorageOptions, " ")...) + podmanOptions = append(podmanOptions, args...) + return podmanOptions +} + +func (p *EndpointTestIntegration) Varlink(endpoint, message string, more bool) *EndpointSession { + //call unix:/run/user/1000/podman/io.podman/io.podman.GetContainerStats '{"name": "foobar" }' + var ( + command *exec.Cmd + ) + + args := []string{"call"} + if more { + args = append(args, "-m") + } + args = append(args, []string{fmt.Sprintf("%s/io.podman.%s", p.VarlinkEndpoint, endpoint)}...) + if len(message) > 0 { + args = append(args, message) + } + command = exec.Command(p.VarlinkBinary, args...) 
+ session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) + if err != nil { + Fail(fmt.Sprintf("unable to run varlink command: %s\n%v", strings.Join(args, " "), err)) + } + session.Wait(defaultWaitTimeout) + return &EndpointSession{session} +} + +func (s *EndpointSession) StdErrToString() string { + fields := strings.Fields(fmt.Sprintf("%s", s.Err.Contents())) + return strings.Join(fields, " ") +} + +func (s *EndpointSession) OutputToString() string { + fields := strings.Fields(fmt.Sprintf("%s", s.Out.Contents())) + return strings.Join(fields, " ") +} + +func (s *EndpointSession) OutputToBytes() []byte { + out := s.OutputToString() + return []byte(out) +} + +func (s *EndpointSession) OutputToStringMap() map[string]string { + var out map[string]string + json.Unmarshal(s.OutputToBytes(), &out) + return out +} + +func (s *EndpointSession) OutputToMapToInt() map[string]int { + var out map[string]int + json.Unmarshal(s.OutputToBytes(), &out) + return out +} + +func (s *EndpointSession) OutputToMoreResponse() iopodman.MoreResponse { + out := make(map[string]iopodman.MoreResponse) + json.Unmarshal(s.OutputToBytes(), &out) + return out["reply"] +} diff --git a/test/endpoint/endpoint_suite_test.go b/test/endpoint/endpoint_suite_test.go new file mode 100644 index 000000000..401da94c2 --- /dev/null +++ b/test/endpoint/endpoint_suite_test.go @@ -0,0 +1,70 @@ +package endpoint + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestEndpoint(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Endpoint Suite") +} + +var LockTmpDir string + +var _ = SynchronizedBeforeSuite(func() []byte { + // Cache images + cwd, _ := os.Getwd() + INTEGRATION_ROOT = filepath.Join(cwd, "../../") + podman := Setup("/tmp") + podman.ArtifactPath = ARTIFACT_DIR + if _, err := os.Stat(ARTIFACT_DIR); os.IsNotExist(err) { + if err = os.Mkdir(ARTIFACT_DIR, 0777); err != nil { + fmt.Printf("%q\n", err) + os.Exit(1) + } + } + + // make cache dir + if err := os.MkdirAll(ImageCacheDir, 0777); err != nil { + fmt.Printf("%q\n", err) + os.Exit(1) + } + + podman.StartVarlink() + for _, image := range CACHE_IMAGES { + podman.createArtifact(image) + } + podman.StopVarlink() + // If running localized tests, the cache dir is created and populated. if the + // tests are remote, this is a no-op + populateCache(podman) + + path, err := ioutil.TempDir("", "libpodlock") + if err != nil { + fmt.Println(err) + os.Exit(1) + } + return []byte(path) +}, func(data []byte) { + LockTmpDir = string(data) +}) + +var _ = SynchronizedAfterSuite(func() {}, + func() { + podman := Setup("/tmp") + if err := os.RemoveAll(podman.CrioRoot); err != nil { + fmt.Printf("%q\n", err) + os.Exit(1) + } + if err := os.RemoveAll(podman.ImageCacheDir); err != nil { + fmt.Printf("%q\n", err) + os.Exit(1) + } + }) diff --git a/test/endpoint/exists_test.go b/test/endpoint/exists_test.go new file mode 100644 index 000000000..17e252a65 --- /dev/null +++ b/test/endpoint/exists_test.go @@ -0,0 +1,66 @@ +package endpoint + +import ( + "os" + + . "github.com/containers/libpod/test/utils" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Podman exists", func() { + var ( + tempdir string + err error + endpointTest *EndpointTestIntegration + ) + + BeforeEach(func() { + tempdir, err = CreateTempDirInTempDir() + if err != nil { + os.Exit(1) + } + endpointTest = Setup(tempdir) + endpointTest.StartVarlinkWithCache() + }) + + AfterEach(func() { + endpointTest.Cleanup() + //f := CurrentGinkgoTestDescription() + //processTestResult(f) + + }) + + It("image exists in local storage", func() { + result := endpointTest.Varlink("ImageExists", makeNameMessage(ALPINE), false) + Expect(result.ExitCode()).To(BeZero()) + + output := result.OutputToMapToInt() + Expect(output["exists"]).To(BeZero()) + }) + + It("image exists in local storage by shortname", func() { + result := endpointTest.Varlink("ImageExists", makeNameMessage("alpine"), false) + Expect(result.ExitCode()).To(BeZero()) + + output := result.OutputToMapToInt() + Expect(output["exists"]).To(BeZero()) + }) + + It("image does not exist in local storage", func() { + result := endpointTest.Varlink("ImageExists", makeNameMessage("alpineforest"), false) + Expect(result.ExitCode()).To(BeZero()) + + output := result.OutputToMapToInt() + Expect(output["exists"]).To(Equal(1)) + }) + + It("container exists in local storage by name", func() { + _ = endpointTest.startTopContainer("top") + result := endpointTest.Varlink("ContainerExists", makeNameMessage("top"), false) + Expect(result.ExitCode()).To(BeZero()) + output := result.OutputToMapToInt() + Expect(output["exists"]).To(BeZero()) + }) + +}) diff --git a/test/endpoint/pull_test.go b/test/endpoint/pull_test.go new file mode 100644 index 000000000..51eb9c760 --- /dev/null +++ b/test/endpoint/pull_test.go @@ -0,0 +1,44 @@ +package endpoint + +import ( + "os" + + . "github.com/containers/libpod/test/utils" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Podman pull", func() { + var ( + tempdir string + err error + endpointTest *EndpointTestIntegration + ) + + BeforeEach(func() { + tempdir, err = CreateTempDirInTempDir() + if err != nil { + os.Exit(1) + } + endpointTest = Setup(tempdir) + endpointTest.StartVarlink() + }) + + AfterEach(func() { + endpointTest.Cleanup() + //f := CurrentGinkgoTestDescription() + //processTestResult(f) + + }) + + It("podman pull", func() { + session := endpointTest.Varlink("PullImage", makeNameMessage(ALPINE), false) + Expect(session.ExitCode()).To(BeZero()) + + result := endpointTest.Varlink("ImageExists", makeNameMessage(ALPINE), false) + Expect(result.ExitCode()).To(BeZero()) + + output := result.OutputToMapToInt() + Expect(output["exists"]).To(BeZero()) + }) +}) diff --git a/test/endpoint/setup.go b/test/endpoint/setup.go new file mode 100644 index 000000000..727f29ec6 --- /dev/null +++ b/test/endpoint/setup.go @@ -0,0 +1,219 @@ +package endpoint + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + iopodman "github.com/containers/libpod/cmd/podman/varlink" + "github.com/containers/libpod/pkg/rootless" + "github.com/containers/storage/pkg/stringid" + "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/sirupsen/logrus" +) + +func Setup(tempDir string) *EndpointTestIntegration { + var ( + endpoint string + ) + cwd, _ := os.Getwd() + INTEGRATION_ROOT = filepath.Join(cwd, "../../") + + podmanBinary := filepath.Join(cwd, "../../bin/podman") + if os.Getenv("PODMAN_BINARY") != "" { + podmanBinary = os.Getenv("PODMAN_BINARY") + } + conmonBinary := filepath.Join("/usr/libexec/podman/conmon") + altConmonBinary := "/usr/bin/conmon" + if _, err := os.Stat(conmonBinary); os.IsNotExist(err) { + conmonBinary = altConmonBinary + } + if os.Getenv("CONMON_BINARY") != "" { + conmonBinary = os.Getenv("CONMON_BINARY") + } + storageOptions := STORAGE_OPTIONS + if os.Getenv("STORAGE_OPTIONS") != "" { + storageOptions = os.Getenv("STORAGE_OPTIONS") + } + cgroupManager := CGROUP_MANAGER + if rootless.IsRootless() { + cgroupManager = "cgroupfs" + } + if os.Getenv("CGROUP_MANAGER") != "" { + cgroupManager = os.Getenv("CGROUP_MANAGER") + } + + ociRuntime := os.Getenv("OCI_RUNTIME") + if ociRuntime == "" { + var err error + ociRuntime, err = exec.LookPath("runc") + // If we cannot find the runc binary, setting to something static as we have no way + // to return an error. The tests will fail and point out that the runc binary could + // not be found nicely. + if err != nil { + ociRuntime = "/usr/bin/runc" + } + } + os.Setenv("DISABLE_HC_SYSTEMD", "true") + CNIConfigDir := "/etc/cni/net.d" + + storageFs := STORAGE_FS + if rootless.IsRootless() { + storageFs = ROOTLESS_STORAGE_FS + } + + uuid := stringid.GenerateNonCryptoID() + if !rootless.IsRootless() { + endpoint = fmt.Sprintf("unix:/run/podman/io.podman-%s", uuid) + } else { + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + socket := fmt.Sprintf("io.podman-%s", uuid) + fqpath := filepath.Join(runtimeDir, socket) + endpoint = fmt.Sprintf("unix:%s", fqpath) + } + + eti := EndpointTestIntegration{ + ArtifactPath: ARTIFACT_DIR, + CNIConfigDir: CNIConfigDir, + CgroupManager: cgroupManager, + ConmonBinary: conmonBinary, + CrioRoot: filepath.Join(tempDir, "crio"), + ImageCacheDir: ImageCacheDir, + ImageCacheFS: storageFs, + OCIRuntime: ociRuntime, + PodmanBinary: podmanBinary, + RunRoot: filepath.Join(tempDir, "crio-run"), + SignaturePolicyPath: filepath.Join(INTEGRATION_ROOT, "test/policy.json"), + StorageOptions: storageOptions, + TmpDir: tempDir, + //Timings: nil, + VarlinkBinary: VarlinkBinary, + VarlinkCommand: nil, + VarlinkEndpoint: endpoint, + VarlinkSession: nil, + } + return &eti +} + +func (p *EndpointTestIntegration) Cleanup() { + // Remove all containers + // TODO Make methods to do all this? 
+ + p.stopAllContainers() + + //TODO need to make stop all pods + + p.StopVarlink() + // Nuke tempdir + if err := os.RemoveAll(p.TmpDir); err != nil { + fmt.Printf("%q\n", err) + } + + // Clean up the registries configuration file ENV variable set in Create + resetRegistriesConfigEnv() +} + +func (p *EndpointTestIntegration) listContainers() []iopodman.Container { + containers := p.Varlink("ListContainers", "", false) + var varlinkContainers map[string][]iopodman.Container + if err := json.Unmarshal(containers.OutputToBytes(), &varlinkContainers); err != nil { + logrus.Error("failed to unmarshal containers") + } + return varlinkContainers["containers"] +} + +func (p *EndpointTestIntegration) stopAllContainers() { + containers := p.listContainers() + for _, container := range containers { + p.stopContainer(container.Id) + } +} + +func (p *EndpointTestIntegration) stopContainer(cid string) { + p.Varlink("StopContainer", fmt.Sprintf("{\"name\":\"%s\", \"timeout\":0}", cid), false) +} + +func resetRegistriesConfigEnv() { + os.Setenv("REGISTRIES_CONFIG_PATH", "") +} + +func (p *EndpointTestIntegration) createArtifact(image string) { + if os.Getenv("NO_TEST_CACHE") != "" { + return + } + dest := strings.Split(image, "/") + destName := fmt.Sprintf("/tmp/%s.tar", strings.Replace(strings.Join(strings.Split(dest[len(dest)-1], "/"), ""), ":", "-", -1)) + fmt.Printf("Caching %s at %s...", image, destName) + if _, err := os.Stat(destName); os.IsNotExist(err) { + pull := p.Varlink("PullImage", fmt.Sprintf("{\"name\":\"%s\"}", image), false) + Expect(pull.ExitCode()).To(Equal(0)) + + imageSave := iopodman.ImageSaveOptions{ + //Name:image, + //Output: destName, + //Format: "oci-archive", + } + imageSave.Name = image + imageSave.Output = destName + imageSave.Format = "oci-archive" + foo := make(map[string]iopodman.ImageSaveOptions) + foo["options"] = imageSave + f, _ := json.Marshal(foo) + save := p.Varlink("ImageSave", string(f), false) + result := save.OutputToMoreResponse() + Expect(save.ExitCode()).To(Equal(0)) + Expect(os.Rename(result.Id, destName)).To(BeNil()) + fmt.Printf("\n") + } else { + fmt.Printf(" already exists.\n") + } +} + +func populateCache(p *EndpointTestIntegration) { + p.CrioRoot = p.ImageCacheDir + p.StartVarlink() + for _, image := range CACHE_IMAGES { + p.RestoreArtifactToCache(image) + } + p.StopVarlink() +} + +func (p *EndpointTestIntegration) RestoreArtifactToCache(image string) error { + fmt.Printf("Restoring %s...\n", image) + dest := strings.Split(image, "/") + destName := fmt.Sprintf("/tmp/%s.tar", strings.Replace(strings.Join(strings.Split(dest[len(dest)-1], "/"), ""), ":", "-", -1)) + //fmt.Println(destName, p.ImageCacheDir) + load := p.Varlink("LoadImage", fmt.Sprintf("{\"name\": \"%s\", \"inputFile\": \"%s\"}", image, destName), false) + Expect(load.ExitCode()).To(BeZero()) + return nil +} + +func (p *EndpointTestIntegration) startTopContainer(name string) string { + t := true + args := iopodman.Create{ + Args: []string{"docker.io/library/alpine:latest", "top"}, + Tty: &t, + Detach: &t, + } + if len(name) > 0 { + args.Name = &name + } + b, err := json.Marshal(args) + if err != nil { + ginkgo.Fail("failed to marshal data for top container") + } + input := fmt.Sprintf("{\"create\":%s}", string(b)) + top := p.Varlink("CreateContainer", input, false) + if top.ExitCode() != 0 { + ginkgo.Fail("failed to start top container") + } + start := p.Varlink("StartContainer", fmt.Sprintf("{\"name\":\"%s\"}", name), false) + if start.ExitCode() != 0 { + ginkgo.Fail("failed to start top 
container") + } + return start.OutputToString() +} diff --git a/test/endpoint/version_test.go b/test/endpoint/version_test.go new file mode 100644 index 000000000..a1168da70 --- /dev/null +++ b/test/endpoint/version_test.go @@ -0,0 +1,41 @@ +package endpoint + +import ( + "os" + + . "github.com/containers/libpod/test/utils" + "github.com/containers/libpod/version" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Podman version", func() { + var ( + tempdir string + err error + endpointTest *EndpointTestIntegration + ) + + BeforeEach(func() { + tempdir, err = CreateTempDirInTempDir() + if err != nil { + os.Exit(1) + } + endpointTest = Setup(tempdir) + endpointTest.StartVarlink() + }) + + AfterEach(func() { + endpointTest.Cleanup() + //f := CurrentGinkgoTestDescription() + //processTestResult(f) + + }) + + It("podman version", func() { + session := endpointTest.Varlink("GetVersion", "", false) + result := session.OutputToStringMap() + Expect(result["version"]).To(Equal(version.Version)) + Expect(session.ExitCode()).To(Equal(0)) + }) +}) diff --git a/test/system/065-cp.bats b/test/system/065-cp.bats new file mode 100644 index 000000000..204065bdb --- /dev/null +++ b/test/system/065-cp.bats @@ -0,0 +1,229 @@ +#!/usr/bin/env bats -*- bats -*- +# +# Tests for 'podman cp' +# +# ASSUMPTION FOR ALL THESE TESTS: /tmp in the container starts off empty +# + +load helpers + +# Create two random-name random-content files in /tmp in the container +# podman-cp them into the host using '/tmp/*', i.e. asking podman to +# perform wildcard expansion in the container. We should get both +# files copied into the host. +@test "podman cp * - wildcard copy multiple files from container to host" { + skip_if_remote "podman-remote does not yet handle cp" + + srcdir=$PODMAN_TMPDIR/cp-test-in + dstdir=$PODMAN_TMPDIR/cp-test-out + mkdir -p $srcdir $dstdir + + rand_filename1=$(random_string 20) + rand_content1=$(random_string 50) + rand_filename2=$(random_string 20) + rand_content2=$(random_string 50) + + run_podman run --name cpcontainer $IMAGE sh -c \ + "echo $rand_content1 >/tmp/$rand_filename1; + echo $rand_content2 >/tmp/$rand_filename2" + + run_podman cp 'cpcontainer:/tmp/*' $dstdir + + test -e $dstdir/$rand_filename1 || die "file 1 not copied from container" + test -e $dstdir/$rand_filename2 || die "file 2 not copied from container" + + is "$(<$dstdir/$rand_filename1)" "$rand_content1" "content of file 1" + is "$(<$dstdir/$rand_filename2)" "$rand_content2" "content of file 2" + + run_podman rm cpcontainer +} + + +# Create a file on the host; make a symlink in the container pointing +# into host-only space. Try to podman-cp that symlink. It should fail. 
+@test "podman cp - will not recognize symlink pointing into host space" { + skip_if_remote "podman-remote does not yet handle cp" + skip "BROKEN: PLEASE ENABLE ONCE #3829 GETS FIXED" + + srcdir=$PODMAN_TMPDIR/cp-test-in + dstdir=$PODMAN_TMPDIR/cp-test-out + mkdir -p $srcdir $dstdir + echo "this file is on the host" >$srcdir/hostfile + + run_podman run --name cpcontainer $IMAGE \ + sh -c "ln -s $srcdir/hostfile /tmp/badlink" + # This should fail because, from the container's perspective, the symlink + # points to a nonexistent file + run_podman 125 cp 'cpcontainer:/tmp/*' $dstdir/ + + # FIXME: this might not be the exactly correct error message + is "$output" ".*error evaluating symlinks.*lstat.*no such file or dir" \ + "Expected error from copying invalid symlink" + + # make sure there are no files in dstdir + is "$(/bin/ls -1 $dstdir)" "" "incorrectly copied symlink from host" + + run_podman rm cpcontainer +} + + +# Issue #3829 - like the above, but with a level of indirection in the +# wildcard expansion: create a file on the host; create a symlink in +# the container named 'file1' pointing to this file; then another symlink +# in the container pointing to 'file*' (file star). Try to podman-cp +# this invalid double symlink. It must fail. +@test "podman cp - will not expand globs in host space (#3829)" { + skip_if_remote "podman-remote does not yet handle cp" + skip "BROKEN: PLEASE ENABLE ONCE #3829 GETS FIXED" + + srcdir=$PODMAN_TMPDIR/cp-test-in + dstdir=$PODMAN_TMPDIR/cp-test-out + mkdir -p $srcdir $dstdir + echo "This file is on the host" > $srcdir/hostfile + + run_podman run --name cpcontainer $IMAGE \ + sh -c "ln -s $srcdir/hostfile file1;ln -s file\* copyme" + run_podman 125 cp cpcontainer:copyme $dstdir + + is "$output" ".*error evaluating symlinks.*lstat.*no such file or dir" \ + "Expected error from copying invalid symlink" + + # make sure there are no files in dstdir + is "$(/bin/ls -1 $dstdir)" "" "incorrectly copied symlink from host" + + run_podman rm cpcontainer +} + + +# Another symlink into host space, this one named '*' (star). cp should fail. +@test "podman cp - will not expand wildcard" { + skip_if_remote "podman-remote does not yet handle cp" + + srcdir=$PODMAN_TMPDIR/cp-test-in + dstdir=$PODMAN_TMPDIR/cp-test-out + mkdir -p $srcdir $dstdir + echo "This file lives on the host" > $srcdir/hostfile + + run_podman run --name cpcontainer $IMAGE \ + sh -c "ln -s $srcdir/hostfile /tmp/\*" + run_podman 125 cp 'cpcontainer:/tmp/*' $dstdir + + is "$output" ".*error evaluating symlinks.*lstat.*no such file or dir" \ + "Expected error from copying invalid symlink" + + # dstdir must be empty + is "$(/bin/ls -1 $dstdir)" "" "incorrectly copied symlink from host" + + run_podman rm cpcontainer +} + +############################################################################### +# cp INTO container + +# THIS IS EXTREMELY WEIRD. Podman expands symlinks in weird ways. 
+@test "podman cp into container: weird symlink expansion" { + skip_if_remote "podman-remote does not yet handle cp" + + srcdir=$PODMAN_TMPDIR/cp-test-in + dstdir=$PODMAN_TMPDIR/cp-test-out + mkdir -p $srcdir $dstdir + + rand_filename1=$(random_string 20) + rand_content1=$(random_string 50) + echo $rand_content1 > $srcdir/$rand_filename1 + + rand_filename2=$(random_string 20) + rand_content2=$(random_string 50) + echo $rand_content2 > $srcdir/$rand_filename2 + + rand_filename3=$(random_string 20) + rand_content3=$(random_string 50) + echo $rand_content3 > $srcdir/$rand_filename3 + + # Create tmp subdirectories in container, most with an invalid 'x' symlink + # Keep container running so we can exec into it. + run_podman run -d --name cpcontainer $IMAGE \ + sh -c "mkdir /tmp/d1;ln -s /tmp/nonesuch1 /tmp/d1/x; + mkdir /tmp/d2;ln -s /tmp/nonesuch2 /tmp/d2/x; + mkdir /tmp/d3; + trap 'exit 0' 15;while :;do sleep 0.5;done" + + # Copy file from host into container, into a file named 'x' + # Note that the second has a trailing slash; this will trigger mkdir + run_podman cp $srcdir/$rand_filename1 cpcontainer:/tmp/d1/x + is "$output" "" "output from podman cp 1" + + run_podman cp $srcdir/$rand_filename2 cpcontainer:/tmp/d2/x/ + is "$output" "" "output from podman cp 3" + + run_podman cp $srcdir/$rand_filename3 cpcontainer:/tmp/d3/x + is "$output" "" "output from podman cp 3" + + # Read back. + # In the first case, podman actually creates the file nonesuch1 (i.e. + # podman expands 'x -> nonesuch1' and, instead of overwriting x, + # creates an actual file). + run_podman exec cpcontainer cat /tmp/nonesuch1 + is "$output" "$rand_content1" "cp creates destination file" + + # In the second case, podman creates a directory nonesuch2, then + # creates a file with the same name as the input file. THIS IS WEIRD! + run_podman exec cpcontainer cat /tmp/nonesuch2/$rand_filename2 + is "$output" "$rand_content2" "cp creates destination dir and file" + + # In the third case, podman (correctly imo) creates a file named 'x' + run_podman exec cpcontainer cat /tmp/d3/x + is "$output" "$rand_content3" "cp creates file named x" + + run_podman rm -f cpcontainer + + +} + + +# rhbz1741718 : file copied into container:/var/lib/foo appears as /foo +# (docker only, never seems to have affected podman. Make sure it never does). +@test "podman cp into a subdirectory matching GraphRoot" { + skip_if_remote "podman-remote does not yet handle cp" + + # Create tempfile with random name and content + srcdir=$PODMAN_TMPDIR/cp-test-in + mkdir -p $srcdir + rand_filename=$(random_string 20) + rand_content=$(random_string 50) + echo $rand_content > $srcdir/$rand_filename + chmod 644 $srcdir/$rand_filename + + # Determine path to podman storage (eg /var/lib/c/s, or $HOME/.local/...) + run_podman info --format '{{.store.GraphRoot}}' + graphroot=$output + + # Create that directory in the container, and sleep (to keep container + # running, so we can exec into it). The trap/while is so podman-rm will + # run quickly instead of taking 10 seconds. + run_podman run -d --name cpcontainer $IMAGE sh -c \ + "mkdir -p $graphroot; trap 'exit 0' 15;while :;do sleep 0.5;done" + + # Copy from host into container. + run_podman cp $srcdir/$rand_filename cpcontainer:$graphroot/$rand_filename + + # ls, and confirm it's there. 
+ run_podman exec cpcontainer ls -l $graphroot/$rand_filename + is "$output" "-rw-r--r-- .* 1 .* root .* 51 .* $graphroot/$rand_filename" \ + "File is copied into container in the correct (full) path" + + # Confirm it has the expected content (this is unlikely to ever fail) + run_podman exec cpcontainer cat $graphroot/$rand_filename + is "$output" "$rand_content" "Contents of file copied into container" + + run_podman rm -f cpcontainer +} + + +function teardown() { + # In case any test fails, clean up the container we left behind + run_podman rm -f cpcontainer + basic_teardown +} + +# vim: filetype=sh diff --git a/vendor/github.com/coreos/go-iptables/LICENSE b/vendor/github.com/coreos/go-iptables/LICENSE deleted file mode 100644 index 37ec93a14..000000000 --- a/vendor/github.com/coreos/go-iptables/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. 
For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. 
You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/coreos/go-iptables/NOTICE b/vendor/github.com/coreos/go-iptables/NOTICE deleted file mode 100644 index 23a0ada2f..000000000 --- a/vendor/github.com/coreos/go-iptables/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2018 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-iptables/iptables/iptables.go b/vendor/github.com/coreos/go-iptables/iptables/iptables.go deleted file mode 100644 index 9601bc78a..000000000 --- a/vendor/github.com/coreos/go-iptables/iptables/iptables.go +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package iptables - -import ( - "bytes" - "fmt" - "io" - "net" - "os/exec" - "regexp" - "strconv" - "strings" - "syscall" -) - -// Adds the output of stderr to exec.ExitError -type Error struct { - exec.ExitError - cmd exec.Cmd - msg string - proto Protocol - exitStatus *int //for overriding -} - -func (e *Error) ExitStatus() int { - if e.exitStatus != nil { - return *e.exitStatus - } - return e.Sys().(syscall.WaitStatus).ExitStatus() -} - -func (e *Error) Error() string { - return fmt.Sprintf("running %v: exit status %v: %v", e.cmd.Args, e.ExitStatus(), e.msg) -} - -// IsNotExist returns true if the error is due to the chain or rule not existing -func (e *Error) IsNotExist() bool { - return e.ExitStatus() == 1 && - (e.msg == fmt.Sprintf("%s: Bad rule (does a matching rule exist in that chain?).\n", getIptablesCommand(e.proto)) || - e.msg == fmt.Sprintf("%s: No chain/target/match by that name.\n", getIptablesCommand(e.proto))) -} - -// Protocol to differentiate between IPv4 and IPv6 -type Protocol byte - -const ( - ProtocolIPv4 Protocol = iota - ProtocolIPv6 -) - -type IPTables struct { - path string - proto Protocol - hasCheck bool - hasWait bool - hasRandomFully bool - v1 int - v2 int - v3 int - mode string // the underlying iptables operating mode, e.g. nf_tables -} - -// New creates a new IPTables. -// For backwards compatibility, this always uses IPv4, i.e. "iptables". -func New() (*IPTables, error) { - return NewWithProtocol(ProtocolIPv4) -} - -// New creates a new IPTables for the given proto. -// The proto will determine which command is used, either "iptables" or "ip6tables". -func NewWithProtocol(proto Protocol) (*IPTables, error) { - path, err := exec.LookPath(getIptablesCommand(proto)) - if err != nil { - return nil, err - } - vstring, err := getIptablesVersionString(path) - v1, v2, v3, mode, err := extractIptablesVersion(vstring) - - checkPresent, waitPresent, randomFullyPresent := getIptablesCommandSupport(v1, v2, v3) - - ipt := IPTables{ - path: path, - proto: proto, - hasCheck: checkPresent, - hasWait: waitPresent, - hasRandomFully: randomFullyPresent, - v1: v1, - v2: v2, - v3: v3, - mode: mode, - } - return &ipt, nil -} - -// Proto returns the protocol used by this IPTables. -func (ipt *IPTables) Proto() Protocol { - return ipt.proto -} - -// Exists checks if given rulespec in specified table/chain exists -func (ipt *IPTables) Exists(table, chain string, rulespec ...string) (bool, error) { - if !ipt.hasCheck { - return ipt.existsForOldIptables(table, chain, rulespec) - - } - cmd := append([]string{"-t", table, "-C", chain}, rulespec...) - err := ipt.run(cmd...) - eerr, eok := err.(*Error) - switch { - case err == nil: - return true, nil - case eok && eerr.ExitStatus() == 1: - return false, nil - default: - return false, err - } -} - -// Insert inserts rulespec to specified table/chain (in specified pos) -func (ipt *IPTables) Insert(table, chain string, pos int, rulespec ...string) error { - cmd := append([]string{"-t", table, "-I", chain, strconv.Itoa(pos)}, rulespec...) - return ipt.run(cmd...) -} - -// Append appends rulespec to specified table/chain -func (ipt *IPTables) Append(table, chain string, rulespec ...string) error { - cmd := append([]string{"-t", table, "-A", chain}, rulespec...) - return ipt.run(cmd...) -} - -// AppendUnique acts like Append except that it won't add a duplicate -func (ipt *IPTables) AppendUnique(table, chain string, rulespec ...string) error { - exists, err := ipt.Exists(table, chain, rulespec...) 
- if err != nil { - return err - } - - if !exists { - return ipt.Append(table, chain, rulespec...) - } - - return nil -} - -// Delete removes rulespec in specified table/chain -func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error { - cmd := append([]string{"-t", table, "-D", chain}, rulespec...) - return ipt.run(cmd...) -} - -// List rules in specified table/chain -func (ipt *IPTables) List(table, chain string) ([]string, error) { - args := []string{"-t", table, "-S", chain} - return ipt.executeList(args) -} - -// List rules (with counters) in specified table/chain -func (ipt *IPTables) ListWithCounters(table, chain string) ([]string, error) { - args := []string{"-t", table, "-v", "-S", chain} - return ipt.executeList(args) -} - -// ListChains returns a slice containing the name of each chain in the specified table. -func (ipt *IPTables) ListChains(table string) ([]string, error) { - args := []string{"-t", table, "-S"} - - result, err := ipt.executeList(args) - if err != nil { - return nil, err - } - - // Iterate over rules to find all default (-P) and user-specified (-N) chains. - // Chains definition always come before rules. - // Format is the following: - // -P OUTPUT ACCEPT - // -N Custom - var chains []string - for _, val := range result { - if strings.HasPrefix(val, "-P") || strings.HasPrefix(val, "-N") { - chains = append(chains, strings.Fields(val)[1]) - } else { - break - } - } - return chains, nil -} - -// Stats lists rules including the byte and packet counts -func (ipt *IPTables) Stats(table, chain string) ([][]string, error) { - args := []string{"-t", table, "-L", chain, "-n", "-v", "-x"} - lines, err := ipt.executeList(args) - if err != nil { - return nil, err - } - - appendSubnet := func(addr string) string { - if strings.IndexByte(addr, byte('/')) < 0 { - if strings.IndexByte(addr, '.') < 0 { - return addr + "/128" - } - return addr + "/32" - } - return addr - } - - ipv6 := ipt.proto == ProtocolIPv6 - - rows := [][]string{} - for i, line := range lines { - // Skip over chain name and field header - if i < 2 { - continue - } - - // Fields: - // 0=pkts 1=bytes 2=target 3=prot 4=opt 5=in 6=out 7=source 8=destination 9=options - line = strings.TrimSpace(line) - fields := strings.Fields(line) - - // The ip6tables verbose output cannot be naively split due to the default "opt" - // field containing 2 single spaces. - if ipv6 { - // Check if field 6 is "opt" or "source" address - dest := fields[6] - ip, _, _ := net.ParseCIDR(dest) - if ip == nil { - ip = net.ParseIP(dest) - } - - // If we detected a CIDR or IP, the "opt" field is empty.. insert it. - if ip != nil { - f := []string{} - f = append(f, fields[:4]...) - f = append(f, " ") // Empty "opt" field for ip6tables - f = append(f, fields[4:]...) - fields = f - } - } - - // Adjust "source" and "destination" to include netmask, to match regular - // List output - fields[7] = appendSubnet(fields[7]) - fields[8] = appendSubnet(fields[8]) - - // Combine "options" fields 9... into a single space-delimited field. 
- options := fields[9:] - fields = fields[:9] - fields = append(fields, strings.Join(options, " ")) - rows = append(rows, fields) - } - return rows, nil -} - -func (ipt *IPTables) executeList(args []string) ([]string, error) { - var stdout bytes.Buffer - if err := ipt.runWithOutput(args, &stdout); err != nil { - return nil, err - } - - rules := strings.Split(stdout.String(), "\n") - - // strip trailing newline - if len(rules) > 0 && rules[len(rules)-1] == "" { - rules = rules[:len(rules)-1] - } - - // nftables mode doesn't return an error code when listing a non-existent - // chain. Patch that up. - if len(rules) == 0 && ipt.mode == "nf_tables" { - v := 1 - return nil, &Error{ - cmd: exec.Cmd{Args: args}, - msg: fmt.Sprintf("%s: No chain/target/match by that name.\n", getIptablesCommand(ipt.proto)), - proto: ipt.proto, - exitStatus: &v, - } - } - - for i, rule := range rules { - rules[i] = filterRuleOutput(rule) - } - - return rules, nil -} - -// NewChain creates a new chain in the specified table. -// If the chain already exists, it will result in an error. -func (ipt *IPTables) NewChain(table, chain string) error { - return ipt.run("-t", table, "-N", chain) -} - -// ClearChain flushed (deletes all rules) in the specified table/chain. -// If the chain does not exist, a new one will be created -func (ipt *IPTables) ClearChain(table, chain string) error { - err := ipt.NewChain(table, chain) - - // the exit code for "this table already exists" is different for - // different iptables modes - existsErr := 1 - if ipt.mode == "nf_tables" { - existsErr = 4 - } - - eerr, eok := err.(*Error) - switch { - case err == nil: - return nil - case eok && eerr.ExitStatus() == existsErr: - // chain already exists. Flush (clear) it. - return ipt.run("-t", table, "-F", chain) - default: - return err - } -} - -// RenameChain renames the old chain to the new one. -func (ipt *IPTables) RenameChain(table, oldChain, newChain string) error { - return ipt.run("-t", table, "-E", oldChain, newChain) -} - -// DeleteChain deletes the chain in the specified table. -// The chain must be empty -func (ipt *IPTables) DeleteChain(table, chain string) error { - return ipt.run("-t", table, "-X", chain) -} - -// ChangePolicy changes policy on chain to target -func (ipt *IPTables) ChangePolicy(table, chain, target string) error { - return ipt.run("-t", table, "-P", chain, target) -} - -// Check if the underlying iptables command supports the --random-fully flag -func (ipt *IPTables) HasRandomFully() bool { - return ipt.hasRandomFully -} - -// Return version components of the underlying iptables command -func (ipt *IPTables) GetIptablesVersion() (int, int, int) { - return ipt.v1, ipt.v2, ipt.v3 -} - -// run runs an iptables command with the given arguments, ignoring -// any stdout output -func (ipt *IPTables) run(args ...string) error { - return ipt.runWithOutput(args, nil) -} - -// runWithOutput runs an iptables command with the given arguments, -// writing any stdout output to the given writer -func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error { - args = append([]string{ipt.path}, args...) 
- if ipt.hasWait { - args = append(args, "--wait") - } else { - fmu, err := newXtablesFileLock() - if err != nil { - return err - } - ul, err := fmu.tryLock() - if err != nil { - return err - } - defer ul.Unlock() - } - - var stderr bytes.Buffer - cmd := exec.Cmd{ - Path: ipt.path, - Args: args, - Stdout: stdout, - Stderr: &stderr, - } - - if err := cmd.Run(); err != nil { - switch e := err.(type) { - case *exec.ExitError: - return &Error{*e, cmd, stderr.String(), ipt.proto, nil} - default: - return err - } - } - - return nil -} - -// getIptablesCommand returns the correct command for the given protocol, either "iptables" or "ip6tables". -func getIptablesCommand(proto Protocol) string { - if proto == ProtocolIPv6 { - return "ip6tables" - } else { - return "iptables" - } -} - -// Checks if iptables has the "-C" and "--wait" flag -func getIptablesCommandSupport(v1 int, v2 int, v3 int) (bool, bool, bool) { - return iptablesHasCheckCommand(v1, v2, v3), iptablesHasWaitCommand(v1, v2, v3), iptablesHasRandomFully(v1, v2, v3) -} - -// getIptablesVersion returns the first three components of the iptables version -// and the operating mode (e.g. nf_tables or legacy) -// e.g. "iptables v1.3.66" would return (1, 3, 66, legacy, nil) -func extractIptablesVersion(str string) (int, int, int, string, error) { - versionMatcher := regexp.MustCompile(`v([0-9]+)\.([0-9]+)\.([0-9]+)(?:\s+\((\w+))?`) - result := versionMatcher.FindStringSubmatch(str) - if result == nil { - return 0, 0, 0, "", fmt.Errorf("no iptables version found in string: %s", str) - } - - v1, err := strconv.Atoi(result[1]) - if err != nil { - return 0, 0, 0, "", err - } - - v2, err := strconv.Atoi(result[2]) - if err != nil { - return 0, 0, 0, "", err - } - - v3, err := strconv.Atoi(result[3]) - if err != nil { - return 0, 0, 0, "", err - } - - mode := "legacy" - if result[4] != "" { - mode = result[4] - } - return v1, v2, v3, mode, nil -} - -// Runs "iptables --version" to get the version string -func getIptablesVersionString(path string) (string, error) { - cmd := exec.Command(path, "--version") - var out bytes.Buffer - cmd.Stdout = &out - err := cmd.Run() - if err != nil { - return "", err - } - return out.String(), nil -} - -// Checks if an iptables version is after 1.4.11, when --check was added -func iptablesHasCheckCommand(v1 int, v2 int, v3 int) bool { - if v1 > 1 { - return true - } - if v1 == 1 && v2 > 4 { - return true - } - if v1 == 1 && v2 == 4 && v3 >= 11 { - return true - } - return false -} - -// Checks if an iptables version is after 1.4.20, when --wait was added -func iptablesHasWaitCommand(v1 int, v2 int, v3 int) bool { - if v1 > 1 { - return true - } - if v1 == 1 && v2 > 4 { - return true - } - if v1 == 1 && v2 == 4 && v3 >= 20 { - return true - } - return false -} - -// Checks if an iptables version is after 1.6.2, when --random-fully was added -func iptablesHasRandomFully(v1 int, v2 int, v3 int) bool { - if v1 > 1 { - return true - } - if v1 == 1 && v2 > 6 { - return true - } - if v1 == 1 && v2 == 6 && v3 >= 2 { - return true - } - return false -} - -// Checks if a rule specification exists for a table -func (ipt *IPTables) existsForOldIptables(table, chain string, rulespec []string) (bool, error) { - rs := strings.Join(append([]string{"-A", chain}, rulespec...), " ") - args := []string{"-t", table, "-S"} - var stdout bytes.Buffer - err := ipt.runWithOutput(args, &stdout) - if err != nil { - return false, err - } - return strings.Contains(stdout.String(), rs), nil -} - -// counterRegex is the regex used to detect 
nftables counter format -var counterRegex = regexp.MustCompile(`^\[([0-9]+):([0-9]+)\] `) - -// filterRuleOutput works around some inconsistencies in output. -// For example, when iptables is in legacy vs. nftables mode, it produces -// different results. -func filterRuleOutput(rule string) string { - out := rule - - // work around an output difference in nftables mode where counters - // are output in iptables-save format, rather than iptables -S format - // The string begins with "[0:0]" - // - // Fixes #49 - if groups := counterRegex.FindStringSubmatch(out); groups != nil { - // drop the brackets - out = out[len(groups[0]):] - out = fmt.Sprintf("%s -c %s %s", out, groups[1], groups[2]) - } - - return out -} diff --git a/vendor/github.com/coreos/go-iptables/iptables/lock.go b/vendor/github.com/coreos/go-iptables/iptables/lock.go deleted file mode 100644 index a88e92b4e..000000000 --- a/vendor/github.com/coreos/go-iptables/iptables/lock.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package iptables - -import ( - "os" - "sync" - "syscall" -) - -const ( - // In earlier versions of iptables, the xtables lock was implemented - // via a Unix socket, but now flock is used via this lockfile: - // http://git.netfilter.org/iptables/commit/?id=aa562a660d1555b13cffbac1e744033e91f82707 - // Note the LSB-conforming "/run" directory does not exist on old - // distributions, so assume "/var" is symlinked - xtablesLockFilePath = "/var/run/xtables.lock" - - defaultFilePerm = 0600 -) - -type Unlocker interface { - Unlock() error -} - -type nopUnlocker struct{} - -func (_ nopUnlocker) Unlock() error { return nil } - -type fileLock struct { - // mu is used to protect against concurrent invocations from within this process - mu sync.Mutex - fd int -} - -// tryLock takes an exclusive lock on the xtables lock file without blocking. -// This is best-effort only: if the exclusive lock would block (i.e. because -// another process already holds it), no error is returned. Otherwise, any -// error encountered during the locking operation is returned. -// The returned Unlocker should be used to release the lock when the caller is -// done invoking iptables commands. -func (l *fileLock) tryLock() (Unlocker, error) { - l.mu.Lock() - err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB) - switch err { - case syscall.EWOULDBLOCK: - l.mu.Unlock() - return nopUnlocker{}, nil - case nil: - return l, nil - default: - l.mu.Unlock() - return nil, err - } -} - -// Unlock closes the underlying file, which implicitly unlocks it as well. It -// also unlocks the associated mutex. 
-func (l *fileLock) Unlock() error { - defer l.mu.Unlock() - return syscall.Close(l.fd) -} - -// newXtablesFileLock opens a new lock on the xtables lockfile without -// acquiring the lock -func newXtablesFileLock() (*fileLock, error) { - fd, err := syscall.Open(xtablesLockFilePath, os.O_CREATE, defaultFilePerm) - if err != nil { - return nil, err - } - return &fileLock{fd: fd}, nil -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4b992352c..3acff38c9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -144,8 +144,6 @@ github.com/containers/storage/drivers/quota github.com/containers/storage/pkg/fsutils github.com/containers/storage/pkg/ostree github.com/containers/storage/drivers/copy -# github.com/coreos/go-iptables v0.4.1 -github.com/coreos/go-iptables/iptables # github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a github.com/coreos/go-systemd/activation github.com/coreos/go-systemd/dbus |
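For orientation, here is a condensed, hypothetical sketch of how the endpoint-test helpers introduced above fit together. It mirrors test/endpoint/exists_test.go and assumes the package-level pieces added in this change (Setup, StartVarlinkWithCache, Varlink, OutputToMapToInt, Cleanup) together with the suite constants (ALPINE) and the makeNameMessage helper defined elsewhere in the endpoint package; it is not part of the patch itself.

```go
// Hypothetical orientation sketch -- assumes the endpoint package above.
package endpoint

import (
	. "github.com/containers/libpod/test/utils"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Endpoint helper usage (sketch)", func() {
	var (
		tempdir      string
		err          error
		endpointTest *EndpointTestIntegration
	)

	BeforeEach(func() {
		// Each spec gets its own storage root and its own varlink socket,
		// pre-populated from the image cache built during suite setup.
		tempdir, err = CreateTempDirInTempDir()
		Expect(err).To(BeNil())
		endpointTest = Setup(tempdir)
		endpointTest.StartVarlinkWithCache()
	})

	AfterEach(func() {
		// Stops containers, shuts down varlink, and removes the tempdir.
		endpointTest.Cleanup()
	})

	It("asks the varlink endpoint whether an image exists", func() {
		// Varlink() drives the varlink CLI against the per-test socket and
		// waits for it to finish; the arguments mirror the suite's own tests.
		result := endpointTest.Varlink("ImageExists", makeNameMessage(ALPINE), false)
		Expect(result.ExitCode()).To(BeZero())

		// The reply is decoded into a map; per the exists tests above,
		// "exists" is 0 when the image is present and 1 when it is not.
		output := result.OutputToMapToInt()
		Expect(output["exists"]).To(BeZero())
	})
})
```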